BIND 10 trac1386, updated. 3839f74b09d70e852cf1329d55628f6ae712140e [1386] Changelog for #1386
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Dec 13 13:57:48 UTC 2011
The branch, trac1386, has been updated
via 3839f74b09d70e852cf1329d55628f6ae712140e (commit)
via 920ee68644189ee382135cfcf774e31ca8d9d530 (commit)
via e3c35e75141468ab84fc07ea16d6665ef3fab784 (commit)
via 1615a521d61a0f47a627f92cc4f95d982a04f8d1 (commit)
via e26901a19766452353b43e81ddc993b26b2b8518 (commit)
via eaa492e0bfa0855b01ec86ed0885d90166d32c7e (commit)
via 95b7c29f155b8fa21cf85c6d3afbe3f510db83d1 (commit)
via a677ae91c9d7a5bd2ef8574f84e9f33f90c45e44 (commit)
via 9b2b249d23576c999a65d8c338e008cabe45f0c9 (commit)
via 45274924f3f25b70547809eeda5dbcbe230029b5 (commit)
via a6a35e9970e4937924c1e33c01f6bd7eaf1ed994 (commit)
via c8dc421c5cad0f3296174b44f8deccfb69dec43f (commit)
via 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3 (commit)
via 37a11387baa321daec8311fc66d5d83e567886bd (commit)
via cee6a7af5f4f116ea89ecfde9a235dfe727bf207 (commit)
via a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2 (commit)
via 7f5229256540479ef63d707a14d194c80099fab3 (commit)
via c0be7a6c0e12c78a7e02a2a3b3b259a3382b52bf (commit)
via e82cf73f21e06b1a301ff120fb5f73203d343111 (commit)
via 777d6f3037738200a8a8426a0b957b18011d460a (commit)
via b705708aafbe566facc578a02b2f1cce44dff86f (commit)
via a56e72ce1bbc9d016a7ebd83eaba0aadbf2b41aa (commit)
via 318dceaa39aa30ee9d394e1e096d4891f3bee490 (commit)
via 25ac24557f8789f516ac7ffa1db831701ebf3c37 (commit)
via 2a6b5e55caf422997b893e319db83855fe1709b1 (commit)
via 83ecce54ed1ef5215f722e8339ae4a43f50ada5c (commit)
via 8dcf5eebb1b81e6cdc963985daa6c80497ac8c16 (commit)
via 662233a1483040da5dbc29dd9c9baf6bf0832223 (commit)
via 0e3736c7c3e882ba3f0616b9d0877792edd73317 (commit)
via 4b57a79735953705a82d8595a8ac541f7deb7a74 (commit)
via b41b7dc34a8a14339a1ff9daf1d705997d9abc43 (commit)
via 6bda5426c6f8b4e9faefc2075575e1c98bc3907c (commit)
via 6ff03bb9d631023175df99248e8cc0cda586c30a (commit)
via c37c596c844d35dc25eb729a99666948c6af8a6b (commit)
via 0c8c0f808a9ddc3a27a87f55964a965fb30f18ef (commit)
via df44752c7d69b865fa346215c1ec428153d5a3ca (commit)
via e30dfe31fbc6b290e63ffe4666dc39ebbd0d23aa (commit)
via 52802deb18632b028815d25f19976d0576d76e1f (commit)
via 60fd293717cc45323cfb10cf06d5bd264fa083cc (commit)
via 4220ef5ac9c8fdd4b506b3579f0e5eec98e3f3d8 (commit)
via 56be59fbcdc0ba54ccea0d09d49ef28dace3d65d (commit)
via 1341209064bc7afd8e720e3b12060239c368bcdd (commit)
via 86a4ce45115dab4d3978c36dd2dbe07edcac02ac (commit)
via 14a64484a3159a142f1b83a9830ac389a52f6a35 (commit)
via 146203239c50d2a00069986944d4ec168f17b31f (commit)
via b7b90e50531bcbd2caaffe6b51aea8917a56f40d (commit)
via fd5713eae0276a3d623953c88fc6281aab0b71d3 (commit)
via 4c0fd2ba248e5f925566e02724b34c85179a8c51 (commit)
via d408808ea8e4fe24a21d13992a88ad62c22634b8 (commit)
via cce182ed937f7294b2a11a3af5b2e03a380b7cd5 (commit)
via 982fc000a3064515ac30f5457b71802577fec90d (commit)
via eb08c5acb5deafa28ae37032910cce9a385d2030 (commit)
via b2186bf05d8d9858b0b58cf9dca5b215afe447f5 (commit)
via 88f94cf8e025558b14091af5050e2ce424237ea0 (commit)
via 4db174c8f096e2b54b3a5d384a6cffc25b9d9024 (commit)
via 49f1f1bc36042d4b4c27a002e963f400deb694d7 (commit)
via 3bb1cc7d961930edc38d9f8b34d0cccd3d69dd96 (commit)
via 6583a47dde1b851aee99de3c38c6331a22ede260 (commit)
via 99033305fa90310135d37118a0d47df3f2223770 (commit)
via b7bbe25fdf0d0c168c24c904c82c7e04fc269bba (commit)
via 862b0e38047101afef0f6d81ba3f0d74c9a51ea5 (commit)
via cf5e4481b1f1fb00e9897e4cd0527a9a707c4a63 (commit)
via 0ae049de728692224b087e95a645f45f4a69cb68 (commit)
via 1d5d7d8ea8d4dae23783b71e4a93165a36124663 (commit)
via 696f576c4743130bc8a77844c700582d5faaf391 (commit)
via 9f792ee32ba42a44291277d0577196e03a929738 (commit)
via 7a1997b2d44d05c179adecad0bf68c1f7c19935f (commit)
via e580e44a38c8dcf02e6ff40ba5977f18c8200572 (commit)
via 660cf410c0fb41587b977df992879f5dff934c19 (commit)
via b09fdcc6b45d4580b138cc9f59bfc051bd6ad360 (commit)
via 4d97ef5cdb4833a7a36b6679c16338505b07d4e3 (commit)
via 424f32864efcd2c647c6e5303125b6a8afb421ea (commit)
via 11885ff9e91c98ff0c4e93d81fc2b3d47a02090d (commit)
via 0c53ade72d9589d3d521937652af20f9d7a20f8e (commit)
via cc20ff993da1ddb1c6e8a98370438b45a2be9e0a (commit)
via 2142e8e6f760c577b58747c515c38fcc10168e04 (commit)
via 402c03afffde1e664c9dbd7b3c40e78a23b261c5 (commit)
via b3c9fffb335e7a42ff07a99016df46ccaf3dae97 (commit)
via 50fdb098fc80146a714794cb9156ac03de909103 (commit)
via a4f7763150aa2ced077f67bddbaa39255bd2fbf9 (commit)
via d3792aa7fc6ca920c7f9a3f36318ea1160974850 (commit)
via 9351ba5d5461f8f93a169a99a5e378416a970bd5 (commit)
via 8376919647ef84268085bfdd56271714416a6405 (commit)
via 4e636c3e9a365304fc463929cdddcd555dcb3ad2 (commit)
via 73721a97b1f3741bf58bf774601fef99a4ecb54f (commit)
via 6764e7da0d5c6967e5607fc6e31c112895ed1827 (commit)
via 40cfd32c280020af33a28c1501380a17ce604175 (commit)
via f9cbe6fb6e0a3d3dc03218429f530e6c01920169 (commit)
via 18128a99fd70d66eb09312dd8dfa0f0521033f97 (commit)
via e7019de8a8ec9ff562557c9fc2a0bd28a4c64829 (commit)
via 173de1cea65293e5f7cfb904454ee4fa96c51e1d (commit)
via e533dc83ccb7bf541e53f753c28a52248d7b195b (commit)
via 2ae72d76c74f61a67590722c73ebbf631388acbd (commit)
via 1ecbd2b16b44b6439030fd245f951fe5a538ecc5 (commit)
via 83ce13c2d85068a1bec015361e4ef8c35590a5d0 (commit)
via 5f34cb40976859771ab996113f78937310e7bda5 (commit)
via 8ee52fc304388aef162a819e2e59066bb70f0618 (commit)
via 3fdce88046bdad392bd89ea656ec4ac3c858ca2f (commit)
via a60c96464c0b959492a13b10767a7d9352be060e (commit)
via 0c0e8938a3ece603eddd70e3ebba94b03eeeeb92 (commit)
via 6690b8985d656aba3f25082cb62c9c02e5ad5a0b (commit)
via 7715c727d25d6430cbdbd82e40bdb7b3fa2ea843 (commit)
via 6d921ed561b6ef9d26273ca321dfa24622a982b5 (commit)
via 144549c04afdc36a98c7530eaebafb2b3d38d545 (commit)
via a26b979adb54baabdf939ed1a7852b2ee9b8b93c (commit)
via eb703a7e5b3749ca95a43c7582c9cccde564f123 (commit)
via 4464612807e6c4bd120298ca105b0503af0d3110 (commit)
via cbe600decbef4db82cb3b070e03b5702540af4aa (commit)
via c44075a40764cbb5dc37e9dd3666ce46bb8c7955 (commit)
via 0b5da8bd0800bfa3744e23c367cee2c38de7a497 (commit)
via 7ac21664665acee54a2a57331a2afc2a0c0a3530 (commit)
via 96a32f0ab063cbcd98fae0d5a87bc286bb8a7498 (commit)
via 7019db2a44f39897486eea618f4447c37dbabcf8 (commit)
via 024808d2a40b695f6c7191398c5a3d2c39c26736 (commit)
via 9df50bec4e691dc8cb724547659fb71caad656ab (commit)
via 3a206ab523d4612676362274ae8c30def53ac15e (commit)
via 15dffb02f179974c6726f16aff586c49eec8c7ca (commit)
via ad90525811869e2ff6fb5e24d38bf19e5743767e (commit)
via 936d5cad35355e1785550f7150f90e688166f448 (commit)
via 0737908f9e7cb615f80354131dca4df1a8c0bff6 (commit)
via d6d7a352b0b0df685f285cd413568b0e475339da (commit)
via 82fdeb65eba233a63b4425c7c5528a6257b91703 (commit)
via 5832af30821d4d4d077946b063b8f53056fa7e60 (commit)
via 1d24818f927edb1840f673f1ba67d7a45d9ef1c2 (commit)
via d6d90c1976110dcfb94cba2c56086960054cdeae (commit)
via f06cabd31d8e43781e4e32bdfdf24c78931d3ca8 (commit)
via 0e945f09e0e127e5097c32e5c84d96e34a18b3b6 (commit)
via c9be5877151f0564725d1cd9a20fe393fe7b422f (commit)
via 560122414abc11fa2a39331734c607cc37a4e76c (commit)
via 5468c7defd530b29696108cbda6d278b14be351b (commit)
via 64853ae0cca2070a0536ee6f499084c8a9017fa2 (commit)
via 1d8a592d1301b7e3a39c88ce1e001122db125307 (commit)
via 9682bf18607745c83437cd4592d3289e68410772 (commit)
via 20a6000a9f69476797477ca7af5fd83b8e236909 (commit)
via 537af1705fc5c1695b4b601571f65ead81dc1289 (commit)
via 1485c897a9e2c71ed2a33c8972c116a5f7e8e078 (commit)
via b7ac17da5405582098e98ed22bf122fe87658923 (commit)
via ada65c7115b9840f509f2b0c640368735fe69520 (commit)
via b8d14d2e45ee719e4e33adbecddafb4ae3aa4df1 (commit)
via 966fdcc69001cd2562ca96b392b9a45e7c336224 (commit)
via 567260cde6e21499ad4bf47789d538a929df5552 (commit)
via c789138250b33b6b08262425a08a2a0469d90433 (commit)
via b092df6f17e5d8f8f07e726fc4006e346417d49f (commit)
via d9b851b96c9fb3f56c4fe3a626f5c2b05bbb7a5f (commit)
via e665914a467810569a22e093a56ff5c711179143 (commit)
via 5b64e839be2906b8950f5b1e42a3fadd72fca033 (commit)
via 614e0ed92f8e6fb5f66277c7fbec8af6149cfa39 (commit)
via 4dc636c8d46d857f617bfef2ae9444dce438cff4 (commit)
via eb06cb8dfea727c5d9366583581ca674d23c4c2e (commit)
via e35e9b8a1cef995079ef15b0321aa7b420139226 (commit)
via 5de824f59cd2ba7e8cbb3cc58c4cd42c585c09c3 (commit)
via 9300ad5a1030e50ab76ff8a6f87b4d91d2d2b124 (commit)
via 8bab1dfcb1f33d58bf64f4c86ca7ba860b57cc76 (commit)
via 5e07929601d0799df76eaaf3ac5165b634efc556 (commit)
via 81f62344db074bc5eea3aaf3682122fdec6451ad (commit)
via eb6053d466fcea08fa66205d598d316e550863c8 (commit)
via 42d4a37a121ea7df3440268fe15995267fb66b12 (commit)
via 1b186f0a6fc242fa6dff08944ef43b60010d3631 (commit)
via 1825a24fe6fcda419cf2cdcd05180aa1b18ca526 (commit)
via fa7208a015545459cf56b03001fa1e6681e52d3a (commit)
via afee8bc035223c87c385a6855ab210b4e55cc161 (commit)
via 34de4dab534c2ccc735f6c815aa0459553aa1153 (commit)
via 717946a088b5c3fa287258e1ebc3fa6dd9093702 (commit)
via b6568546ccdac044fd30200a54708f9418e7af9d (commit)
via 936511f6e114f26bf86497466a7f61ef467bf5ad (commit)
via 7f573f432cfca90d2f9409829f14b3645083b9af (commit)
via b586771730eb1d22330e3a4f46c6c596d6ab57da (commit)
via 137abb738558ae9602f834890f477a924b520001 (commit)
via 14c51c664a98beb4867728d528190aff335e6f27 (commit)
via 6a4afc2165e4e6e692e71cb6795201c9df5afee2 (commit)
via 047ea7f6cfa2677865dcf441726dcc3e082608a9 (commit)
via de9532ce586ac69ff58dad2096f23db0cb062639 (commit)
via a81436b0c8f9c3974ec3373f586c45e2cf03cb64 (commit)
via b8e895092634bc661baf7fa043fffdba511f8256 (commit)
via 1a81569fb7c422d499f5a8eeef2d70d20e3284c6 (commit)
via 1bad76a6ab0ece059d8a587870f1da84510eccc5 (commit)
via 07b884ef0f72044fa5a5fd661ab068794ff68ca6 (commit)
via 51f3cb54492ef02e4951afb15a9c40ba0cdff4ce (commit)
via 51c9278d000daee776c5e12456d8c4ea60ff5f21 (commit)
via 207038a3ff7503cd2b2ab44238c71da55912bb4a (commit)
via 97cf501e33b45c373aa12a3cb8ae76909d3522bc (commit)
via 4ca30d27a1149bf5c445f382c4767b5c4e168d95 (commit)
via f6def2435fe72e00a782244461e8a186a4a23e63 (commit)
via c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca (commit)
via 75fc5817606b892c8eeb4964dc2f3d2f044f5491 (commit)
via 64c2d16fff1dd9e903378a55087843ad058791f5 (commit)
via 7ab1afe9a76986c4f175c338fdd6a8076a9d6dc9 (commit)
via e99a54597a5bb6dde1a0240ab74ac010b5029afb (commit)
via f02b9adf8e899f9358a26e087cfb43a5d4657b07 (commit)
via 3d5f2c3c14bcbf9cb7441f61ac8f84bceb8e6594 (commit)
via c1171699a2b501321ab54207ad26e5da2b092d63 (commit)
via 8f5429e41cb99c615abc1de0ee549702ca638217 (commit)
via 52fa244c12eb6d708b0260b5554396df5d00b079 (commit)
via 3e1a6afcabbef24f9501d8f4e3ed27d824db4309 (commit)
via ce54ff203a48042d3fa085037a23b315ccc2ecca (commit)
via 8fe1dbdadf8ce7aa288ae08320f315ab56433cb6 (commit)
via f76fdc8db2c0c9eba1603af7fa0272e7955e20d8 (commit)
via 6247db1cab96103bc06a6a281963227084cfb68d (commit)
via a9dc55c6cc18e2ed28f79cfbbdf7408a64a04ca4 (commit)
via 0dedcdb128646fdbf37be96f91076adda2f37c95 (commit)
via fc6a79af0d625ca18a2cdc3df91e86e8c1e02f9c (commit)
via 1dddec95f5e398269b28473a094dd6ad00ce648b (commit)
via 9150be5e5d0d6c0a46e44a0bbcdbd235c74bd6a7 (commit)
via f11f46b276646364fc115783ccc3d706510a2ee8 (commit)
via 5dc772648fb9779359b4d409086d55745afccad9 (commit)
via b4471621912e7518088b106d829a8431a6c4ea97 (commit)
via c05dc7099e4ed686ad1af573e6795a751d020025 (commit)
via 1beaacf4d924392323bd08a0c7aed65e9324e092 (commit)
via 84d0d090f5452410a58d8f8503c61d81ec85f2f4 (commit)
via 35015af5525965fdb421a856ffb01fb1ab8a7ad4 (commit)
via 151a4b05228e38b2031d095e438b63ae75dc0b76 (commit)
via 3924f73fed9f8158918713b09672175f09a973e4 (commit)
via 25b7595f3f1b158bd6278cea3c4dd0d6eeca8a2f (commit)
via cf8596b58bd57f4ebfff7d83d24294eaed38f7bf (commit)
via 411a806a41666b522ed35552588789d114cc1390 (commit)
via b77f5d1f891daf4c24024b44db6a7502e2728d2a (commit)
via ab3f90da16d31fc6833d869686e07729d9b8c135 (commit)
via e12070c73b529d348f64f8f6e24d75ce710a8a12 (commit)
via 710e8207090f894b14eaa9834a9c6cd551ea950d (commit)
via 80c131f5b0763753d199b0fb9b51f10990bcd92b (commit)
via a01eb512f67a14855fc9be9fff561c3c86634e0b (commit)
via 635662711c673bbcfc8fac95c96cfdc33702ca94 (commit)
via 15e23bca2cf7f266d32c6bb30a142a80ee543227 (commit)
via 0337c552ff717ee890ae784451668ce3d789650f (commit)
via ec1cc2b4be6e19519644534889865a3ee2c81a8a (commit)
via 277b80e0671586d8ace205cb53465b1f6f414466 (commit)
via a435f3ac50667bcb76dca44b7b5d152f45432b57 (commit)
via 63f318aa4405840d77c5e7afcf7c3437c5af241b (commit)
via 6dd270220a4bac70fa4cd6a898e331b658fe0af2 (commit)
via 1bb5168b7014a83690d1bb363dbcc0fa6d8fd7f1 (commit)
via ddb6d109c0947f203eaa6265a22d2fb3b166db0b (commit)
via 2eb9f486619e27aee0684f840c85d152b3ddfe0f (commit)
via 71378c1048bb610c748788dabfd04e421f6b4ac0 (commit)
via de43982b90d0fafd6b4e1857e366a6cd983cfab7 (commit)
via 77d69c99f2b3cc8ee627d8f73174ead9f03da412 (commit)
via 2e12dd60da03170462efad07173036f973813bd8 (commit)
via 3ff33cfedcca0cd1acb80a5cf2651f89403a82a9 (commit)
via cf297878998c80018ba9523a53ae4947fc7e6a5e (commit)
via 52f4e6faf56afb5c0432f88d5b1528090530c62e (commit)
via 13f108b25fbccc56b731bd5bcc505cdf48e91e91 (commit)
via 4d3aef6a83965e26781d6b74f0ff913926845c7c (commit)
via fb33c8d379f9e75b82edafff45d4dc13fda62630 (commit)
via 4f02b45248227dd98904b61bbcd2e6cff36b5fd6 (commit)
via 54d9d7c1597df3bcdf47d07db040f63f7008c6a7 (commit)
via 48c07943ac1dd24922f46cf970c214b5cf24813f (commit)
via bea7b0e3fde35a335bb9e6cf170b0fc240650275 (commit)
via 9b1c64b7d164b6b27d126e55391b2bbafeaf8c00 (commit)
via 96bf3ab5271347542e13b52e2c37b9c8810a6fad (commit)
via c59bb2dcd90a5d580a7f3c9e42a54a080f763add (commit)
via 319bc2d65301606aa938363dcb30a8519755886e (commit)
via d953caeeaf821743ed27ef4a47a45bef66615dc9 (commit)
via 5d382b4295b8455fae844a5ca94886788f6cb19b (commit)
via d08c42ad20f2c91bf64ef47ed893fa2aac4ff037 (commit)
via 8b92bb931e29b7b1bbb8147cda4f7d0aac507ac1 (commit)
via 08915b387e64f3cf9d9a86a5a21c4492db3a488c (commit)
via 1d4541dfd067cd2f0c9e155049c2b7f9d70fa896 (commit)
via 1f6edd11fbf7e0143f99f20fc714044b989b299a (commit)
via ecf6a71b5845c6710119dd97b500c7edeb3f44c2 (commit)
via a24c6579ab039afd67ecb50a71b9fc8eabf9b6c7 (commit)
via 3647e8ff9c194c1c0a576558f4f49ba4ff2614e7 (commit)
via c3d71baca757b39e13968369e0afb39dd4472eb8 (commit)
via a9040d4aba8e3c01a77236c81f07e2b06b300918 (commit)
via 35556de064c193779c3cd5e5b0fde583f4a8d598 (commit)
via c4f22c20ee19e1ffba43914671c059a434f4518c (commit)
via 12b72af07f5e06cf172b115b0acba3fbe3554467 (commit)
via ecd9c5fc4b3cf747e2b5a221504feac3adeb236e (commit)
via fc0a31681f7a8e4198068be0038eb9a4f8a74ec7 (commit)
via d3db538710b6547cc2e04127fb5fc9d2d5a181f9 (commit)
via 2ab2fd55d4a12d1469060a3657893121114e2e2f (commit)
via 2dd7ee33a13a07a00e22fbc81ecb8b19b57efa8f (commit)
via 5cea4cfbee9770f4299f5a701af89f7cbf977ef4 (commit)
via 1af57091dc0c38cff538de2470275f25caeb2eab (commit)
via 256c0a08483ac2bf396dfa8424b4c02f0681a0f4 (commit)
via 8f74718cc2012ca68a44d9ed9996f479c6834101 (commit)
via 5c92f567d93977bd56a7ed2898c7bee098b552ab (commit)
via 956a0a589db0a8250ec94ece377657783ac15caf (commit)
via 39def1d39c9543fc485eceaa5d390062edb97676 (commit)
via bcb432839cacdf10172d49dec94292871aee3526 (commit)
via 164d651a0e4c1059c71f56b52ea87ac72b7f6c77 (commit)
via 09f6d6281a4203a91dcfb6c56e240c06f11935b6 (commit)
via 76fb414ea5257b639ba58ee336fae9a68998b30d (commit)
via e5f37058b67c641b8eb024bd48ca269ae9e41163 (commit)
via 934a07b6d0ebec8bab258398894905de32878a8b (commit)
via 40f6dd2b378f31f4ec561eeeac534874a02a8ae8 (commit)
via 84fa061af28d72e51939039bfcbb04e1febc3cb1 (commit)
via b54f1b460285db4d6ae89dd716098a88363b1511 (commit)
via c1138d13b2692fa3a4f2ae1454052c866d24e654 (commit)
via 35b1914ce6ab5481ce40f584729d0949746c2219 (commit)
via 4df29b3303dbce85b8143d8d74935b3c9283fb31 (commit)
via 33a956b09f22597d91929b22542913412757e279 (commit)
via ed91f985331705fc345bec838697c9bda4b6b7e4 (commit)
via 1219d81b49e51adece77dc57b5902fa1c6be1407 (commit)
via 8380ccceca1b8412fbc6742cb37dbd7de843ac50 (commit)
via 38d84c59fbc097e57d03ac10d6a83edc63c4cffa (commit)
via c0cc183880fc5e1949bcc97585c20ac2ab21e281 (commit)
via 2d85e22f10321fbc5b9cd12f70e90907cb01830f (commit)
via 1c9f121360e6e612d02d365d70bd0843f8f93457 (commit)
via d142274062ed21d53f9e0b2a85531c935580013c (commit)
via 5de9e8a440b6b4ed8c6bbce156d75b740ec4c1b5 (commit)
via 631c5c2d24ba8be2b12930cc8267b2298414d563 (commit)
via 1b3e21e08311d84d649a2780471e9a8b46143dca (commit)
via ddf219d781a40764999bd8b19c80f607c2783b57 (commit)
via 24c2111ed800e95bc62901cd3b2970692a205578 (commit)
via f9224368908dd7ba16875b0d36329cf1161193f0 (commit)
via 4a68215905542025570f06fcc703fa44d6b37cfd (commit)
via b8f67d200e64a2a9931b6d664781caf835f2ecd4 (commit)
via 315f4999df039dbb2baa77ee12afa0dfbe01dc25 (commit)
via 7344d2788cd06e54ca7ca3e3a3f69010dac80670 (commit)
via 46bd9a8e6e3a543f97af6213bc7e43d619064aa7 (commit)
via ce546dddcbbf7efc4778c1d0d4210ca139ed5bf9 (commit)
via fa89a0798d166574e089b38d7bd43a701eda5467 (commit)
via 12b1a920f219e627bb5860f0a0217cc5c86749e5 (commit)
via cd342dae58399be6cdfad55a466a76ee385ccb08 (commit)
via f9e81512329b71d6b5d94bafa789c63e763b2a72 (commit)
via 226dc3ee718e1320079d6c6d8642e0f0dda1bdef (commit)
via 962a91763b9ef79e887e52e22fa23462ff7d680e (commit)
via 170936d47b2e9ad3d5c3ceabf86026fca9795150 (commit)
via dbf32272f3b76b90678add39038fb6978c03ab3e (commit)
via 3e19362bc1ba7dc67a87768e2b172c48b32417f5 (commit)
via 295732d42d2b0a9641edfa352087033d8eff2794 (commit)
via 758ab4461e8792e563ce1e0ad069d53b5e15d8dd (commit)
via b449ad20a4f58eb96aec8cd7dd7bb857bdb5d14b (commit)
via 8279efec0dae2291665a99e4d489e8e5ef7a51c1 (commit)
via 9f89f07adcc9ccdde454016f037076e04eb791c1 (commit)
via fdefb47da0a5d7203496738ba03d4e1737e8149e (commit)
via 93a5d45d9c1aa90249494608b8c2829059cc3b28 (commit)
via c1f5fb059e9c272dedc27a3f14fa8ed2fec71b95 (commit)
via fd1ae8e05771b151877ae3c082a7b3e3b32a20c7 (commit)
via 21887dffc4cd692ce23bfff1685fba0e2c1e55b0 (commit)
via c41c32c11999a34a46d2e20155358438d769f767 (commit)
via 181926059b0162e09c30b4b967b09294d713918e (commit)
via 466a968426ed9062d86239560492edf7dc72ee02 (commit)
via a59f28758abdb92721e010956bd421148643377b (commit)
via e09910d37b783b182ae2dc83f6cb272bff68cbb6 (commit)
via da3e9e54f1374d581d78f1d874ddafd427a622ab (commit)
via b34bf286c064d44746ec0b79e38a6177d01e6956 (commit)
via 648a187c5d7181019dc19531a1057bc3e6f70e96 (commit)
via 16b7feca0339f67acae30eb67d913bd3ef0298be (commit)
via 120946aa30b22c36995135b7d5bfcade4c26e192 (commit)
via 78770f52c7f1e7268d99e8bfa8c61e889813bb33 (commit)
via ff5154291678973eaa0483518302b74a62f0acba (commit)
via 498677a8877e4894fad598f9ec99974c414ef58c (commit)
via c4c93896137dd936066cd1a714569468bf248451 (commit)
via 713160c9bed3d991a00b2ea5e7e3e7714d79625d (commit)
via 9bab697bc984a6565a6f0dfe8a981f4809edc91c (commit)
via ab406229e29b7cfc470142ee0166086bf70790a3 (commit)
via e24f557e8208f43a8ade0855395c87b175bc351c (commit)
via 3f93372ba9416c9d759ea0c6d8981837c036448e (commit)
via fda23d6cf412c2a90df325c244f79811d939d3c7 (commit)
via b79e0ef1ad1ac5c64c8a131ea8e125ca6df066eb (commit)
via 3d3592d4b1e7d3b0b3164067e57c1343db691851 (commit)
via 32b1e0701a9b138321e510a432c5cdd49fa336c6 (commit)
via 84290dae3201ee83c8e4aad6f7e2b181d708811e (commit)
via 9b6f54409617896742151c6aab9f5f318b7f53c5 (commit)
via 36a5cd751a12ccbd31284ea19d0b10e8a5836b70 (commit)
via f1cb067ea86ab38810007ec6743e7c1f91042e99 (commit)
via 6ddab5f4ea56162d0834e22a68605a1a427cc8c2 (commit)
via cd4fd339a084dbfb1e2d35d5c008260de9d48572 (commit)
via e4b99333e4c9946741148b6c95ed070653bec0fe (commit)
via b0cb2b651ec620418e891db0d21791beadb81906 (commit)
via e9e0f96594eec741393fa197c1d91362c96109e1 (commit)
via 96e0aa96b5a2fd31833e9afe64bb8e4cc34e23c5 (commit)
via 48ee64bfbde99ce88eb305d2a751283b42c826ad (commit)
via cfecb1acb98f45a12864b7730ea58afbeb674c7b (commit)
via 9ab6902f20b57452eaecf8f737d37f8dedcd623a (commit)
via d9be597335af84bc93c9559bbd76fa85ef0f49c4 (commit)
via 8c57956e16dd09b528cd11dbf4c2fa51e48da359 (commit)
via e84f2aa5e9e493aa7dadfbd3b31753b5837d9069 (commit)
via dabf62d5444fe3a1e55e72aa393e0dddf188df7b (commit)
via ca3d2d1badee8e5e6d3c1f73fb29afdcc7692fa6 (commit)
via 94ec743d73153258d8a231e2e5126749ea00e3c8 (commit)
via dca136175cf0dde67a63f40953187ca60f90caad (commit)
via 625aea76304c024102cb5065f910e5121b1641f7 (commit)
via a4c51111cc0fc28c6517a11f8ae88682ab8e6996 (commit)
via 8a5b3e3b460e7f741b1560f73423c8d688db9d85 (commit)
via 275d091229e914a848408b785f0143541abed6d5 (commit)
via b5553ef764f6c8cb0acea25e14b6e7a6a3a4cd47 (commit)
via bdde86c05963e9d491015e906c1b899609417887 (commit)
via 936e61c743af685c398abc7590cd813b70a5f5e5 (commit)
via 038c8121cd5e6cdcda93c4b167b8b1e858ced3f5 (commit)
via eb53cae4b35f858436cc20bf28ad06cbdb2211ab (commit)
via 868282b5bbeadf7ba0dda49cb9813a1cb5ad09e7 (commit)
via 11a4fe2def28da2ae83c94647a11fbb2114ec467 (commit)
via c2213ce7281be2aed47023a6f052bbec868a6028 (commit)
via 60f2c617c5951fd465eb094c5c7c82ae14995efb (commit)
via 54d84160bf6ed66a7c86f9f9be8d66ff25f80884 (commit)
via 045c30f0dffebb30ad8862986be435748ed0efb6 (commit)
via a6fd03e989a1fd5ae9514774bb3b3bb2a6668765 (commit)
via 8c07f46adfdd748ee33b3b5e9d33a78a64dded10 (commit)
via 235ff5af7733a7d464b172c4424f8facf284fed6 (commit)
via 8f3f3e9da36c5a0cbbfa4e2a5ddc598be7fece4a (commit)
via fe04c9377836fcd387f79447906e7ec83911b5b2 (commit)
via 43de15e4b0bd0094910ecc4f4365744cb6c1eeab (commit)
via 5e2238494ec665b558a6bf3b6a2c7351c1e022ba (commit)
via 755cd222be64019ea3b8db62e6d2643e6b6374c7 (commit)
via 5720f2262f0a1e4b8b2dcb1b66b94431e0dc6ff2 (commit)
via 8780f998204e96767785b29cd5b0e58cbeb10e1f (commit)
via cb74737554ee7e1bc3f03fc4112dee0d2b64d174 (commit)
via 46c206bab683f816304054c3a3f9c21ffa0af2a1 (commit)
via 0d94cca23a4f22d1bb953d62d38358a8b0e49f01 (commit)
via 4215dabae27f7b9b089ff8fafef2ba5425062fc5 (commit)
via 219879a5c8d6cb361d6d6f91d88c199930560994 (commit)
via 7003eecf6f7792d140e74bac444fb00eb7b8415b (commit)
via 81986f1f0af388bc75baf4fe26e29771f885f200 (commit)
via 08e1873a3593b4fa06754654d22d99771aa388a6 (commit)
via 90a1746c2d4da5b1a75ea76a7f0febc35b80c440 (commit)
via 0878c77ba4bcbaeb509f2bb7c2d52ee62864dadc (commit)
via efeb506e624945c6f21755621897a088715045b7 (commit)
via fda514f6b5ff65648709273dc62f960d85f4e066 (commit)
via 2afbc7d3564b16d49043d48fe5ed9dd343311861 (commit)
via ce28b51d36567d63b5258648f7fbe406baaa5677 (commit)
via 9753568c850855beecaabf500aea33483369d64f (commit)
via 7c6c725225eb89d9911b28aff0c6d80152e26aaf (commit)
via 0ad9b8c8482a134af7c47b64b412f642d08ce642 (commit)
via 132e0b02edf9a0cebccd64a183eb56839f42606f (commit)
via 3b5532c40c4aa55288a8d2c23163525c34568819 (commit)
via 5a2d958780a4a671cd8df9080d99ff95dd16772d (commit)
via 2aac7b891f4ee43fa29bbd41ee3bd48c4a849010 (commit)
via 65f4be2b65bf19baad6bbeda742b44dff7cd9b4a (commit)
via a3ba4cca05891f1052aae6bbe28c125799c7fe6f (commit)
via bccc91bbd2496b87b408ebff3cd9c6880f952b1c (commit)
via 88147da513fdb22eb4e430390746f36c96304c7e (commit)
via 4dc03f5419813b974b9794aa2cba4f55557fbbb5 (commit)
via dc2ea48db152796f6c0f62641f00646ef32e2b9c (commit)
via b513f0ab652e11892c232b6170f675fbb9990609 (commit)
via bde035f1ebcb1a9c7678692538f9aec18f5232e6 (commit)
via 3a330862f1357a4e0edd570de5896785029f4530 (commit)
via 567f822d4758d13b84161d67118ac1bce08b4c47 (commit)
via f94f5bc089b09a77b34138bbf19ea71921a7950d (commit)
via e3406364189d62ba54d85c3d23b40cefd02af584 (commit)
via 6da32eaece41f360a87388c44528dca979c10ab0 (commit)
via b85213cd68ec24c5deede886d466bf0911b9e762 (commit)
via 056a1342f0d73cf53a37ed672a8a4ad907c4cfa2 (commit)
via 3dcdc74a5e0f8cb7fd0c6a3f6dee480e30199f03 (commit)
via 7fb9faf4602b6b4feff4c940942c12be838a8153 (commit)
via d60907a85ba3f762b81189588d1b7317b95e0521 (commit)
via b88b05b2a779554a0e3c345933104d42046fffaa (commit)
via 075e3787986676c7491f157931b6f7da1773db0a (commit)
via 71de39fb8126b7200b2f6dcd9689a000c958fe0e (commit)
via f337180ad87778e3b91111efe93c3e31b1c92a91 (commit)
via 489a53541118413b38865c8a3cf84b24b8b7dfe2 (commit)
via 63f04832f2604868133a23d110ce6df5a9707993 (commit)
via de07e6a0ab66de4d3c7720dc93bc7d9198c9d26b (commit)
via 4ca71b858671d112fade23b449f2a59f14d1d300 (commit)
via 7d2f07481169780071bf564223a20a219b550385 (commit)
via d5e189cf1573446503a4fafa3e909db60eb04623 (commit)
via 0f7a43ef24e2fedfa554200cbfa3d83971dbfd90 (commit)
via 9f854755d1bad72bc4bd94accbc60d211c880cb7 (commit)
via 2139076757c1a14ecce96eafd1388f978732f8aa (commit)
via ab47b771999bd12171e65a8a3fb2ee512b709c4b (commit)
via ebe4e57805eda25ca347e0a9db8adad11fb3d4b5 (commit)
via d85912df5ef89ff95c3653403503f61d120a0761 (commit)
via 0f76bcddad8050baf811b0eaa5a117cc61dcbba1 (commit)
via f01fb1d89b20b23c0a680b1a97dc83e5a174e2e6 (commit)
via d2e805bb39d06f0ed47c49879909f35b5d341530 (commit)
via 9862bdf184aceb37cfdbb4fbb455209bdf88a0f4 (commit)
via 92794c72752a77005c2f9c7683fd2c65d7d802e9 (commit)
via 046729c74341bb2ed1e6f60f81470cf6a6883000 (commit)
via 36db2f897ac139ca9b71ccee07a7b1ba1e3aee7b (commit)
via 3000256b60ee6a2c19a7188be4d17eca833ce869 (commit)
via 0b6937d0e075e1192c41891ae138532f2c733b47 (commit)
via 5371b694b6cc564c3f1899a935769dd024f38e56 (commit)
via 837002896937febe208c141912fc4f8c3beaa2ab (commit)
via edf044e9e2f1572b618ec2438cea1cad46432276 (commit)
via 573abf93bec24753aebb5a6c70d8f50def521879 (commit)
via d287d9c92ecfb59d2c9f525cf79c7bb5167984f6 (commit)
via 50e96053742a30584f91a6bdb4b788977cd166bf (commit)
via 06d6be693064252ed2535fc8685ca4e7b8db0989 (commit)
via f1e08d75cabc45454a9bde86158dc8c7348d7f9d (commit)
via cc48074a9fec60ef9ba69991549f9e167e620225 (commit)
via 7a5903389ed505f6c7ca4c87adf705216d11d1af (commit)
via 8e8607c6faa34d9493a831054ecb64281f1f06c7 (commit)
via d99d546be040419fd49ad3be179eb2206f5023de (commit)
via 4ab7d17edc10ce4f7b834709aa009aba4db9d877 (commit)
via df02b63fe1176c572a7eee996921f211ca970953 (commit)
via f8a64959bc5f3ddf68ba4d01bee092bf4f1f9558 (commit)
via 7e96227163334ecd54e506bd2cedb58d3f6cf91d (commit)
via ca42fb6438b70ef569d00dc07b1bb23c0f6124f2 (commit)
via bcb37a2f6b11128620bb34a0c2d3dbf7334c0ab7 (commit)
via d17ae6dc7160a471abdd05f22aacc359df54b4e4 (commit)
via d9319841c509648c1ac18fec2c3d2b2c08313eb9 (commit)
via 6d5f34008d7e793546fd990cad11e40268c0ff04 (commit)
via 8d7ef6fe3b696ee2cffdc4f10fdf673968933077 (commit)
via 6cd1c3aa7fb998fe9f873045b74185f793177cb5 (commit)
via e6d7624e503084067e6c4659c6bdbd89c038fdd7 (commit)
via 4b56e1807d8ce8b86da6793b67b50ff57ee62b9e (commit)
via 5c16ff47ae8d485da0684ee7dd5547eeef3c6232 (commit)
via 65d8475336b8e884ff261b9a1fe03688e1618cf4 (commit)
via 388e77cae5d9260bcc314465f6711bcdd782a26d (commit)
via 96c94d6baf0a68b641cc9b93966b09b38ebaa15b (commit)
via 1db4e8af5cf9a8600e8005807f0aa5109756c064 (commit)
via 4aa0057db95051e8e554bb5fcbcfbfecf822a5cd (commit)
via 89b3af8226cb89bcc59ceff5e9547dbfc5b30665 (commit)
via d0a7cf4a98daf0ec8759640a91a12059cece6c6d (commit)
via 5dc6be6febd523e202771cd11624efc29854349c (commit)
via f230c7d18b68d5c03131089a4f5c9739af7f9d83 (commit)
via e1682a42d23d36a3647878e13681dcd659622818 (commit)
via e45fa27d90ab3ea7b1081ca7d9513f63f5083b8d (commit)
via 1e9bc2c16ef78f35ec35e340c696b4bdc10b47b2 (commit)
via 85a2ce538c6f939ca539347676e5587228a29895 (commit)
via d1773b2ef6f98c26493ae76783158fc2ae6fbe52 (commit)
via 2f51afcbc57c6d58e7d90f37962f3b93bc768e1b (commit)
via 0b9c1b299f2078ab1a7bf08759a463eb179f0365 (commit)
via 918c35143eb61d6e0ac96e98f2a95b12d55fdc0c (commit)
via 480da1fe075da66aa8a144d37c23bac2fcfa1e2c (commit)
via 81b1ba0e9cf67bc5e8ee6040b28436d4c64b72cc (commit)
via fc17063223655ab14b4db33bd63dd33fdc5ed5ac (commit)
via 61feac8366f972b60410b925e36a9267338b3e9a (commit)
via 586c93cef97215330b8bdffed6c35335fb66173d (commit)
via 36dc8dd6f15a42f401ffa32829ed7c436e529eb3 (commit)
via 2085b2255a79c0e5a04fe457bbb228d2fa24953b (commit)
via 2d20ee347d82f840328c2bddd014cdf232962843 (commit)
via 1ff0be2456cfaf9279970ae9a30a48d6267b96cf (commit)
via 80447181a64656b97afa9ab71440907017e873f4 (commit)
via 3878aa43817deaee33b21956d3066baef77a24ce (commit)
via cb1c34cd2ffb876819441b4869a66a4cb500a8ba (commit)
via 01b4b95b5fb7aa99765f29ffc61f5131173148eb (commit)
via c5117dc4d2fd89f1a66849713c6a3cd51735699f (commit)
via 5d7004d0ac4fe553a61fd2eb99a8af3eb7324956 (commit)
via fc0fe98a085ece85e143188c5647740f95d347bc (commit)
via 456933355bf3bc2db5a6c52ba4dc6d8e826ce6e1 (commit)
via 67a11e710e06647dfb65ea6e592fd80851422dad (commit)
via b4b9c3e18f8d76b695d7b84f1b128ccba229d814 (commit)
via bb76c3f643eb85fc8b1ed8087f72368ad1d23aa3 (commit)
via 2764ae7bde7b314773b7258d23fce3813c4407b2 (commit)
via 1d9614bc52634bd512121f34af66290a2cdb2958 (commit)
via 34092bce6cb5755eb6b53979f8f624ca78b592fb (commit)
via 35ca4f5aa94daa5e3a8ddcb02812e7d76685e65e (commit)
via 6d46a3787127f87aa65c9dfb626476f79b4f0194 (commit)
via c692292fb26bf6af6e94b7e160c0c7af27e123ac (commit)
via d6a9dffdd4ee8af94e31ae9462e2ef851b49fca8 (commit)
via bfae9c1e78bcc1e94b4d5eef4d0bb9da1d42f30e (commit)
via 0428f6fcc7b5acc73f70913a17bd6f23c5a6ad3a (commit)
via 9b9a92fc3d9cd1e37166f04284a922f9ab220bbe (commit)
via a3a4e317a91c075f0d16de7d16cc652e508df101 (commit)
via bd938be1cafae39233d0a8357a4e10b383f7de37 (commit)
via e7d5e8f78ebad76b695e48fc2780babba6ec07d5 (commit)
via 0166b44b81851c687d85e4f3fd87ffb0e92c6d58 (commit)
via 96086ea69576acae7d59e1d7665f622bd526c7c1 (commit)
via 7c229ebaca82e06899126f9b364fe524ec6d4b56 (commit)
via 6b600cb1816705b04470ba2d0aca64dfdf8f55d2 (commit)
via a3fd03e16b71ae4e9b480e4e48c7ddfa393555ac (commit)
via 5038c63b05eaee1bda68346899ac3f6baf5fbe56 (commit)
via 5166d1a65421c3e8515dbcb0d5fcb44c7f400035 (commit)
via c383ebc71434baa5cb314b3de3f3e18f39ebd0c7 (commit)
via e41f8459ca5dbc886e838e6e32585ba5c7eb96e6 (commit)
via e856c49ae33b2b79d8eab0b313e4ba25db261c4a (commit)
via 3a6d50835b621e4825ec0d8434ce066bd31020d0 (commit)
via 6d2960ff386a85c9738fc4cfd3975ee1d58eaa04 (commit)
via 3a25578a01620918cd722e430b61c0fe91177e0a (commit)
via 8f876a23792b3feeedb807a66a08cd4f62d60d8a (commit)
via 6cfcb5a3c784f774702d9ca183e13f6b6690b74d (commit)
via d5ec22cc344998038cf68b6fdf309ad2a12b2b5e (commit)
via 2024a1554624868e5f0a4d09b75c3ddf39dd342d (commit)
via 10b6bc17b7c264f41dcdba64fc3a79904c06164a (commit)
via a48e7fa14f2ef90bce27ff3e7aa4a93165e08d37 (commit)
via 62809f71c563128cb3cc467d867c621c61dbb926 (commit)
via 08d090b4685220d3f286e1506e1a3c884146122f (commit)
via 7b667cbd3bd3baeaceb60b987ab9770684ff5038 (commit)
via af0b62cf1161739d3a1244750b60d3e6b75a22e8 (commit)
via b64ab304aa90d938003922c95926ef1b0ea4fec9 (commit)
via 4e0d6d115cd572e58b886bcaffee3f1df7b6bcad (commit)
via 4493013b75994f8689a26951592fb575a23e5b35 (commit)
via 8df7345ad6d658c6a366499b6e491790289168ed (commit)
via f0ad44ee4a8bc33ea2109d91243d95db1833659a (commit)
via 3f070803d6d61ffbbda0f6628bb2d7f0cfdb6ca0 (commit)
via c9160954fd701796f52c329e5ec3ca2ba6f5995c (commit)
via 25b432b279b90ca97dd4a69dc1d4f5428fe2660f (commit)
via dd63399d282dc503e4009bb579ddc4ca15ccde5f (commit)
via af2a4d06dedf27a1c86cd7ada5e85df495a79ff6 (commit)
via ab48bd8228405d50b149c502d7f73b5eb1a57608 (commit)
via ecf9f48f4b4c3beaf97ae0e83c11f4547f024734 (commit)
via 4d39e13d7f5ae5c30277f602f669f0421e2bf05c (commit)
via 3bf84d5c678f9f86df6382cf30e694404e2f77cb (commit)
via 12a6217c59bf48ead2e11aaaedb774af7a618701 (commit)
via cb57c9fcaa897752dd7599dcc15d647fb880285f (commit)
via 1294219279910a89d4a99e6292cea8e13a4c301e (commit)
via 61dd61b8f259b0938646fa2539fe928608a0fbad (commit)
via f1306bd835659173f3fffcfbe1bf8971dc62efd9 (commit)
via 7cc8a7d6c32472021b34e43d43a288cfa263f007 (commit)
via c75108b70a9d560034949a75dc52ecfb59fa0b3f (commit)
via 6266a0dd4e0537335e22c2941940636fe220c202 (commit)
via 14f9cfa80194d2d391ea6657ad0205e6223e2d25 (commit)
via 5e3d007b0b08f340e646a2df9073b31cd3c76476 (commit)
via c3a5acc65768a1d87c102159baae0d04f8c14790 (commit)
via 1c4e66cfdfab4fb4608f2b8d18a25e28e7a70adc (commit)
via 7db8a3e327aa6eb8fdc5fed2abb7f52b030fe6f8 (commit)
via fd3c952098c46d84c9a277b1409442813a263876 (commit)
via b108bc9f9231872d4f3e0fa768b8c0e4506a2b95 (commit)
via c5cef09ac250129340f357a9ea2dd798d290be4d (commit)
via 8b349f6730bf85ccfb37d368aa18db4f6c0aaa1b (commit)
via 4b584e952e14a40e81b7e360c75cd787ba988481 (commit)
via 702e2dd653a315141e01147ac4cc2a6c06fab673 (commit)
via 5d38929255f7d8cca95020672a2b72273a07de1d (commit)
via 599ec7f889bba386c838ec85735b203514905d9d (commit)
via 44160936a4c52ebaf4be6e1f0fcc02c84c7fb719 (commit)
via db063ad7e102eafe75bda392197e9653be95bea4 (commit)
via e23b6b271c892905c9a14386aee502610502bba4 (commit)
via e7a16b2735b09c0d5b55375e3091fa886940fc40 (commit)
via 8da9b5298d5cbd0df840240e71460d047f4da808 (commit)
via 18e970e16c5044da8b4a7d2c800f0b7baeab9f96 (commit)
via 0b145510ca7b6d4cfe8bc43cd6de2563907dfca3 (commit)
via 72f4baca540cc17e18da4632cb4d32df29f3a9a3 (commit)
via 86123d1dc31432d176eb54fa300eb65e269df0f4 (commit)
via 7e874ac36e4086fc0ff9b50537ffdbaeb685ed09 (commit)
via f0f4387faa4f6246546ee4b79e6289dd370913d1 (commit)
via 13c03c7116df55fa0aad790c2b2a88f3743ba95b (commit)
via 65b9917a960e8b49a947bed1886d1331155b95f5 (commit)
via 5d4e05531e443e355fbf8369a37efc239d1c95c4 (commit)
via c92981134284041b71efc68cff49fead91368e47 (commit)
via 60c6d07decbe759bb57da7dfafc79e71c52a9c6c (commit)
via 5634285ef8bed69dcceab61e84b7aefdf1c1ef5d (commit)
via e0c15795fa09d93fa8c6e3aa0722ca9ed01b61a0 (commit)
via 27f88f2ed0a0a7541f3ea9c6d95db5c805e4b062 (commit)
via 1adb9636b2ba1314140411cd142f9b2f95afede9 (commit)
via 439b8e22a099e641bbe9236bc44beed78634568d (commit)
via 7f150769d5e3485cd801f0b5ab9b1d3b25aae520 (commit)
via 6215c5929bdd6fbb708fd0a2ee034250aa5cc065 (commit)
via d83a117a090eaf417698eea6697ae750dc45c135 (commit)
via ea7f5ad5d326b7ed2d5f0ac1729c2301555b6417 (commit)
via 68ac89fcb9de65cb1c649aa58b317be3fc793fb7 (commit)
via 7f1dcc956a864b70e395d10ba095c0787db802a7 (commit)
via a3e7bf95ad016c9badd98c16614de4a9c168bad1 (commit)
via debb22346698f1be3bbbac4955fd6bd247aa41f4 (commit)
via c2d03d1688ae502c4e0b1eb23427ebae5307a091 (commit)
via 3439230170effea0daec2a106a616965d4830968 (commit)
via ca54736634e25786f6d54317e97f3e4db71064f0 (commit)
via 911b53ae021dbd04a6c12f69aa106fd2d868d54f (commit)
via 1e465d5417011d24cb9aa9ffaf80a369b6511e2c (commit)
via c82f6195acb5a12e91d61956b8b958ceb0a0f821 (commit)
via b458fc09d6749b7435cd3c95952b9ab22322cb49 (commit)
via d059d370074b13b36db3ab685c307ba668faeda6 (commit)
via d8e223ad5439cdf9916e96178a4320403615b507 (commit)
via b8031ec74703c03eec1be362f0d3e321c4d8ebe5 (commit)
via 2117c1db277b10f3bcc48b51d2ca0f821af79f2f (commit)
via e5d4874ace76b0caff412f2394a15a042492560b (commit)
via 76335a521773c8118b7137d79e5f6397614f1904 (commit)
via 292665a460ed22219490c742d52785b503002029 (commit)
via 31cf6504b544e20f5ac84e3f74afcaff817c3693 (commit)
via 0e6639a8432999f2880473b815d8fbeb335a6808 (commit)
via 196b9474f5eeb11a8d96e52fed500270331dabc6 (commit)
via 296a70859ceb0b168c3818a3869991e8b51c3932 (commit)
via f6f425b5e49110b76e9954dc71d152806503c0bf (commit)
via fa9b8636e68a97293c26f51f4ecf50a2753965e4 (commit)
via e438bc6f5d4da2cc953cb76b9a924077d11fe347 (commit)
via 043963cf999791194e2db9e59fb5920ec30fc20f (commit)
via a730ddd17c2a20dc55247b5a86d05e3d0bb740fd (commit)
via b235b396ae97ba25d59f5981da39f1d1e4c072e6 (commit)
via c46aac2b5c86d037c7c3f34fbeb54d7ac0998817 (commit)
via 7d1e13b7fb6a589336cd83bef4f81fa077785beb (commit)
via 49b9f8004299533dd7e54bde3820984d8b04f37b (commit)
via 8f6ca91d01a5155ace94f0c044e674e58f8e7898 (commit)
via be3038ae1b595d1b9942f9aa72fa3d96aed3b22d (commit)
via e81b86767a740bcb1c4d1a0408ad9a70690df0a6 (commit)
via 5222b98f4e2021eb543f836d5e6876eb28eab716 (commit)
via 0d1e50106720fd7c4ec58e88e381ce7cff071648 (commit)
via 8d139f70ee129787af631531e4ea825293007a58 (commit)
via 26841bf1f0c0f0066e17b53bea2261e759bfbdbe (commit)
via 6b4582111d6f9e8a09e305ec3da009d8d393603b (commit)
via 1b5cb4d4168c3fcc2d22bcfdf5260ffc36d0a42e (commit)
via f500fc46e6467263b38c50010170f83c10d22e8a (commit)
via 114e59f9ed93ba3b6e656785df5d527011f8ce2b (commit)
via eaa56b3d005a20f945cd333664cf34633cfe5a7e (commit)
via 236b6ec7a803f9024141e0dacc3dcf75583fea8d (commit)
via 81bb03bbb092bace3bd8a44a6ca2862154503092 (commit)
via b84d1a0e0f13064b8dd68222c063565ac4deec3f (commit)
via 3a6f9f395c141058fb732735beabe7dae1f84bb5 (commit)
via 657349ae281dcdf737b187d0be2cd7d0e4fa92a7 (commit)
via a7505fac495a9746d8bf3e9a2f4a3aa8541b85c2 (commit)
via 2cd7de7f848f743ee31c356fd7edc9231ba6ca3a (commit)
via 1468dd9e7bc1e0a045cdab88d1db815cc7e2bd52 (commit)
via 3582ccf1eb2093d34e944bcda5ea2069158349dc (commit)
via d64cd3aa3d095ad5f0e8054e8b2b2cabdab18d3f (commit)
via 0b7c39d9dcd44dfba0caf6e9353f00f47bbe7e9c (commit)
via d23827556ec500284bd155cdb731213343030f53 (commit)
from d0e0bab2c4e3ce4f60c893d3a89ec8c91e2f11e0 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 3839f74b09d70e852cf1329d55628f6ae712140e
Author: Dima Volodin <dvv at isc.org>
Date: Tue Dec 13 08:56:51 2011 -0500
[1386] Changelog for #1386
commit 920ee68644189ee382135cfcf774e31ca8d9d530
Author: Dima Volodin <dvv at isc.org>
Date: Tue Dec 13 08:54:30 2011 -0500
[1386] tabs in src/lib/asiodns/io_fetch.h src/lib/resolve/recursive_query.cc
commit e3c35e75141468ab84fc07ea16d6665ef3fab784
Author: Dima Volodin <dvv at isc.org>
Date: Tue Dec 13 08:49:26 2011 -0500
[1386] EDNS fallback on FORMERR
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 204 ++++-
Makefile.am | 2 +-
compatcheck/Makefile.am | 8 +
compatcheck/README | 5 +
compatcheck/sqlite3-difftbl-check.py.in | 60 ++
configure.ac | 120 ++-
doc/guide/bind10-guide.html | 241 ++++-
doc/guide/bind10-guide.txt | 205 ++++-
doc/guide/bind10-guide.xml | 245 ++++-
doc/guide/bind10-messages.html | 482 +++++++--
doc/guide/bind10-messages.xml | 549 ++++++++--
src/bin/auth/auth_srv.cc | 24 +-
src/bin/auth/benchmarks/Makefile.am | 2 +-
src/bin/auth/query.cc | 36 +-
src/bin/auth/query.h | 12 +
src/bin/auth/tests/auth_srv_unittest.cc | 155 +++-
src/bin/auth/tests/query_unittest.cc | 126 ++-
src/bin/bind10/TODO | 6 -
src/bin/bind10/bind10.8 | 220 ++++-
src/bin/bind10/bind10.xml | 215 ++++-
src/bin/bind10/bind10_messages.mes | 118 ++-
src/bin/bind10/bind10_src.py.in | 828 ++++++++-------
src/bin/bind10/bob.spec | 73 ++-
src/bin/bind10/tests/bind10_test.py.in | 1033 ++++++++++++++----
src/bin/dhcp6/.gitignore | 1 +
src/bin/dhcp6/dhcp6_srv.cc | 30 +-
src/bin/dhcp6/dhcp6_srv.h | 11 +-
src/bin/dhcp6/iface_mgr.cc | 437 ++++++---
src/bin/dhcp6/iface_mgr.h | 269 ++++-
src/bin/dhcp6/tests/Makefile.am | 4 +-
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 23 +-
src/bin/dhcp6/tests/iface_mgr_unittest.cc | 265 ++++-
src/bin/resolver/tests/Makefile.am | 2 +-
src/bin/stats/stats-httpd-xml.tpl | 23 +-
src/bin/stats/stats-httpd-xsd.tpl | 38 +-
src/bin/stats/stats-httpd-xsl.tpl | 27 +-
src/bin/stats/stats.py.in | 8 +-
src/bin/stats/stats_httpd.py.in | 511 ++++++++--
src/bin/stats/stats_httpd_messages.mes | 6 +
src/bin/stats/tests/Makefile.am | 2 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 823 +++++++++++++--
src/bin/stats/tests/b10-stats_test.py | 194 +++-
src/bin/stats/tests/test_utils.py | 59 +-
src/bin/xfrin/tests/Makefile.am | 3 +-
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 11264 -> 12288 bytes
src/bin/xfrin/tests/xfrin_test.py | 352 ++++++-
src/bin/xfrin/xfrin.py.in | 342 ++++--
src/bin/xfrin/xfrin_messages.mes | 69 ++-
src/bin/xfrout/b10-xfrout.8 | 13 +
src/bin/xfrout/b10-xfrout.xml | 25 +
src/bin/xfrout/tests/Makefile.am | 8 +
src/bin/xfrout/tests/testdata/creatediff.py | 58 +
src/bin/xfrout/tests/testdata/example.com | 6 +
src/bin/xfrout/tests/testdata/test.sqlite3 | Bin 0 -> 12288 bytes
src/bin/xfrout/tests/xfrout_test.py.in | 879 ++++++++++++----
src/bin/xfrout/xfrout.py.in | 527 +++++++---
src/bin/xfrout/xfrout_messages.mes | 120 ++-
src/lib/asiodns/io_fetch.cc | 22 +-
src/lib/asiodns/io_fetch.h | 8 +-
src/lib/asiolink/Makefile.am | 3 +
src/lib/cryptolink/Makefile.am | 3 +-
src/lib/cryptolink/tests/Makefile.am | 4 +-
src/lib/datasrc/Makefile.am | 13 +-
src/lib/datasrc/client.h | 88 ++-
src/lib/datasrc/data_source.h | 12 +
src/lib/datasrc/database.cc | 946 ++++++++++++------
src/lib/datasrc/database.h | 1099 ++++++++++++--------
src/lib/datasrc/datasrc_config.h.pre.in | 31 +
src/lib/datasrc/datasrc_messages.mes | 135 ++-
src/lib/datasrc/factory.cc | 53 +-
src/lib/datasrc/factory.h | 11 +-
src/lib/datasrc/memory_datasrc.cc | 64 +-
src/lib/datasrc/memory_datasrc.h | 9 +-
src/lib/datasrc/sqlite3_accessor.cc | 326 ++++++-
src/lib/datasrc/sqlite3_accessor.h | 60 +-
src/lib/datasrc/tests/Makefile.am | 103 ++-
src/lib/datasrc/tests/client_unittest.cc | 11 +-
src/lib/datasrc/tests/database_unittest.cc | 560 ++++++++++-
src/lib/datasrc/tests/factory_unittest.cc | 65 ++
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 48 +
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 203 +++--
src/lib/datasrc/tests/testdata/Makefile.am | 5 -
src/lib/datasrc/tests/testdata/brokendb.sqlite3 | Bin 2048 -> 4096 bytes
src/lib/datasrc/tests/testdata/diffs.sqlite3 | Bin 0 -> 16384 bytes
src/lib/datasrc/tests/testdata/diffs_table.sql | 123 +++
src/lib/datasrc/tests/testdata/example.org.sqlite3 | Bin 14336 -> 14336 bytes
.../datasrc/tests/testdata/example2.com.sqlite3 | Bin 11264 -> 14336 bytes
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 11264 -> 13312 bytes
src/lib/datasrc/tests/testdata/test-root.sqlite3 | Bin 14336 -> 17408 bytes
src/lib/datasrc/zone.h | 121 ++-
src/lib/dhcp/Makefile.am | 1 +
src/lib/dhcp/libdhcp.cc | 12 +-
src/lib/dhcp/option.cc | 31 +-
src/lib/dhcp/option.h | 27 +-
src/lib/dhcp/option4_addrlst.cc | 135 +++
src/lib/dhcp/option4_addrlst.h | 167 +++
src/lib/dhcp/option6_addrlst.cc | 6 +-
src/lib/dhcp/option6_addrlst.h | 19 +-
src/lib/dhcp/option6_ia.cc | 6 +-
src/lib/dhcp/option6_ia.h | 2 +-
src/lib/dhcp/option6_iaaddr.cc | 4 +-
src/lib/dhcp/option6_iaaddr.h | 3 +-
src/lib/dhcp/pkt4.cc | 53 +-
src/lib/dhcp/pkt4.h | 24 +-
src/lib/dhcp/tests/Makefile.am | 1 +
src/lib/dhcp/tests/option4_addrlst_unittest.cc | 273 +++++
src/lib/dhcp/tests/option_unittest.cc | 2 +
src/lib/dhcp/tests/pkt4_unittest.cc | 17 +-
src/lib/dns/Makefile.am | 3 +
src/lib/dns/python/Makefile.am | 1 +
src/lib/dns/python/pydnspp.cc | 17 +
src/lib/dns/python/rdata_python.cc | 210 +++--
src/lib/dns/python/rrset_python.cc | 97 +-
src/lib/dns/python/serial_python.cc | 281 +++++
src/lib/dns/python/serial_python.h | 64 ++
src/lib/dns/python/tests/Makefile.am | 1 +
src/lib/dns/python/tests/rdata_python_test.py | 8 +
src/lib/dns/python/tests/serial_python_test.py | 111 ++
src/lib/dns/rdata/generic/soa_6.cc | 6 +
src/lib/dns/rdata/generic/soa_6.h | 3 +
src/lib/dns/serial.cc | 76 ++
src/lib/dns/serial.h | 155 +++
src/lib/dns/tests/Makefile.am | 7 +-
src/lib/dns/tests/rdata_soa_unittest.cc | 5 +
src/lib/dns/tests/serial_unittest.cc | 179 ++++
src/lib/exceptions/exceptions.h | 11 +
src/lib/log/Makefile.am | 3 +-
src/lib/log/tests/Makefile.am | 10 +-
src/lib/nsas/nameserver_entry.cc | 24 +-
src/lib/nsas/nsas_messages.mes | 29 +-
src/lib/python/Makefile.am | 9 +-
src/lib/python/bind10_config.py.in | 4 +
src/lib/python/isc/bind10/Makefile.am | 3 +-
src/lib/python/isc/bind10/component.py | 647 ++++++++++++
src/lib/python/isc/bind10/sockcreator.py | 15 +-
src/lib/python/isc/bind10/socket_cache.py | 302 ++++++
src/lib/python/isc/bind10/special_component.py | 153 +++
src/lib/python/isc/bind10/tests/Makefile.am | 2 +-
src/lib/python/isc/bind10/tests/component_test.py | 1032 ++++++++++++++++++
.../python/isc/bind10/tests/sockcreator_test.py | 3 -
.../python/isc/bind10/tests/socket_cache_test.py | 396 +++++++
src/lib/python/isc/datasrc/Makefile.am | 3 +
src/lib/python/isc/datasrc/client_inc.cc | 88 ++-
src/lib/python/isc/datasrc/client_python.cc | 100 ++-
src/lib/python/isc/datasrc/datasrc.cc | 41 +
src/lib/python/isc/datasrc/finder_inc.cc | 5 +-
src/lib/python/isc/datasrc/finder_python.cc | 29 +-
src/lib/python/isc/datasrc/journal_reader_inc.cc | 80 ++
.../python/isc/datasrc/journal_reader_python.cc | 200 ++++
src/lib/python/isc/datasrc/journal_reader_python.h | 47 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 8 +
src/lib/python/isc/datasrc/tests/Makefile.am | 2 +
src/lib/python/isc/datasrc/tests/datasrc_test.py | 337 ++++++-
.../isc/datasrc/tests/testdata/example.com.sqlite3 | Bin 43008 -> 44032 bytes
.../datasrc/tests/testdata/test.sqlite3.nodiffs | Bin 43008 -> 43008 bytes
src/lib/python/isc/log/log.cc | 188 ++--
src/lib/python/isc/log/tests/log_test.py | 31 +
src/lib/python/isc/notify/notify_out.py | 150 ++-
src/lib/python/isc/notify/notify_out_messages.mes | 21 +
src/lib/python/isc/notify/tests/Makefile.am | 10 +
src/lib/python/isc/notify/tests/notify_out_test.py | 73 +-
.../isc/notify/tests/testdata/brokentest.sqlite3 | Bin 0 -> 11264 bytes
.../python/isc/notify/tests/testdata/example.com | 10 +
.../python/isc/notify/tests/testdata/example.net | 14 +
.../isc/notify/tests/testdata/multisoa.example | 5 +
.../python/isc/notify/tests/testdata/nons.example | 3 +
.../python/isc/notify/tests/testdata/nosoa.example | 7 +
.../python/isc/notify/tests/testdata/test.sqlite3 | Bin 0 -> 13312 bytes
src/lib/python/isc/testutils/Makefile.am | 2 +-
src/lib/python/isc/testutils/rrset_utils.py | 82 ++
src/lib/python/isc/xfrin/diff.py | 18 +-
src/lib/python/isc/xfrin/libxfrin_messages.mes | 10 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 26 +-
src/lib/resolve/recursive_query.cc | 186 +++-
src/lib/resolve/resolve_messages.mes | 97 ++-
src/lib/resolve/response_classifier.h | 4 +-
src/lib/xfr/Makefile.am | 2 +
src/lib/xfr/tests/Makefile.am | 25 +
src/lib/xfr/tests/client_test.cc | 37 +
src/lib/{acl => xfr}/tests/run_unittests.cc | 0
src/lib/xfr/xfrout_client.cc | 9 +-
.../configurations/xfrin/retransfer_master.conf | 22 +
.../configurations/xfrin/retransfer_slave.conf | 17 +
tests/lettuce/features/terrain/bind10_control.py | 56 +-
tests/lettuce/features/terrain/steps.py | 26 +-
tests/lettuce/features/terrain/terrain.py | 15 +-
tests/lettuce/features/xfrin_bind10.feature | 11 +
tests/system/bindctl/tests.sh | 5 +-
tests/tools/badpacket/badpacket.cc | 2 +
tools/reorder_message_file.py | 196 ++++
190 files changed, 17431 insertions(+), 3749 deletions(-)
create mode 100644 compatcheck/Makefile.am
create mode 100644 compatcheck/README
create mode 100755 compatcheck/sqlite3-difftbl-check.py.in
create mode 100755 src/bin/xfrout/tests/testdata/creatediff.py
create mode 100644 src/bin/xfrout/tests/testdata/example.com
create mode 100644 src/bin/xfrout/tests/testdata/test.sqlite3
create mode 100644 src/lib/datasrc/datasrc_config.h.pre.in
create mode 100644 src/lib/datasrc/tests/testdata/diffs.sqlite3
create mode 100644 src/lib/datasrc/tests/testdata/diffs_table.sql
create mode 100644 src/lib/dhcp/option4_addrlst.cc
create mode 100644 src/lib/dhcp/option4_addrlst.h
create mode 100644 src/lib/dhcp/tests/option4_addrlst_unittest.cc
create mode 100644 src/lib/dns/python/serial_python.cc
create mode 100644 src/lib/dns/python/serial_python.h
create mode 100644 src/lib/dns/python/tests/serial_python_test.py
create mode 100644 src/lib/dns/serial.cc
create mode 100644 src/lib/dns/serial.h
create mode 100644 src/lib/dns/tests/serial_unittest.cc
create mode 100644 src/lib/python/isc/bind10/component.py
create mode 100644 src/lib/python/isc/bind10/socket_cache.py
create mode 100644 src/lib/python/isc/bind10/special_component.py
create mode 100644 src/lib/python/isc/bind10/tests/component_test.py
create mode 100644 src/lib/python/isc/bind10/tests/socket_cache_test.py
create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.cc
create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.h
copy src/lib/{ => python/isc}/datasrc/tests/testdata/test.sqlite3.nodiffs (100%)
create mode 100644 src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.com
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.net
create mode 100644 src/lib/python/isc/notify/tests/testdata/multisoa.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nons.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nosoa.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/test.sqlite3
create mode 100644 src/lib/python/isc/testutils/rrset_utils.py
create mode 100644 src/lib/xfr/tests/Makefile.am
create mode 100644 src/lib/xfr/tests/client_test.cc
copy src/lib/{acl => xfr}/tests/run_unittests.cc (100%)
create mode 100644 tests/lettuce/configurations/xfrin/retransfer_master.conf
create mode 100644 tests/lettuce/configurations/xfrin/retransfer_slave.conf
create mode 100644 tests/lettuce/features/xfrin_bind10.feature
create mode 100644 tools/reorder_message_file.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 45671b7..2a48f63 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,202 @@
- 315. [func] tomek
+xxx. [bug] dvv
+ resolver: EDNS fallback on FORMERR
+ (Trac #1386, git TBD)
+
+342. [bug] stephen
+ In the resolver, a FORMERR received from an upstream nameserver
+ now results in a SERVFAIL being returned as a response to the original
+ query. Additional debug messages added to distinguish between
+ different errors in packets received from upstream nameservers.
+ (Trac #1383, git 9b2b249d23576c999a65d8c338e008cabe45f0c9)
+
+341. [func] tomek
+ libdhcp++: Support for handling both IPv4 and IPv6 added.
+ Also added support for binding IPv4 sockets.
+ (Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
+
+340. [build] jelte
+ Fixed several linker issues related to recent gcc versions, botan
+ and gtest.
+ (Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
+
+339. [bug] jinmei
+ libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
+ incorrectly propagated ASIO specific exceptions to the application
+ if the given file name was too long. This could lead to
+ unexpected shut down of b10-auth.
+ (Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
+
+338. [bug] jinmei
+ b10-xfrin didn't check SOA serials of SOA and IXFR responses,
+ which resulted in unnecessary transfer or unexpected IXFR
+ timeouts (these issues were not overlooked but deferred to be
+ fixed until #1278 was completed). Validation on responses to SOA
+ queries were tightened, too.
+ (Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
+
+337. [func] tomek
+ libdhcp++: Support for DHCPv4 option that can store a single
+ address or a list of IPv4 addresses added. Support for END option
+ added.
+ (Trac #1350, git cc20ff993da1ddb1c6e8a98370438b45a2be9e0a)
+
+336. [func] jelte
+ libdns++ (and its python wrapper) now includes a class Serial, for
+ SOA SERIAL comparison and addition. Operations on instances of this
+ class follow the specification from RFC 1982.
+ Rdata::SOA::getSerial() now returns values of this type (and not
+ uint32_t).
+ (Trac #1278, git 2ae72d76c74f61a67590722c73ebbf631388acbd)
+
+335. [bug]* jelte
+ The DataSourceClientContainer class that dynamically loads
+ datasource backend libraries no longer provides just a .so file name
+ to its call to dlopen(), but passes it an absolute path. This means
+ that it is no longer an system implementation detail that depends on
+ [DY]LD_LIBRARY_PATH which file is chosen, should there be multiple
+ options (for instance, when test-running a new build while a
+ different version is installed).
+ These loadable libraries are also no longer installed in the default
+ library path, but in a subdirectory of the libexec directory of the
+ target ($prefix/libexec/[version]/backends).
+ This also removes the need to handle b10-xfrin and b10-xfrout as
+ 'special' hardcoded components, and they are now started as regular
+ components as dictated by the configuration of the boss process.
+ (Trac #1292, git 83ce13c2d85068a1bec015361e4ef8c35590a5d0)
+
+334. [bug] jinmei
+ b10-xfrout could potentially create an overflow response message
+ (exceeding the 64KB max) or could create unnecessarily small
+ messages. The former was actually unlikely to happen due to the
+ effect of name compression, and the latter was marginal and at least
+ shouldn't cause an interoperability problem, but these were still
+ potential problems and were fixed.
+ (Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
+
+333. [bug] dvv
+ Solaris needs "-z now" to force non-lazy binding and prevent g++ static
+ initialization code from deadlocking.
+ (Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
+
+332. [bug] vorner
+ C++ exceptions in the isc.dns.Rdata wrapper are now converted
+ to python ones instead of just aborting the interpreter.
+ (Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
+
+bind10-devel-20111128 released on November 28, 2011
+
+331. [bug] shane
+ Fixed a bug in data source library where a zone with more labels
+ than an out-of-bailiwick name server would cause an exception to
+ be raised.
+ (Trac #1430, git 81f62344db074bc5eea3aaf3682122fdec6451ad)
+
+330. [bug] jelte
+ Fixed a bug in b10-auth where it would sometimes fail because it
+ tried to check for queued msgq messages before the session was
+ fully running.
+ (git c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca)
+
+329. [doc] vorner, jreed
+ Document the bind10 run control configuration in guide and
+ manual page.
+ (Trac #1341, git c1171699a2b501321ab54207ad26e5da2b092d63)
+
+328. [func] jelte
+ b10-auth now passes IXFR requests on to b10-xfrout, and no longer
+ responds to them with NOTIMPL.
+ (Trac #1390, git ab3f90da16d31fc6833d869686e07729d9b8c135)
+
+327. [func] jinmei
+ b10-xfrout now supports IXFR. (Right now there is no user
+ configurable parameter about this feature; b10-xfrout will
+ always respond to IXFR requests according to RFC1995).
+ (Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
+
+326. [build]* jinmei
+ Added a check script for the SQLite3 schema version. It will be
+ run at the beginning of 'make install', and if it detects an old
+ version of schema, installation will stop. You'll then need to
+ upgrade the database file by following the error message.
+ (Trac #1404, git a435f3ac50667bcb76dca44b7b5d152f45432b57)
+
+325. [func] jinmei
+ Python isc.datasrc: added interfaces for difference management:
+ DataSourceClient.get_updater() now has the 'journaling' parameter
+ to enable storing diffs to the data source, and a new class
+ ZoneJournalReader was introduced to retrieve them, which can be
+ created by the new DataSourceClient.get_journal_reader() method.
+ (Trac #1333, git 3e19362bc1ba7dc67a87768e2b172c48b32417f5,
+ git 39def1d39c9543fc485eceaa5d390062edb97676)
+
+324. [bug] jinmei
+ Fixed reference leak in the isc.log Python module. Most of all
+ BIND 10 Python programs had memory leak (even though the pace of
+ leak may be slow) due to this bug.
+ (Trac #1359, git 164d651a0e4c1059c71f56b52ea87ac72b7f6c77)
+
+323. [bug] jinmei
+ b10-xfrout incorrectly skipped adding TSIG RRs to some
+ intermediate responses (when TSIG is to be used for the
+ responses). While RFC2845 optionally allows to skip intermediate
+ TSIGs (as long as the digest for the skipped part was included
+ in a later TSIG), the underlying TSIG API doesn't support this
+ mode of signing.
+ (Trac #1370, git 76fb414ea5257b639ba58ee336fae9a68998b30d)
+
+322. [func] jinmei
+ datasrc: Added C++ API for retrieving difference of two versions
+ of a zone. A new ZoneJournalReader class was introduced for this
+ purpose, and a corresponding factory method was added to
+ DataSourceClient.
+ (Trac #1332, git c1138d13b2692fa3a4f2ae1454052c866d24e654)
+
+321. [func]* jinmei
+ b10-xfrin now installs IXFR differences into the underlying data
+ source (if it supports journaling) so that the stored differences
+ can be used for subsequent IXFR-out transactions.
+ Note: this is a backward incompatibility change for older sqlite3
+ database files. They need to be upgraded to have a "diffs" table.
+ (Trac #1376, git 1219d81b49e51adece77dc57b5902fa1c6be1407)
+
+320. [func]* vorner
+ The --brittle switch was removed from the bind10 executable.
+ It didn't work after change #316 (Trac #213) and the same
+ effect can be accomplished by declaring all components as core.
+ (Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
+
+319. [func] naokikambe
+ b10-stats-httpd was updated. In addition of the access to all
+ statistics items of all modules, the specified item or the items
+ of the specified module name can be accessed. For example, the
+ URI requested by using the feature is showed as
+ "/bind10/statistics/xml/Auth" or
+ "/bind10/statistics/xml/Auth/queries.tcp". The list of all possible
+ module names and all possible item names can be showed in the
+ root document, whose URI is "/bind10/statistics/xml". This change
+ is not only for the XML documents but also is for the XSD and
+ XSL documents.
+ (Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
+
+318. [func] stephen
+ Add C++ API for accessing zone difference information in
+ database-based data sources.
+ (Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
+
+317. [func] vorner
+ datasrc: the getUpdater method of DataSourceClient supports an
+ optional 'journaling' parameter to indicate the generated updater
+ to store diffs. The database based derived class implements this
+ extension.
+ (Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
+
+316. [func]* vorner
+ The configuration of what parts of the system run is more
+ flexible now. Everything that should run must have an
+ entry in Boss/components.
+ (Trac #213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
+
+315. [func] tomek
libdhcp: Support for DHCPv4 packet manipulation is now implemented.
All fixed fields are now supported. Generic support for DHCPv4
options is available (both parsing and assembly). There is no code
@@ -65,7 +263,7 @@
automatically.
(Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
-306. [bug] Stephen
+306. [bug] stephen
Boss process now waits for the configuration manager to initialize
itself before continuing with startup. This fixes a race condition
whereby the Boss could start the configuration manager and then
@@ -479,7 +677,7 @@ bind10-devel-20110705 released on July 05, 2011
(Trac #542, git 1aa773d84cd6431aa1483eb34a7f4204949a610f)
243. [func]* feng
- Add optional hmac algorithm SHA224/384/812.
+ Add optional hmac algorithm SHA224/384/512.
(Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
bind10-devel-20110519 released on May 19, 2011
diff --git a/Makefile.am b/Makefile.am
index 50aa6b9..cc91a56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = doc src tests
+SUBDIRS = compatcheck doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
new file mode 100644
index 0000000..029578d
--- /dev/null
+++ b/compatcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_SCRIPTS = sqlite3-difftbl-check.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a near future version.
+install-data-local:
+ $(PYTHON) sqlite3-difftbl-check.py \
+ $(localstatedir)/$(PACKAGE)/zone.sqlite3
diff --git a/compatcheck/README b/compatcheck/README
new file mode 100644
index 0000000..8381e60
--- /dev/null
+++ b/compatcheck/README
@@ -0,0 +1,5 @@
+This directory is a collection of compatibility checker programs.
+They will be run before any other installation attempts on 'make install'
+to see if the installation causes any substantial compatibility problems
+with existing configurations. If any checker program finds an issue,
+'make install' will stop at that point.
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
new file mode 100755
index 0000000..e3b7b91
--- /dev/null
+++ b/compatcheck/sqlite3-difftbl-check.py.in
@@ -0,0 +1,60 @@
+#!@PYTHON@
+
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, sqlite3, sys
+from optparse import OptionParser
+
+usage = 'usage: %prog [options] db_file'
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--upgrade", action="store_true",
+ dest="upgrade", default=False,
+ help="Upgrade the database file [default: %default]")
+(options, args) = parser.parse_args()
+if len(args) == 0:
+ parser.error('missing argument')
+
+db_file = args[0]
+
+# If the file doesn't exist, there's nothing to do
+if not os.path.exists(db_file):
+ sys.exit(0)
+
+conn = sqlite3.connect(db_file)
+cur = conn.cursor()
+try:
+ # This can be anything that works iff the "diffs" table exists
+ cur.execute('SELECT name FROM diffs DESC LIMIT 1')
+except sqlite3.OperationalError as ex:
+ # If it fails with 'no such table', create a new one or fail with
+ # warning depending on the --upgrade command line option.
+ if str(ex) == 'no such table: diffs':
+ if options.upgrade:
+ cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
+ 'zone_id INTEGER NOT NULL, ' +
+ 'version INTEGER NOT NULL, ' +
+ 'operation INTEGER NOT NULL, ' +
+ 'name STRING NOT NULL COLLATE NOCASE, ' +
+ 'rrtype STRING NOT NULL COLLATE NOCASE, ' +
+ 'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
+ else:
+ sys.stdout.write('Found an older version of SQLite3 DB file: ' +
+ db_file + '\n' + "Perform '" + os.getcwd() +
+ "/sqlite3-difftbl-check.py --upgrade " +
+ db_file + "'\n" +
+ 'before continuing install.\n')
+ sys.exit(1)
+conn.close()
diff --git a/configure.ac b/configure.ac
index 9723b8d..e370e21 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20111021, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20111129, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
AM_INIT_AUTOMAKE
AC_CONFIG_HEADERS([config.h])
@@ -96,6 +96,8 @@ case "$host" in
# Solaris requires special definitions to get some standard libraries
# (e.g. getopt(3)) available with common used header files.
CPPFLAGS="$CPPFLAGS -D_XPG4_2 -D__EXTENSIONS__"
+ # "now" binding is necessary to prevent deadlocks in C++ static initialization code
+ LDFLAGS="$LDFLAGS -z now"
;;
*-apple-darwin*)
# libtool doesn't work perfectly with Darwin: libtool embeds the
@@ -107,6 +109,12 @@ case "$host" in
SET_ENV_LIBRARY_PATH=yes
ENV_LIBRARY_PATH=DYLD_LIBRARY_PATH
;;
+*-freebsd*)
+ SET_ENV_LIBRARY_PATH=yes
+ ;;
+*-netbsd*)
+ SET_ENV_LIBRARY_PATH=yes
+ ;;
esac
AM_CONDITIONAL(SET_ENV_LIBRARY_PATH, test $SET_ENV_LIBRARY_PATH = yes)
AC_SUBST(SET_ENV_LIBRARY_PATH)
@@ -472,23 +480,33 @@ else
fi
fi
-BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
# We expect botan-config --libs to contain -L<path_to_libbotan>, but
# this is not always the case. As a heuristics workaround we add
-# -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
-# (but using include instead of lib) below.
+# -L`botan-config --prefix/lib` in this case (if not present already).
+# Same for BOTAN_INCLUDES (but using include instead of lib) below.
if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
- echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
- BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
+ echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
+ BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
fi
+
+# botan-config script (and the way we call pkg-config) returns -L and -l
+# as one string, but we need them in separate values
+BOTAN_LDFLAGS=
+BOTAN_NEWLIBS=
+for flag in ${BOTAN_LIBS}; do
+ BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
+ BOTAN_NEWLIBS="${BOTAN_NEWLIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+done
+BOTAN_LIBS=${BOTAN_NEWLIBS}
+
# See python_rpath for some info on why we do this
if test $rpath_available = yes; then
BOTAN_RPATH=
- for flag in ${BOTAN_LDFLAGS}; do
+ for flag in ${BOTAN_LDFLAGS}; do
BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
done
AC_SUBST(BOTAN_RPATH)
@@ -504,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
fi
AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_LIBS)
AC_SUBST(BOTAN_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
-
+LIBS_SAVED="$LIBS"
+LIBS="$LIBS $BOTAN_LIBS"
AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -525,7 +543,7 @@ AC_LINK_IFELSE(
AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
)
CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
# Check for log4cplus
log4cplus_path="yes"
@@ -537,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
- LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+ LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
else
# If not specified, try some common paths.
log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -545,21 +563,21 @@ else
do
if test -f $d/include/log4cplus/logger.h; then
LOG4CPLUS_INCLUDES="-I$d/include"
- LOG4CPLUS_LDFLAGS="-L$d/lib"
+ LOG4CPLUS_LIBS="-L$d/lib"
break
fi
done
fi
-LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
-AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_LIBS)
AC_SUBST(LOG4CPLUS_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+LIBS_SAVED="$LIBS"
+LIBS="$LOG4CPLUS_LIBS $LIBS"
AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
AC_LINK_IFELSE(
@@ -574,7 +592,7 @@ AC_LINK_IFELSE(
)
CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
#
# Configure Boost header path
@@ -667,6 +685,13 @@ else
AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
fi
+# I can't get some of the #include <asio.hpp> right without this
+# TODO: find the real cause of asio/boost wanting pthreads
+# (this currently only occurs for src/lib/cc/session_unittests)
+PTHREAD_LDFLAGS=
+AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(MULTITHREADING_FLAG)
#
# Check availability of gtest, which will be used for unit tests.
@@ -703,6 +728,48 @@ then
GTEST_LDFLAGS="-L$dir/lib"
GTEST_LDADD="-lgtest"
GTEST_FOUND="true"
+ # There is no gtest-config script on this
+ # system, which is supposed to inform us
+ # whether we need pthreads as well (a
+ # gtest compile-time option). So we still
+ # need to test that manually.
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
+ LIBS_SAVED=$LIBS
+ LIBS="$LIBS $GTEST_LDADD"
+ AC_MSG_CHECKING([whether gtest tests need pthreads])
+ # First try to compile without pthreads
+ AC_TRY_LINK([
+ #include <gtest/gtest.h>
+ ],[
+ int i = 0;
+ char* c = NULL;
+ ::testing::InitGoogleTest(&i, &c);
+ return (0);
+ ],
+ [ AC_MSG_RESULT(no) ],
+ [
+ LIBS="$LIBS_SAVED $GTEST_LDADD $PTHREAD_LDFLAGS"
+ # Now try to compile with pthreads
+ AC_TRY_LINK([
+ #include <gtest/gtest.h>
+ ],[
+ int i = 0;
+ char* c = NULL;
+ ::testing::InitGoogleTest(&i, &c);
+ return (0);
+ ],
+ [ AC_MSG_RESULT(yes)
+ GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
+ ],
+ # Apparently we can't compile it at all
+ [ AC_MSG_ERROR(unable to compile with gtest) ])
+ ])
+ CPPFLAGS=$CPPFLAGS_SAVED
+ LDFLAGS=$LDFLAGS_SAVED
+ LIBS=$LIBS_SAVED
break
fi
done
@@ -729,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
fi
PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
-# I can't get some of the #include <asio.hpp> right without this
-# TODO: find the real cause of asio/boost wanting pthreads
-# (this currently only occurs for src/lib/cc/session_unittests)
-PTHREAD_LDFLAGS=
-AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
-AC_SUBST(PTHREAD_LDFLAGS)
-
-AC_SUBST(MULTITHREADING_FLAG)
-
#
# ASIO: we extensively use it as the C++ event management module.
#
@@ -810,6 +868,7 @@ AM_CONDITIONAL(INSTALL_CONFIGURATIONS, test x$install_configurations = xyes || t
AC_CONFIG_FILES([Makefile
doc/Makefile
doc/guide/Makefile
+ compatcheck/Makefile
src/Makefile
src/bin/Makefile
src/bin/bind10/Makefile
@@ -903,6 +962,7 @@ AC_CONFIG_FILES([Makefile
src/lib/datasrc/tests/Makefile
src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
+ src/lib/xfr/tests/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
src/lib/log/tests/Makefile
@@ -931,6 +991,7 @@ AC_CONFIG_FILES([Makefile
tests/tools/badpacket/tests/Makefile
])
AC_OUTPUT([doc/version.ent
+ compatcheck/sqlite3-difftbl-check.py
src/bin/cfgmgr/b10-cfgmgr.py
src/bin/cfgmgr/tests/b10-cfgmgr_test.py
src/bin/cmdctl/cmdctl.py
@@ -982,6 +1043,7 @@ AC_OUTPUT([doc/version.ent
src/lib/python/bind10_config.py
src/lib/cc/session_config.h.pre
src/lib/cc/tests/session_unittests_config.h
+ src/lib/datasrc/datasrc_config.h.pre
src/lib/log/tests/console_test.sh
src/lib/log/tests/destination_test.sh
src/lib/log/tests/init_logger_test.sh
@@ -1010,6 +1072,7 @@ AC_OUTPUT([doc/version.ent
tests/system/ixfr/in-3/setup.sh
tests/system/ixfr/in-4/setup.sh
], [
+ chmod +x compatcheck/sqlite3-difftbl-check.py
chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
chmod +x src/bin/xfrin/run_b10-xfrin.sh
chmod +x src/bin/xfrout/run_b10-xfrout.sh
@@ -1075,8 +1138,9 @@ dnl includes too
Boost: ${BOOST_INCLUDES}
Botan: ${BOTAN_INCLUDES}
${BOTAN_LDFLAGS}
+ ${BOTAN_LIBS}
Log4cplus: ${LOG4CPLUS_INCLUDES}
- ${LOG4CPLUS_LDFLAGS}
+ ${LOG4CPLUS_LIBS}
SQLite: $SQLITE_CFLAGS
$SQLITE_LIBS
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 97ffb84..2972cdf 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,21 +1,21 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
- 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20111021. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
+ 20111021.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110809.
+ This is the reference guide for BIND 10 version 20111021.
The most up-to-date version of this document (in PDF, HTML,
and plain text formats), along with other documents for
BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
- </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b
10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfe
rs</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></dd></dl><
/div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a><
/span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Ma
nually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><s
pan class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="list-of-tables"><p><b>List of Tables</b></p><dl><dt>3.1. <a href="#id1168229437338"></a></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110809.
+ BIND 10 version 20111021.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
@@ -315,11 +315,11 @@
<code class="filename">var/bind10-devel/</code> —
data source and configuration databases.
</li></ul></div><p>
- </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
+ </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></div><p>
BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
- will also restart processes that exit unexpectedly.
+ will also restart some processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
@@ -327,17 +327,20 @@
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
- The <span class="command"><strong>b10-msgq</strong></span> and <span class="command"><strong>b10-cfgmgr</strong></span>
+ The <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span> and
+ <span class="command"><strong>b10-cfgmgr</strong></span>
services make up the core. The <span class="command"><strong>b10-msgq</strong></span> daemon
provides the communication channel between every part of the system.
The <span class="command"><strong>b10-cfgmgr</strong></span> daemon is always needed by every
module, if only to send information about themselves somewhere,
but more importantly to ask about their own settings, and
- about other modules.
- The <span class="command"><strong>bind10</strong></span> master process will also start up
+ about other modules. The <span class="command"><strong>b10-sockcreator</strong></span> will
+ allocate sockets for the rest of the system.
+ </p><p>
+ In its default configuration, the <span class="command"><strong>bind10</strong></span>
+ master process will also start up
<span class="command"><strong>b10-cmdctl</strong></span> for admins to communicate with the
- system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service or
- <span class="command"><strong>b10-resolver</strong></span> for recursive name service,
+ system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service,
<span class="command"><strong>b10-stats</strong></span> for statistics collection,
<span class="command"><strong>b10-xfrin</strong></span> for inbound DNS zone transfers,
<span class="command"><strong>b10-xfrout</strong></span> for outbound DNS zone transfers,
@@ -351,7 +354,107 @@
the process names for the Python-based daemons will be renamed
to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
This is not needed on some operating systems.
- </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Configuration of started processes"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="bind10.config"></a>Configuration of started processes</h2></div></div></div><p>
+ The processes to be started can be configured, with the exception
+ of the <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span>
+ and <span class="command"><strong>b10-cfgmgr</strong></span>.
+ </p><p>
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process
+ (currently there's also one component which doesn't represent
+ a process). If you didn't want to transfer out at all (your server
+ is a slave only), you would just remove the corresponding component
+ from the set, like this and the process would be stopped immediately
+ (and not started on the next startup):
+ </p><pre class="screen">> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+ </p><p>
+ To add a process to the set, let's say the resolver (which is not
+ started by default), you would do this:
+ </p><pre class="screen">> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+ Now, what it means. We add an entry called b10-resolver. It is both a
+ name used to reference this component in the configuration and the
+ name of the process to start. Then we set some parameters on how to
+ start it.
+ </p><p>
+ The special one is for components that need some kind of special care
+ during startup or shutdown. Unless specified, the component is started
+ in the usual way. This is the list of components that need to be started
+ in a special way, with the value of special used for them:
+ </p><div class="table"><a name="id1168229437338"></a><p class="title"><b>Table 3.1. </b></p><div class="table-contents"><table border="1"><colgroup><col align="left"><col align="left"><col align="left"></colgroup><thead><tr><th align="left">Component</th><th align="left">Special</th><th align="left">Description</th></tr></thead><tbody><tr><td align="left">b10-auth</td><td align="left">auth</td><td align="left">Authoritative server</td></tr><tr><td align="left">b10-resolver</td><td align="left">resolver</td><td align="left">The resolver</td></tr><tr><td align="left">b10-cmdctl</td><td align="left">cmdctl</td><td align="left">The command control (remote control interface)</td></tr><tr><td align="left">setuid</td><td align="left">setuid</td><td align="left">Virtual component, see below</td></tr></tbody></table></div></div><p><br class="table-break">
+ </p><p>
+ The kind specifies how a failure of the component should
+ be handled. If it is set to <span class="quote">“<span class="quote">dispensable</span>”</span>
+ (the default unless you set something else), it will get
+ started again if it fails. If it is set to <span class="quote">“<span class="quote">needed</span>”</span>
+ and it fails at startup, the whole <span class="command"><strong>bind10</strong></span>
+ shuts down and exits with error exit code. But if it fails
+ some time later, it is just started again. If you set it
+ to <span class="quote">“<span class="quote">core</span>”</span>, you indicate that the system is
+ not usable without the component and if such component
+ fails, the system shuts down no matter when the failure
+ happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any
+ other components as core as well if you wish (but you can
+ turn these off, they just can't fail).
+ </p><p>
+ The priority defines order in which the components should start.
+ The ones with higher number are started sooner than the ones with
+ lower ones. If you don't set it, 0 (zero) is used as the priority.
+ </p><p>
+ There are other parameters we didn't use in our example.
+ One of them is <span class="quote">“<span class="quote">address</span>”</span>. It is the address
+ used by the component on the <span class="command"><strong>b10-msgq</strong></span>
+ message bus. The special components already know their
+ address, but the usual ones don't. The address is by
+ convention the thing after <span class="emphasis"><em>b10-</em></span>, with
+ the first letter capital (eg. <span class="command"><strong>b10-stats</strong></span>
+ would have <span class="quote">“<span class="quote">Stats</span>”</span> as its address).
+
+ </p><p>
+ The last one is process. It is the name of the process to be started.
+ It defaults to the name of the component if not set, but you can use
+ this to override it.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ This system allows you to start the same component multiple times
+ (by including it in the configuration with different names, but the
+ same process setting). However, the rest of the system doesn't expect
+ such situation, so it would probably not do what you want. Such
+ support is yet to be implemented.
+ </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ The configuration is quite powerful, but that includes
+ a lot of space for mistakes. You could turn off the
+ <span class="command"><strong>b10-cmdctl</strong></span>, but then you couldn't
+ change it back the usual way, as it would require it to
+ be running (you would have to find and edit the configuration
+ directly). Also, some modules might have dependencies
+ -- <span class="command"><strong>b10-stats-httpd</strong></span> need
+ <span class="command"><strong>b10-stats</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>
+ needs the <span class="command"><strong>b10-auth</strong></span> to be running, etc.
+
+
+
+ </p><p>
+ In short, you should think twice before disabling something here.
+ </p></div><p>
+ Now, to the mysterious setuid virtual component. If you
+ use the <span class="command"><strong>-u</strong></span> option to start the
+ <span class="command"><strong>bind10</strong></span> as root, but change the user
+ later, we need to start the <span class="command"><strong>b10-auth</strong></span> or
+ <span class="command"><strong>b10-resolver</strong></span> as root (until the socket
+ creator is finished). So we need to specify
+ the time when the switch from root to the given user happens
+ and that's what the setuid component is for. The switch is
+ done at the time the setuid component would be started, if
+ it was a process. The default configuration contains the
+ setuid component with priority 5, <span class="command"><strong>b10-auth</strong></span>
+ has 10 to be started before the switch and everything else
+ is without priority, so it is started after the switch.
+ </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
@@ -507,12 +610,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437660"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438007"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -532,7 +635,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437725"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438072"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -546,7 +649,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437755"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438171"></a>Loading Master Zones Files</h2></div></div></div><p>
 RFC 1035 style DNS master zone files may be imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -575,7 +678,7 @@ This may be a temporary setting until then.
If you reload a zone already existing in the database,
all records from that prior zone disappear and a whole new set
appears.
- </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
process which is started by <span class="command"><strong>bind10</strong></span>.
When received, the zone is stored in the corresponding BIND 10
@@ -593,7 +696,7 @@ This may be a temporary setting until then.
In the current development release of BIND 10, incoming zone
transfers are only available for SQLite3-based data sources,
that is, they don't work for an in-memory data source.
- </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437989"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+ </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438302"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
In practice, you need to specify a list of secondary zones to
enable incoming zone transfers for these zones (you can still
trigger a zone transfer manually, without a prior configuration
@@ -609,7 +712,7 @@ This may be a temporary setting until then.
> <strong class="userinput"><code>config commit</code></strong></pre><p>
(We assume there has been no zone configuration before).
- </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438027"></a>Enabling IXFR</h2></div></div></div><p>
+ </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438340"></a>Enabling IXFR</h2></div></div></div><p>
As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
zone transfers by default. To enable IXFR for zone transfers
for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
@@ -631,7 +734,7 @@ This may be a temporary setting until then.
make this selection automatically.
These features will be implemented in a near future
version, at which point we will enable IXFR by default.
- </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438069"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438382"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
To manually trigger a zone transfer to retrieve a remote zone,
you may use the <span class="command"><strong>bindctl</strong></span> utility.
For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
@@ -641,16 +744,53 @@ This may be a temporary setting until then.
The <span class="command"><strong>b10-xfrout</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
- receives an AXFR request, <span class="command"><strong>b10-xfrout</strong></span>
- sends the zone.
- This is used to provide master DNS service to share zones
+ receives an AXFR or IXFR request, <span class="command"><strong>b10-auth</strong></span>
+ internally forwards the request to <span class="command"><strong>b10-xfrout</strong></span>,
+ which handles the rest of request processing.
+ This is used to provide primary DNS service to share zones
to secondary name servers.
The <span class="command"><strong>b10-xfrout</strong></span> is also used to send
- NOTIFY messages to slaves.
+ NOTIFY messages to secondary servers.
+ </p><p>
+ A global or per zone <code class="option">transfer_acl</code> configuration
+ can be used to control accessibility of the outbound zone
+ transfer service.
+ By default, <span class="command"><strong>b10-xfrout</strong></span> allows any clients to
+ perform zone transfers for any zones:
+ </p><pre class="screen">> <strong class="userinput"><code>config show Xfrout/transfer_acl</code></strong>
+Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)</pre><p>
+ You can change this to, for example, rejecting all transfer
+ requests by default while allowing requests for the transfer
+ of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+ </p><pre class="screen">> <strong class="userinput"><code>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</code></strong>
+> <strong class="userinput"><code>config add Xfrout/zone_config</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/origin "example.com"</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</code></strong>
+<strong class="userinput"><code> {"action": "ACCEPT", "from": "2001:db8::1"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ In the above example the lines
+ for <code class="option">transfer_acl</code> were divided for
+ readability. In the actual input it must be in a single line.
+ </p></div><p>
+ If you want to require TSIG in access control, a separate TSIG
+ "key ring" must be configured specifically
+ for <span class="command"><strong>b10-xfrout</strong></span> as well as a system wide
+ key ring, both containing a consistent set of keys.
+ For example, to change the previous example to allowing requests
+ from 192.0.2.1 signed by a TSIG with a key name of
+ "key.example", you'll need to do this:
+ </p><pre class="screen">> <strong class="userinput"><code>config set tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+ The first line of configuration defines a system wide key ring.
+ This is necessary because the <span class="command"><strong>b10-auth</strong></span> server
+ also checks TSIGs and it uses the system wide configuration.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
- Access control is not yet provided.
+ In a future version, <span class="command"><strong>b10-xfrout</strong></span> will also
+ use the system wide TSIG configuration.
+ The way to specify zone specific configuration (ACLs, etc) is
+ likely to be changed, too.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -665,7 +805,7 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -678,8 +818,13 @@ This may be a temporary setting until then.
You may change this using <span class="command"><strong>bindctl</strong></span>, for example:
</p><pre class="screen">
-> <strong class="userinput"><code>config set Boss/start_auth false</code></strong>
-> <strong class="userinput"><code>config set Boss/start_resolver true</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrin</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-auth</code></strong>
+> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
@@ -699,7 +844,7 @@ This may be a temporary setting until then.
</pre><p>
</p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
- Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438327"></a>Access Control</h2></div></div></div><p>
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438673"></a>Access Control</h2></div></div></div><p>
By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
DNS queries from the localhost (127.0.0.1 and ::1).
The <code class="option">Resolver/query_acl</code> configuration may
@@ -732,7 +877,7 @@ This may be a temporary setting until then.
</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
- syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438512"></a>Forwarding</h2></div></div></div><p>
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438891"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -786,7 +931,7 @@ This may be a temporary setting until then.
}
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438628"></a>Logging configuration</h2></div></div></div><p>
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439042"></a>Logging configuration</h2></div></div></div><p>
The logging system in BIND 10 is configured through the
Logging module. All BIND 10 modules will look at the
@@ -795,7 +940,7 @@ This may be a temporary setting until then.
- </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229438638"></a>Loggers</h3></div></div></div><p>
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439052"></a>Loggers</h3></div></div></div><p>
Within BIND 10, a message is logged through a component
called a "logger". Different parts of BIND 10 log messages
@@ -816,7 +961,7 @@ This may be a temporary setting until then.
(what to log), and the <code class="option">output_options</code>
(where to log).
- </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229438663"></a>name (string)</h4></div></div></div><p>
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439077"></a>name (string)</h4></div></div></div><p>
Each logger in the system has a name, the name being that
of the component using it to log messages. For instance,
if you want to configure logging for the resolver module,
@@ -889,7 +1034,7 @@ This may be a temporary setting until then.
<span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
- </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439035"></a>severity (string)</h4></div></div></div><p>
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439176"></a>severity (string)</h4></div></div></div><p>
This specifies the category of messages logged.
Each message is logged with an associated severity which
@@ -905,7 +1050,7 @@ This may be a temporary setting until then.
- </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439086"></a>output_options (list)</h4></div></div></div><p>
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439227"></a>output_options (list)</h4></div></div></div><p>
Each logger can have zero or more
<code class="option">output_options</code>. These specify where log
@@ -915,7 +1060,7 @@ This may be a temporary setting until then.
The other options for a logger are:
- </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439102"></a>debuglevel (integer)</h4></div></div></div><p>
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439243"></a>debuglevel (integer)</h4></div></div></div><p>
When a logger's severity is set to DEBUG, this value
specifies what debug messages should be printed. It ranges
@@ -924,7 +1069,7 @@ This may be a temporary setting until then.
If severity for the logger is not DEBUG, this value is ignored.
- </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439117"></a>additive (true or false)</h4></div></div></div><p>
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439258"></a>additive (true or false)</h4></div></div></div><p>
If this is true, the <code class="option">output_options</code> from
the parent will be used. For example, if there are two
@@ -938,18 +1083,18 @@ This may be a temporary setting until then.
- </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439154"></a>Output Options</h3></div></div></div><p>
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439294"></a>Output Options</h3></div></div></div><p>
The main settings for an output option are the
<code class="option">destination</code> and a value called
<code class="option">output</code>, the meaning of which depends on
the destination that is set.
- </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439169"></a>destination (string)</h4></div></div></div><p>
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439309"></a>destination (string)</h4></div></div></div><p>
The destination is the type of output. It can be one of:
- </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439201"></a>output (string)</h4></div></div></div><p>
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439341"></a>output (string)</h4></div></div></div><p>
Depending on what is set as the output destination, this
value is interpreted as follows:
@@ -971,12 +1116,12 @@ This may be a temporary setting until then.
The other options for <code class="option">output_options</code> are:
- </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439286"></a>flush (true of false)</h5></div></div></div><p>
+ </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439427"></a>flush (true of false)</h5></div></div></div><p>
Flush buffers after each log message. Doing this will
reduce performance but will ensure that if the program
terminates abnormally, all messages up to the point of
termination are output.
- </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439296"></a>maxsize (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439436"></a>maxsize (integer)</h5></div></div></div><p>
Only relevant when destination is file, this is maximum
file size of output files in bytes. When the maximum
size is reached, the file is renamed and a new file opened.
@@ -985,11 +1130,11 @@ This may be a temporary setting until then.
etc.)
</p><p>
If this is 0, no maximum file size is used.
- </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439308"></a>maxver (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439449"></a>maxver (integer)</h5></div></div></div><p>
Maximum number of old log files to keep around when
rolling the output file. Only relevant when
<code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
- </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439328"></a>Example session</h3></div></div></div><p>
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439468"></a>Example session</h3></div></div></div><p>
In this example we want to set the global logging to
write to the file <code class="filename">/var/log/my_bind10.log</code>,
@@ -1150,7 +1295,7 @@ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
And every module will now be using the values from the
logger named <span class="quote">“<span class="quote">*</span>”</span>.
- </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439609"></a>Logging Message Format</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229440023"></a>Logging Message Format</h2></div></div></div><p>
Each message written by BIND 10 to the configured logging
destinations comprises a number of components that identify
the origin of the message and, if the message indicates
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
index 619d56f..9c8ffbe 100644
--- a/doc/guide/bind10-guide.txt
+++ b/doc/guide/bind10-guide.txt
@@ -2,7 +2,7 @@
Administrator Reference for BIND 10
- This is the reference guide for BIND 10 version 20110809.
+ This is the reference guide for BIND 10 version 20111021.
Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
@@ -12,7 +12,7 @@ Administrator Reference for BIND 10
Consortium (ISC). It includes DNS libraries and modular components for
controlling authoritative and recursive DNS servers.
- This is the reference guide for BIND 10 version 20110809. The most
+ This is the reference guide for BIND 10 version 20111021. The most
up-to-date version of this document (in PDF, HTML, and plain text
formats), along with other documents for BIND 10, can be found at
http://bind10.isc.org/docs.
@@ -55,6 +55,8 @@ Administrator Reference for BIND 10
Starting BIND 10
+ Configuration of started processes
+
4. Command channel
5. Configuration manager
@@ -105,6 +107,10 @@ Administrator Reference for BIND 10
Logging Message Format
+ List of Tables
+
+ 3.1.
+
Chapter 1. Introduction
Table of Contents
@@ -124,7 +130,7 @@ Chapter 1. Introduction
Note
- This guide covers the experimental prototype of BIND 10 version 20110809.
+ This guide covers the experimental prototype of BIND 10 version 20111021.
Note
@@ -427,24 +433,28 @@ Chapter 3. Starting BIND10 with bind10
Starting BIND 10
+ Configuration of started processes
+
BIND 10 provides the bind10 command which starts up the required
- processes. bind10 will also restart processes that exit unexpectedly. This
- is the only command needed to start the BIND 10 system.
+ processes. bind10 will also restart some processes that exit unexpectedly.
+ This is the only command needed to start the BIND 10 system.
After starting the b10-msgq communications channel, bind10 connects to it,
runs the configuration manager, and reads its own configuration. Then it
starts the other modules.
- The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
- provides the communication channel between every part of the system. The
- b10-cfgmgr daemon is always needed by every module, if only to send
- information about themselves somewhere, but more importantly to ask about
- their own settings, and about other modules. The bind10 master process
- will also start up b10-cmdctl for admins to communicate with the system,
- b10-auth for authoritative DNS service or b10-resolver for recursive name
- service, b10-stats for statistics collection, b10-xfrin for inbound DNS
- zone transfers, b10-xfrout for outbound DNS zone transfers, and
- b10-zonemgr for secondary service.
+ The b10-sockcreator, b10-msgq and b10-cfgmgr services make up the core.
+ The b10-msgq daemon provides the communication channel between every part
+ of the system. The b10-cfgmgr daemon is always needed by every module, if
+ only to send information about themselves somewhere, but more importantly
+ to ask about their own settings, and about other modules. The
+ b10-sockcreator will allocate sockets for the rest of the system.
+
+ In its default configuration, the bind10 master process will also start up
+ b10-cmdctl for admins to communicate with the system, b10-auth for
+ authoritative DNS service, b10-stats for statistics collection, b10-xfrin
+ for inbound DNS zone transfers, b10-xfrout for outbound DNS zone
+ transfers, and b10-zonemgr for secondary service.
Starting BIND 10
@@ -457,6 +467,110 @@ Starting BIND 10
names for the Python-based daemons will be renamed to better identify them
instead of just "python". This is not needed on some operating systems.
+Configuration of started processes
+
+ The processes to be started can be configured, with the exception of the
+ b10-sockcreator, b10-msgq and b10-cfgmgr.
+
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process (currently
+ there's also one component which doesn't represent a process). If you
+ didn't want to transfer out at all (your server is a slave only), you
+ would just remove the corresponding component from the set, like this and
+ the process would be stopped immediately (and not started on the next
+ startup):
+
+ > config remove Boss/components b10-xfrout
+ > config commit
+
+ To add a process to the set, let's say the resolver (which is not started
+ by default), you would do this:
+
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+ Now, what it means. We add an entry called b10-resolver. It is both a name
+ used to reference this component in the configuration and the name of the
+ process to start. Then we set some parameters on how to start it.
+
+ The special one is for components that need some kind of special care
+ during startup or shutdown. Unless specified, the component is started
+ in the usual way. This is the list of components that need to be started
+ in a special way, with the value of special used for them:
+
+ Table 3.1.
+
+ +------------------------------------------------------------------------+
+ | Component | Special | Description |
+ |--------------+----------+----------------------------------------------|
+ | b10-auth | auth | Authoritative server |
+ |--------------+----------+----------------------------------------------|
+ | b10-resolver | resolver | The resolver |
+ |--------------+----------+----------------------------------------------|
+ | b10-cmdctl | cmdctl | The command control (remote control |
+ | | | interface) |
+ |--------------+----------+----------------------------------------------|
+ | setuid | setuid | Virtual component, see below |
+ +------------------------------------------------------------------------+
+
+ The kind specifies how a failure of the component should be handled. If it
+ is set to "dispensable" (the default unless you set something else), it
+ will get started again if it fails. If it is set to "needed" and it fails
+ at startup, the whole bind10 shuts down and exits with error exit code.
+ But if it fails some time later, it is just started again. If you set it
+ to "core", you indicate that the system is not usable without the
+ component and if such component fails, the system shuts down no matter
+ when the failure happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any other components as
+ core as well if you wish (but you can turn these off, they just can't
+ fail).
+
+ The priority defines the order in which the components should start. The
+ ones with a higher number are started sooner than those with lower ones.
+ If you don't set it, 0 (zero) is used as the priority.
+
+ There are other parameters we didn't use in our example. One of them is
+ "address". It is the address used by the component on the b10-msgq message
+ bus. The special components already know their address, but the usual ones
+ don't. The address is by convention the thing after b10-, with the first
+ letter capital (eg. b10-stats would have "Stats" as its address).
+
+ The last one is process. It is the name of the process to be started. It
+ defaults to the name of the component if not set, but you can use this to
+ override it.
+
+ Note
+
+ This system allows you to start the same component multiple times (by
+ including it in the configuration with different names, but the same
+ process setting). However, the rest of the system doesn't expect such a
+ situation, so it would probably not do what you want. Such support is yet
+ to be implemented.
+
+ Note
+
+ The configuration is quite powerful, but that includes a lot of space for
+ mistakes. You could turn off the b10-cmdctl, but then you couldn't change
+ it back the usual way, as it would require it to be running (you would
+ have to find and edit the configuration directly). Also, some modules
+ might have dependencies -- b10-stats-httpd needs b10-stats, b10-xfrout
+ needs the b10-auth to be running, etc.
+
+ In short, you should think twice before disabling something here.
+
+ Now, to the mysterious setuid virtual component. If you use the -u option
+ to start the bind10 as root, but change the user later, we need to start
+ the b10-auth or b10-resolver as root (until the socket creator is
+ finished). So we need to specify the time when the switch from root to
+ the given user happens and that's what the setuid component is for. The switch
+ is done at the time the setuid component would be started, if it was a
+ process. The default configuration contains the setuid component with
+ priority 5, b10-auth has 10 to be started before the switch and everything
+ else is without priority, so it is started after the switch.
+
Chapter 4. Command channel
The BIND 10 components use the b10-msgq message routing daemon to
@@ -739,15 +853,55 @@ Trigger an Incoming Zone Transfer Manually
Chapter 10. Outbound Zone Transfers
The b10-xfrout process is started by bind10. When the b10-auth
- authoritative DNS server receives an AXFR request, b10-xfrout sends the
- zone. This is used to provide master DNS service to share zones to
- secondary name servers. The b10-xfrout is also used to send NOTIFY
- messages to slaves.
+ authoritative DNS server receives an AXFR or IXFR request, b10-auth
+ internally forwards the request to b10-xfrout, which handles the rest of
+ request processing. This is used to provide primary DNS service to share
+ zones to secondary name servers. The b10-xfrout is also used to send
+ NOTIFY messages to secondary servers.
+
+ A global or per zone transfer_acl configuration can be used to control
+ accessibility of the outbound zone transfer service. By default,
+ b10-xfrout allows any clients to perform zone transfers for any zones:
+
+ > config show Xfrout/transfer_acl
+ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)
+
+ You can change this to, for example, rejecting all transfer requests by
+ default while allowing requests for the transfer of zone "example.com"
+ from 192.0.2.1 and 2001:db8::1 as follows:
+
+ > config set Xfrout/transfer_acl[0] {"action": "REJECT"}
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},
+ {"action": "ACCEPT", "from": "2001:db8::1"}]
+ > config commit
+
+ Note
+
+ In the above example the lines for transfer_acl were divided for
+ readability. In the actual input it must be in a single line.
+
+ If you want to require TSIG in access control, a separate TSIG "key ring"
+ must be configured specifically for b10-xfrout as well as a system wide
+ key ring, both containing a consistent set of keys. For example, to change
+ the previous example to allowing requests from 192.0.2.1 signed by a TSIG
+ with a key name of "key.example", you'll need to do this:
+
+ > config set tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]
+ > config commit
+
+ The first line of configuration defines a system wide key ring. This is
+ necessary because the b10-auth server also checks TSIGs and it uses the
+ system wide configuration.
Note
- The current development release of BIND 10 only supports AXFR. (IXFR is
- not supported.) Access control is not yet provided.
+ In a future version, b10-xfrout will also use the system wide TSIG
+ configuration. The way to specify zone specific configuration (ACLs, etc)
+ is likely to be changed, too.
Chapter 11. Secondary Manager
@@ -777,8 +931,13 @@ Chapter 12. Recursive Name Server
authoritative or resolver or both. By default, it starts the authoritative
service. You may change this using bindctl, for example:
- > config set Boss/start_auth false
- > config set Boss/start_resolver true
+ > config remove Boss/components b10-xfrout
+ > config remove Boss/components b10-xfrin
+ > config remove Boss/components b10-auth
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
> config commit
The master bind10 will stop and start the desired services.
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 21bb671..e61725f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -706,7 +706,7 @@ Debian and Ubuntu:
BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
- will also restart processes that exit unexpectedly.
+ will also restart some processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</para>
@@ -718,17 +718,22 @@ Debian and Ubuntu:
</para>
<para>
- The <command>b10-msgq</command> and <command>b10-cfgmgr</command>
+ The <command>b10-sockcreator</command>, <command>b10-msgq</command> and
+ <command>b10-cfgmgr</command>
services make up the core. The <command>b10-msgq</command> daemon
provides the communication channel between every part of the system.
The <command>b10-cfgmgr</command> daemon is always needed by every
module, if only to send information about themselves somewhere,
but more importantly to ask about their own settings, and
- about other modules.
- The <command>bind10</command> master process will also start up
+ about other modules. The <command>b10-sockcreator</command> will
+ allocate sockets for the rest of the system.
+ </para>
+
+ <para>
+ In its default configuration, the <command>bind10</command>
+ master process will also start up
<command>b10-cmdctl</command> for admins to communicate with the
- system, <command>b10-auth</command> for authoritative DNS service or
- <command>b10-resolver</command> for recursive name service,
+ system, <command>b10-auth</command> for authoritative DNS service,
<command>b10-stats</command> for statistics collection,
<command>b10-xfrin</command> for inbound DNS zone transfers,
<command>b10-xfrout</command> for outbound DNS zone transfers,
@@ -754,6 +759,159 @@ Debian and Ubuntu:
</note>
</section>
+ <section id="bind10.config">
+ <title>Configuration of started processes</title>
+ <para>
+ The processes to be started can be configured, with the exception
+ of the <command>b10-sockcreator</command>, <command>b10-msgq</command>
+ and <command>b10-cfgmgr</command>.
+ </para>
+
+ <para>
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process
+ (currently there's also one component which doesn't represent
+ a process). If you didn't want to transfer out at all (your server
+ is a slave only), you would just remove the corresponding component
+ from the set, like this and the process would be stopped immediately
+ (and not started on the next startup):
+ <screen>> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>
+ To add a process to the set, let's say the resolver (which is not
+ started by default), you would do this:
+ <screen>> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config commit</userinput></screen></para>
+
+ <para>
+ Now, what it means. We add an entry called b10-resolver. It is both a
+ name used to reference this component in the configuration and the
+ name of the process to start. Then we set some parameters on how to
+ start it.
+ </para>
+
+ <para>
+ The special one is for components that need some kind of special care
+ during startup or shutdown. Unless specified, the component is started
+ in the usual way. This is the list of components that need to be started
+ in a special way, with the value of special used for them:
+ <table>
+ <tgroup cols='3' align='left'>
+ <colspec colname='component'/>
+ <colspec colname='special'/>
+ <colspec colname='description'/>
+ <thead><row><entry>Component</entry><entry>Special</entry><entry>Description</entry></row></thead>
+ <tbody>
+ <row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative server</entry></row>
+ <row><entry>b10-resolver</entry><entry>resolver</entry><entry>The resolver</entry></row>
+ <row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>The command control (remote control interface)</entry></row>
+ <row><entry>setuid</entry><entry>setuid</entry><entry>Virtual component, see below</entry></row>
+ <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+ </tbody>
+ </tgroup>
+ </table>
+ </para>
+
+ <para>
+ The kind specifies how a failure of the component should
+ be handled. If it is set to <quote>dispensable</quote>
+ (the default unless you set something else), it will get
+ started again if it fails. If it is set to <quote>needed</quote>
+ and it fails at startup, the whole <command>bind10</command>
+ shuts down and exits with error exit code. But if it fails
+ some time later, it is just started again. If you set it
+ to <quote>core</quote>, you indicate that the system is
+ not usable without the component and if such component
+ fails, the system shuts down no matter when the failure
+ happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any
+ other components as core as well if you wish (but you can
+ turn these off, they just can't fail).
+ </para>
+
+ <para>
+ The priority defines the order in which the components should start.
+ The ones with a higher number are started sooner than those with
+ lower ones. If you don't set it, 0 (zero) is used as the priority.
+ </para>
+
+ <para>
+ There are other parameters we didn't use in our example.
+ One of them is <quote>address</quote>. It is the address
+ used by the component on the <command>b10-msgq</command>
+ message bus. The special components already know their
+ address, but the usual ones don't. The address is by
+ convention the thing after <emphasis>b10-</emphasis>, with
+ the first letter capital (eg. <command>b10-stats</command>
+ would have <quote>Stats</quote> as its address).
+<!-- TODO: this should be simplified so we don't even have to document it -->
+ </para>
+
+<!-- TODO: what does "The special components already know their
+address, but the usual ones don't." mean? -->
+
+<!-- TODO: document params when is enabled -->
+
+ <para>
+ The last one is process. It is the name of the process to be started.
+ It defaults to the name of the component if not set, but you can use
+ this to override it.
+ </para>
+
+ <!-- TODO Add parameters when they work, not implemented yet-->
+
+ <note>
+ <para>
+ This system allows you to start the same component multiple times
+ (by including it in the configuration with different names, but the
+ same process setting). However, the rest of the system doesn't expect
+ such a situation, so it would probably not do what you want. Such
+ support is yet to be implemented.
+ </para>
+ </note>
+
+ <note>
+ <para>
+ The configuration is quite powerful, but that includes
+ a lot of space for mistakes. You could turn off the
+ <command>b10-cmdctl</command>, but then you couldn't
+ change it back the usual way, as it would require it to
+ be running (you would have to find and edit the configuration
+ directly). Also, some modules might have dependencies
+ -- <command>b10-stats-httpd</command> needs
+ <command>b10-stats</command>, <command>b10-xfrout</command>
+ needs the <command>b10-auth</command> to be running, etc.
+
+<!-- TODO: should we define dependencies? -->
+
+ </para>
+ <para>
+ In short, you should think twice before disabling something here.
+ </para>
+ </note>
+
+ <para>
+ Now, to the mysterious setuid virtual component. If you
+ use the <command>-u</command> option to start the
+ <command>bind10</command> as root, but change the user
+ later, we need to start the <command>b10-auth</command> or
+ <command>b10-resolver</command> as root (until the socket
+ creator is finished).<!-- TODO --> So we need to specify
+ the time when the switch from root to the given user happens
+ and that's what the setuid component is for. The switch is
+ done at the time the setuid component would be started, if
+ it was a process. The default configuration contains the
+ setuid component with priority 5, <command>b10-auth</command>
+ has 10 to be started before the switch and everything else
+ is without priority, so it is started after the switch.
+ </para>
+
+ </section>
</chapter>
@@ -1369,20 +1527,72 @@ what if a NOTIFY is sent?
The <command>b10-xfrout</command> process is started by
<command>bind10</command>.
When the <command>b10-auth</command> authoritative DNS server
- receives an AXFR request, <command>b10-xfrout</command>
- sends the zone.
- This is used to provide master DNS service to share zones
+ receives an AXFR or IXFR request, <command>b10-auth</command>
+ internally forwards the request to <command>b10-xfrout</command>,
+ which handles the rest of request processing.
+ This is used to provide primary DNS service to share zones
to secondary name servers.
The <command>b10-xfrout</command> is also used to send
- NOTIFY messages to slaves.
+ NOTIFY messages to secondary servers.
+ </para>
+
+ <para>
+ A global or per zone <option>transfer_acl</option> configuration
+ can be used to control accessibility of the outbound zone
+ transfer service.
+ By default, <command>b10-xfrout</command> allows any clients to
+ perform zone transfers for any zones:
+ </para>
+
+ <screen>> <userinput>config show Xfrout/transfer_acl</userinput>
+Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)</screen>
+
+ <para>
+ You can change this to, for example, rejecting all transfer
+ requests by default while allowing requests for the transfer
+ of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
</para>
+ <screen>> <userinput>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</userinput>
+> <userinput>config add Xfrout/zone_config</userinput>
+> <userinput>config set Xfrout/zone_config[0]/origin "example.com"</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</userinput>
+<userinput> {"action": "ACCEPT", "from": "2001:db8::1"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
<note><simpara>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
- Access control is not yet provided.
+ In the above example the lines
+ for <option>transfer_acl</option> were divided for
+ readability. In the actual input it must be in a single line.
</simpara></note>
+ <para>
+ If you want to require TSIG in access control, a separate TSIG
+ "key ring" must be configured specifically
+ for <command>b10-xfrout</command> as well as a system wide
+ key ring, both containing a consistent set of keys.
+ For example, to change the previous example to allowing requests
+ from 192.0.2.1 signed by a TSIG with a key name of
+ "key.example", you'll need to do this:
+ </para>
+
+ <screen>> <userinput>config set tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
+ <para>
+ The first line of configuration defines a system wide key ring.
+ This is necessary because the <command>b10-auth</command> server
+ also checks TSIGs and it uses the system wide configuration.
+ </para>
+
+ <note><simpara>
+ In a future version, <command>b10-xfrout</command> will also
+ use the system wide TSIG configuration.
+ The way to specify zone specific configuration (ACLs, etc) is
+ likely to be changed, too.
+ </simpara></note>
<!--
TODO:
@@ -1442,8 +1652,13 @@ what is XfroutClient xfr_client??
You may change this using <command>bindctl</command>, for example:
<screen>
-> <userinput>config set Boss/start_auth false</userinput>
-> <userinput>config set Boss/start_resolver true</userinput>
+> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config remove Boss/components b10-xfrin</userinput>
+> <userinput>config remove Boss/components b10-auth</userinput>
+> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput>
</screen>
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index 237b7ad..f2f57f1 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20111021. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20111021.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110809.
+ This is the messages manual for BIND 10 version 20111021.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -107,6 +107,9 @@ This is a debug message, generated by the authoritative server when an
attempt to parse the header of a received DNS packet has failed. (The
reason for the failure is given in the message.) The server will drop the
packet.
+</p></dd><dt><a name="AUTH_INVALID_STATISTICS_DATA"></a><span class="term">AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
This is a debug message indicating that the authoritative server
has requested the keyring holding TSIG keys from the configuration
@@ -263,12 +266,58 @@ NOTIFY request will not be honored.
The boss process is starting up and will now check if the message bus
daemon is already running. If so, it will not be able to start, as it
needs a dedicated message bus.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
-This message shows whether or not the resolver should be
-started according to the configuration.
+</p></dd><dt><a name="BIND10_COMPONENT_FAILED"></a><span class="term">BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</span></dt><dd><p>
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+</p></dd><dt><a name="BIND10_COMPONENT_RESTART"></a><span class="term">BIND10_COMPONENT_RESTART component %1 is about to restart</span></dt><dd><p>
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+</p></dd><dt><a name="BIND10_COMPONENT_START"></a><span class="term">BIND10_COMPONENT_START component %1 is starting</span></dt><dd><p>
+The named component is about to be started by the boss process.
+</p></dd><dt><a name="BIND10_COMPONENT_START_EXCEPTION"></a><span class="term">BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</span></dt><dd><p>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</p></dd><dt><a name="BIND10_COMPONENT_STOP"></a><span class="term">BIND10_COMPONENT_STOP component %1 is being stopped</span></dt><dd><p>
+A component is about to be asked to stop willingly by the boss.
+</p></dd><dt><a name="BIND10_COMPONENT_UNSATISFIED"></a><span class="term">BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</span></dt><dd><p>
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_BUILD"></a><span class="term">BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</span></dt><dd><p>
+A debug message. This indicates that the configurator is building a plan
+for changing the configuration from the older one to the newer one. This
+does no real work yet; it just plans what needs to be done.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_PLAN_INTERRUPTED"></a><span class="term">BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</span></dt><dd><p>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RECONFIGURE"></a><span class="term">BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</span></dt><dd><p>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RUN"></a><span class="term">BIND10_CONFIGURATOR_RUN running plan of %1 tasks</span></dt><dd><p>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_START"></a><span class="term">BIND10_CONFIGURATOR_START bind10 component configurator is starting up</span></dt><dd><p>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_STOP"></a><span class="term">BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</span></dt><dd><p>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+</p></dd><dt><a name="BIND10_CONFIGURATOR_TASK"></a><span class="term">BIND10_CONFIGURATOR_TASK performing task %1 on %2</span></dt><dd><p>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+</p></dd><dt><a name="BIND10_INVALID_STATISTICS_DATA"></a><span class="term">BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
The boss process was started with the -u option, to drop root privileges
and continue running as the specified user, but the user is unknown.
@@ -284,24 +333,14 @@ There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
another instance of BIND10, with the same msgq domain socket, is
running, which needs to be stopped.
-</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED"></a><span class="term">BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</span></dt><dd><p>
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
The boss process is starting up, and will now process the initial
configuration, as received from the configuration manager.
@@ -327,6 +366,8 @@ so BIND 10 will now shut down. The specific error is printed.
The boss module is sending a SIGKILL signal to the given process.
</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SETUID"></a><span class="term">BIND10_SETUID setting UID to %1</span></dt><dd><p>
+The boss switches the user it runs as to the given UID.
</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
The boss process received a command or signal telling it to shut down.
It will send a shutdown command to each process. The processes that do
@@ -341,10 +382,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
The boss requested a socket from the creator, but the answer is unknown. This
looks like a programmer error.
-</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
There should be more data from the socket creator, but it closed the socket.
It probably crashed.
@@ -368,12 +405,18 @@ The socket creator failed to create the requested socket. It failed on the
indicated OS API function with given error.
</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_CC"></a><span class="term">BIND10_STARTED_CC started configuration/command session</span></dt><dd><p>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
The given process has successfully been started.
</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
The given process has successfully been started, and has the given PID.
</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_CC"></a><span class="term">BIND10_STARTING_CC starting configuration/command session</span></dt><dd><p>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
The boss module is starting the given process.
</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
@@ -387,8 +430,24 @@ All modules have been successfully started, and BIND 10 is now running.
</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
There was a fatal error when BIND10 was trying to start. The error is
shown, and BIND10 will now shut down.
-</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
-The given module is being started or restarted without root privileges.
+</p></dd><dt><a name="BIND10_STARTUP_UNEXPECTED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNEXPECTED_MESSAGE unexpected startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+</p></dd><dt><a name="BIND10_STARTUP_UNRECOGNISED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is not recognised.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_AUTH"></a><span class="term">BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</span></dt><dd><p>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_RESOLVER"></a><span class="term">BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</span></dt><dd><p>
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
@@ -399,6 +458,15 @@ the message channel.
</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
+</p></dd><dt><a name="BIND10_WAIT_CFGMGR"></a><span class="term">BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</span></dt><dd><p>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
The cache tried to generate the complete answer message. It knows the structure
of the message, but some of the RRsets to be put there are not in cache (they
@@ -487,7 +555,7 @@ Debug message. The RRset cache to hold at most this many RRsets for the given
class is being created.
</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
Debug message. The resolver is trying to look up data in the RRset cache.
-</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</span></dt><dd><p>
Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
in the cache.
</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
@@ -642,6 +710,8 @@ The user was denied because the SSL connection could not successfully
be set up. The specific error is given in the log message. Possible
causes may be that the ssl request itself was bad, or the local key or
certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STARTED"></a><span class="term">CMDCTL_STARTED cmdctl is listening for connections on %1:%2</span></dt><dd><p>
+The cmdctl daemon has started and is now listening for connections.
</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
There was a keyboard interrupt signal to stop the cmdctl daemon. The
daemon will now shut down.
@@ -756,28 +826,18 @@ Debug information. An item is being removed from the hotspot cache.
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
-This was an internal error while reading data from a datasource. This can either
-mean the specific data source implementation is not behaving correctly, or the
-data it provides is invalid. The current search is aborted.
-The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED"></a><span class="term">DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</span></dt><dd><p>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
Debug information. The database data source is looking up records with the given
name and type in the database.
</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
The datasource backend provided resource records for the given RRset with
-different TTL values. The TTL of the RRSET is set to the lowest value, which
-is printed in the log message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught general exception while reading data from a datasource.
-This most likely points to a logic error in the code, and can be considered a
-bug. The current search is aborted. Specific information about the exception is
-printed in this error message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught ISC exception while reading data from a datasource. This
-most likely points to a logic error in the code, and can be considered a bug.
-The current search is aborted. Specific information about the exception is
-printed in this error message.
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
When searching for a domain, the program met a delegation to a different zone
at the given domain name. It will return that one instead.
@@ -789,6 +849,10 @@ It will return the NS record instead.
When searching for a domain, the program met a DNAME redirection to a different
place in the domain space at the given domain name. It will return that one
instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL"></a><span class="term">DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</span></dt><dd><p>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
The data returned by the database backend did not contain any data for the given
domain name, class and type.
@@ -799,6 +863,91 @@ name and class, but not for the given type.
The data returned by the database backend contained data for the given domain
name, and it either matches the type or has a relevant type. The RRset that is
returned is printed.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE"></a><span class="term">DATASRC_DATABASE_ITERATE iterating zone %1</span></dt><dd><p>
+The program is reading the whole zone, i.e. not searching for data, but going
+through each of the RRsets there.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_END"></a><span class="term">DATASRC_DATABASE_ITERATE_END iterating zone finished</span></dt><dd><p>
+While iterating through the zone, the program reached end of the data.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_NEXT"></a><span class="term">DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</span></dt><dd><p>
+While iterating through the zone, the program extracted next RRset from it.
+The name and RRtype of the RRset is indicated in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</span></dt><dd><p>
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_END"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_NEXT"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</span></dt><dd><p>
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_START"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADR_BADDATA"></a><span class="term">DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</span></dt><dd><p>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_COMMIT"></a><span class="term">DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_CREATED"></a><span class="term">DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_DESTROYED"></a><span class="term">DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACK"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug of the application that forgets committing
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation. In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD"></a><span class="term">DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</span></dt><dd><p>
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_NS"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</span></dt><dd><p>
+The database was queried to provide glue data and it didn't find direct match.
+It could create it from given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_SUB"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</span></dt><dd><p>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_EMPTY"></a><span class="term">DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</span></dt><dd><p>
+The given wildcard exists implicitly in the domainspace, as empty nonterminal
+(eg. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
A debug message indicating that a query for the given name and RR type is being
processed.
@@ -1138,6 +1287,19 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
+</p></dd><dt><a name="LIBXFRIN_DIFFERENT_TTL"></a><span class="term">LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</span></dt><dd><p>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these don't match each other. As we combine them
+together, the later one is overwritten by the earlier one in the sequence.
+</p></dd><dt><a name="LIBXFRIN_NO_JOURNAL"></a><span class="term">LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</span></dt><dd><p>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
A message from the interface to the underlying logger implementation reporting
that the debug level (as set by an internally-created string DEBUGn, where n
@@ -1259,6 +1421,16 @@ Within a message file, a line starting with a dollar symbol was found
</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
The specified error was encountered by the message compiler when writing
to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ACCESS_FAILURE"></a><span class="term">NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</span></dt><dd><p>
+notify_out failed to get access to one of configured data sources.
+Detailed error is shown in the log message. This can be either a
+configuration error or installation setup failure.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND"></a><span class="term">NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</span></dt><dd><p>
+notify_out attempted to get slave information of a zone but the zone
+isn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process. So this means some critical inconsistency in the data
+source or software bug.
</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
The notify_out library tried to send a notify message to the given
address, but it appears to be an invalid address. The configuration
@@ -1315,6 +1487,13 @@ provide more information.
The notify message to the given address (noted as address#port) has
timed out, and the message will be resent until the max retry limit
is reached.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_BAD_SOA"></a><span class="term">NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. Notify message won't
+be sent to such a zone.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_NO_NS"></a><span class="term">NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. Notify message won't be sent to such a zone.
</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
A debug message issued when the NSAS (nameserver address store - part
of the resolver) is making a callback into the resolver to retrieve the
@@ -1732,6 +1911,11 @@ respond with 'Stats Httpd is up.' and its PID.
An unknown command has been sent to the stats-httpd module. The
stats-httpd module will respond with an error, and the command will
be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_DATAERROR"></a><span class="term">STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
An internal error occurred while handling an HTTP request. An HTTP 500
response will be sent back, and the specific error is printed. This
@@ -1776,14 +1960,10 @@ control bus. A likely problem is that the message bus daemon
</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
This debug message is printed when the stats module has received a
configuration update from the configuration manager.
-</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</span></dt><dd><p>
+The stats module received a command to show all statistics schemas of all modules.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</span></dt><dd><p>
+The stats module received a command to show the specified statistics schema of the specified module.
</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
The stats module received a command to show all statistics that it has
collected.
@@ -1801,6 +1981,11 @@ will respond with an error and the command will be ignored.
</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
This debug message is printed when a request is sent to the boss module
to send its data to the stats module.
+</p></dd><dt><a name="STATS_STARTING"></a><span class="term">STATS_STARTING starting</span></dt><dd><p>
+The stats module is now starting.
+</p></dd><dt><a name="STATS_START_ERROR"></a><span class="term">STATS_START_ERROR stats module error: %1</span></dt><dd><p>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
There was a keyboard interrupt signal to stop the stats module. The
daemon will now shut down.
@@ -1812,19 +1997,23 @@ from a different version of BIND 10 than the stats module itself.
Please check your installation.
</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to a protocol error.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
-A connection to the master server has been made, the serial value in
-the SOA record has been checked, and a zone transfer has been started.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
-The AXFR transfer of the given zone was successfully completed.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
+</p></dd><dt><a name="XFRIN_AXFR_INCONSISTENT_SOA"></a><span class="term">XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</span></dt><dd><p>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found. If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
The given master address is not a valid IP address.
</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
@@ -1843,6 +2032,17 @@ error is given in the log message.
</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
There was an error opening a connection to the master. The error is
shown in the log message.
+</p></dd><dt><a name="XFRIN_GOT_INCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</span></dt><dd><p>
+In an attempt of IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+</p></dd><dt><a name="XFRIN_GOT_NONINCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</span></dt><dd><p>
+Non incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. Non incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to IXFR query the first data is not SOA or its SOA serial
+is not equal to the requested SOA serial.
</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
There was an error importing the python DNS module pydnspp. The most
likely cause is a PYTHONPATH problem.
@@ -1853,6 +2053,11 @@ was killed.
</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
There was a problem sending a message to the zone manager. This most
likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_NOTIFY_UNKNOWN_MASTER"></a><span class="term">XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</span></dt><dd><p>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
There was an internal command to retransfer the given zone, but the
zone is not known to the system. This may indicate that the configuration
@@ -1866,24 +2071,37 @@ daemon will now shut down.
</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
An uncaught exception was raised while running the xfrin daemon. The
exception message is printed in the log message.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
-A transfer out of the given zone has started.
+</p></dd><dt><a name="XFRIN_XFR_OTHER_FAILURE"></a><span class="term">XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or failure
+in database connection. The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_PROCESS_FAILURE"></a><span class="term">XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</span></dt><dd><p>
+An XFR session failed outside the main protocol handling. This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure. Details of the error are shown in
+the log message. In general, these errors are not really expected
+ones, and indicate an installation error or a program bug. The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete. So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FALLBACK"></a><span class="term">XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</span></dt><dd><p>
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such that the remote server doesn't support IXFR, we don't have the SOA record
+(or the zone at all), we are out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we try that one in case it helps.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_STARTED"></a><span class="term">XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</span></dt><dd><p>
+The XFR transfer of the given zone was successfully completed.
</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
The TSIG key string as read from the configuration does not represent
a valid TSIG key.
@@ -1894,6 +2112,9 @@ most likely cause is that the msgq daemon is not running.
There was a problem reading a response from another module over the
command and control channel. The most likely cause is that the
configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_CONFIG_ERROR"></a><span class="term">XFROUT_CONFIG_ERROR error found in configuration data: %1</span></dt><dd><p>
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
There was a socket error while contacting the b10-auth daemon to
fetch a transfer request. The auth daemon may have shutdown.
@@ -1908,6 +2129,45 @@ by xfrout could not be found. This suggests that either some libraries
are missing on the system, or the PYTHONPATH variable is not correct.
The specific place where this library needs to be depends on your
system and your specific installation.
+</p></dd><dt><a name="XFROUT_IXFR_MULTIPLE_SOA"></a><span class="term">XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</span></dt><dd><p>
+An IXFR request was received with more than one SOA RRs in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_JOURNAL_SUPPORT"></a><span class="term">XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_SOA"></a><span class="term">XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</span></dt><dd><p>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_VERSION"></a><span class="term">XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received, but the requested range of differences
+were not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_ZONE"></a><span class="term">XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</span></dt><dd><p>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</p></dd><dt><a name="XFROUT_IXFR_UPTODATE"></a><span class="term">XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</span></dt><dd><p>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic. This will soon
+be implemented.
+</p></dd><dt><a name="XFROUT_MODULECC_SESSION_ERROR"></a><span class="term">XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</span></dt><dd><p>
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
New configuration settings have been sent from the configuration
manager. The xfrout daemon will now apply them.
@@ -1929,15 +2189,25 @@ There was an error processing a transfer request. The error is included
in the log message, but at this point no specific information other
than that could be given. This points to incomplete exception handling
in the code.
-</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
-</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer zone to
+given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</p></dd><dt><a name="XFROUT_QUERY_QUOTA_EXCCEEDED"></a><span class="term">XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</span></dt><dd><p>
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</span></dt><dd><p>
The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
The xfrout daemon received a shutdown command from the command channel
and will now shut down.
@@ -1973,6 +2243,30 @@ socket needed for contacting the b10-auth daemon to pass requests
on, but the file is in use. The most likely cause is that another
xfrout daemon process is still running. This xfrout daemon (the one
printing this message) will not start.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_CHECK_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</span></dt><dd><p>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low level error in the data
+source, but it may also be other general (more unlikely) errors such
+as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_DONE"></a><span class="term">XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_FAILED"></a><span class="term">XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_STARTED"></a><span class="term">XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
An error was encountered on the command channel. The message indicates
the nature of the error.
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index bade381..4dc02d4 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -573,19 +573,117 @@ needs a dedicated message bus.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
-<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<varlistentry id="BIND10_COMPONENT_FAILED">
+<term>BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</term>
<listitem><para>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
-<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<varlistentry id="BIND10_COMPONENT_RESTART">
+<term>BIND10_COMPONENT_RESTART component %1 is about to restart</term>
<listitem><para>
-This message shows whether or not the resolver should be
-started according to the configuration.
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START">
+<term>BIND10_COMPONENT_START component %1 is starting</term>
+<listitem><para>
+The named component is about to be started by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START_EXCEPTION">
+<term>BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</term>
+<listitem><para>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_STOP">
+<term>BIND10_COMPONENT_STOP component %1 is being stopped</term>
+<listitem><para>
+A component is about to be asked to stop willingly by the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_UNSATISFIED">
+<term>BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</term>
+<listitem><para>
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_BUILD">
+<term>BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</term>
+<listitem><para>
+A debug message. This indicates that the configurator is building a plan
+how to change configuration from the older one to newer one. This does no
+real work yet, it just does the planning what needs to be done.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_PLAN_INTERRUPTED">
+<term>BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</term>
+<listitem><para>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RECONFIGURE">
+<term>BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</term>
+<listitem><para>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RUN">
+<term>BIND10_CONFIGURATOR_RUN running plan of %1 tasks</term>
+<listitem><para>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_START">
+<term>BIND10_CONFIGURATOR_START bind10 component configurator is starting up</term>
+<listitem><para>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_STOP">
+<term>BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</term>
+<listitem><para>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_TASK">
+<term>BIND10_CONFIGURATOR_TASK performing task %1 on %2</term>
+<listitem><para>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
</para></listitem>
</varlistentry>
@@ -632,14 +730,6 @@ running, which needs to be stopped.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
-<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
-<listitem><para>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="BIND10_MSGQ_DISAPPEARED">
<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
<listitem><para>
@@ -649,24 +739,12 @@ inconsistent state of the system, and BIND 10 will now shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
-<listitem><para>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<varlistentry id="BIND10_PROCESS_ENDED">
+<term>BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</term>
<listitem><para>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
</para></listitem>
</varlistentry>
@@ -740,6 +818,13 @@ The boss module is sending a SIGTERM signal to the given process.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_SETUID">
+<term>BIND10_SETUID setting UID to %1</term>
+<listitem><para>
+The boss switches the user it runs as to the given UID.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_SHUTDOWN">
<term>BIND10_SHUTDOWN stopping the server</term>
<listitem><para>
@@ -774,15 +859,6 @@ looks like a programmer error.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
-<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
-<listitem><para>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="BIND10_SOCKCREATOR_EOF">
<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
<listitem><para>
@@ -846,6 +922,14 @@ The boss forwards a request for a socket to the socket creator.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_STARTED_CC">
+<term>BIND10_STARTED_CC started configuration/command session</term>
+<listitem><para>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_STARTED_PROCESS">
<term>BIND10_STARTED_PROCESS started %1</term>
<listitem><para>
@@ -867,6 +951,14 @@ Informational message on startup that shows the full version.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_STARTING_CC">
+<term>BIND10_STARTING_CC starting configuration/command session</term>
+<listitem><para>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_STARTING_PROCESS">
<term>BIND10_STARTING_PROCESS starting process %1</term>
<listitem><para>
@@ -905,10 +997,41 @@ shown, and BIND10 will now shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_START_AS_NON_ROOT">
-<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<varlistentry id="BIND10_STARTUP_UNEXPECTED_MESSAGE">
+<term>BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</term>
<listitem><para>
-The given module is being started or restarted without root privileges.
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+of sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNRECOGNISED_MESSAGE">
+<term>BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_AUTH">
+<term>BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</term>
+<listitem><para>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_RESOLVER">
+<term>BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</term>
+<listitem><para>
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
@@ -932,6 +1055,20 @@ action will be taken by the boss process.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_WAIT_CFGMGR">
+<term>BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</term>
+<listitem><para>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
<listitem><para>
@@ -1535,6 +1672,13 @@ certificate file could not be read.
</para></listitem>
</varlistentry>
+<varlistentry id="CMDCTL_STARTED">
+<term>CMDCTL_STARTED cmdctl is listening for connections on %1:%2</term>
+<listitem><para>
+The cmdctl daemon has started and is now listening for connections.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
<listitem><para>
@@ -1909,6 +2053,50 @@ database). The data in database should be checked and fixed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_END">
+<term>DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_NEXT">
+<term>DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</term>
+<listitem><para>
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_START">
+<term>DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADR_BADDATA">
+<term>DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</term>
+<listitem><para>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
<term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
<listitem><para>
@@ -2890,6 +3078,20 @@ together, the later one get's overwritten to the earlier one in the sequence.
</para></listitem>
</varlistentry>
+<varlistentry id="LIBXFRIN_NO_JOURNAL">
+<term>LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</term>
+<listitem><para>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
@@ -3126,6 +3328,26 @@ to the named output file.
</para></listitem>
</varlistentry>
+<varlistentry id="NOTIFY_OUT_DATASRC_ACCESS_FAILURE">
+<term>NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</term>
+<listitem><para>
+notify_out failed to get access to one of configured data sources.
+Detailed error is shown in the log message. This can be either a
+configuration error or installation setup failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND">
+<term>NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</term>
+<listitem><para>
+notify_out attempted to get slave information of a zone but the zone
+isn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process. So this means there is some critical inconsistency in the
+data source, or a software bug.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
<listitem><para>
@@ -3237,6 +3459,23 @@ is reached.
</para></listitem>
</varlistentry>
+<varlistentry id="NOTIFY_OUT_ZONE_BAD_SOA">
+<term>NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. Notify message won't
+be sent to such a zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_ZONE_NO_NS">
+<term>NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. Notify message won't be sent to such a zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="NSAS_FIND_NS_ADDRESS">
<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
@@ -4144,6 +4383,16 @@ be ignored.
</para></listitem>
</varlistentry>
+<varlistentry id="STATHTTPD_SERVER_DATAERROR">
+<term>STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="STATHTTPD_SERVER_ERROR">
<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
<listitem><para>
@@ -4518,6 +4767,25 @@ in database connection. The error is shown in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_XFR_PROCESS_FAILURE">
+<term>XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</term>
+<listitem><para>
+An XFR session failed outside the main protocol handling. This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure. Details of the error are shown in
+the log message. In general, these errors are not really expected
+ones, and indicate an installation error or a program bug. The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete. So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
<term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
<listitem><para>
@@ -4526,6 +4794,16 @@ The error is shown in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_XFR_TRANSFER_FALLBACK">
+<term>XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</term>
+<listitem><para>
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such that the remote server doesn't support IXFR, we don't have the SOA record
+(or the zone at all), we are out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we try that one in case it helps.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
<term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
<listitem><para>
@@ -4541,44 +4819,6 @@ The XFR transfer of the given zone was successfully completed.
</para></listitem>
</varlistentry>
-<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
-<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
-<listitem><para>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
-<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
-<listitem><para>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
-<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
-<listitem><para>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
-<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
-<listitem><para>
-A transfer out of the given zone has started.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
<listitem><para>
@@ -4641,6 +4881,69 @@ system and your specific installation.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_IXFR_MULTIPLE_SOA">
+<term>XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</term>
+<listitem><para>
+An IXFR request was received with more than one SOA RRs in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_JOURNAL_SUPPORT">
+<term>XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_SOA">
+<term>XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</term>
+<listitem><para>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_VERSION">
+<term>XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received, but the requested range of differences
+were not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_ZONE">
+<term>XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</term>
+<listitem><para>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_UPTODATE">
+<term>XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</term>
+<listitem><para>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic. This will soon
+be implemented.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
<term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
<listitem><para>
@@ -4699,21 +5002,36 @@ in the code.
</varlistentry>
<varlistentry id="XFROUT_QUERY_DROPPED">
-<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<term>XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer zone to
+given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_QUOTA_EXCCEEDED">
+<term>XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</term>
<listitem><para>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
</para></listitem>
</varlistentry>
<varlistentry id="XFROUT_QUERY_REJECTED">
-<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<term>XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</term>
<listitem><para>
The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
</para></listitem>
</varlistentry>
@@ -4792,6 +5110,55 @@ printing this message) will not start.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_XFR_TRANSFER_CHECK_ERROR">
+<term>XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</term>
+<listitem><para>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is a low-level error in the data
+source, but it may also be other general (more unlikely) errors such
+as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_DONE">
+<term>XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_ERROR">
+<term>XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_FAILED">
+<term>XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_STARTED">
+<term>XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="ZONEMGR_CCSESSION_ERROR">
<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
<listitem><para>
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index c9dac88..caf69b9 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -91,9 +91,9 @@ public:
bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
OutputBufferPtr buffer,
auto_ptr<TSIGContext> tsig_context);
- bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer,
- auto_ptr<TSIGContext> tsig_context);
+ bool processXfrQuery(const IOMessage& io_message, MessagePtr message,
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processNotify(const IOMessage& io_message, MessagePtr message,
OutputBufferPtr buffer,
auto_ptr<TSIGContext> tsig_context);
@@ -219,8 +219,9 @@ class ConfigChecker : public SimpleCallback {
public:
ConfigChecker(AuthSrv* srv) : server_(srv) {}
virtual void operator()(const IOMessage&) const {
- if (server_->getConfigSession()->hasQueuedMsgs()) {
- server_->getConfigSession()->checkCommand();
+ ModuleCCSession* cfg_session = server_->getConfigSession();
+ if (cfg_session != NULL && cfg_session->hasQueuedMsgs()) {
+ cfg_session->checkCommand();
}
}
private:
@@ -472,10 +473,11 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
ConstQuestionPtr question = *message->beginQuestion();
const RRType &qtype = question->getType();
if (qtype == RRType::AXFR()) {
- sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
- tsig_context);
+ sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+ tsig_context);
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
+ sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+ tsig_context);
} else {
sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
tsig_context);
@@ -543,9 +545,9 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
}
bool
-AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer,
- auto_ptr<TSIGContext> tsig_context)
+AuthSrvImpl::processXfrQuery(const IOMessage& io_message, MessagePtr message,
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
// Increment query counter.
incCounter(io_message.getSocket().getProtocol());
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 53c019f..dd00ea5 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -32,8 +32,8 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index b2e0234..f159262 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -117,7 +117,6 @@ void
Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
if (nsec->getRdataCount() == 0) {
isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
- return;
}
// Add the NSEC proving NXDOMAIN to the authority section.
@@ -152,7 +151,6 @@ Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
fresult.rrset->getRdataCount() == 0) {
isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
- return;
}
// Add the (no-) wildcard proof only when it's different from the NSEC
@@ -178,7 +176,6 @@ Query::addWildcardProof(ZoneFinder& finder) {
if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
fresult.rrset->getRdataCount() == 0) {
isc_throw(BadNSEC, "Unexpected result for wildcard proof");
- return;
}
response_.addRRset(Message::SECTION_AUTHORITY,
boost::const_pointer_cast<RRset>(fresult.rrset),
@@ -186,6 +183,33 @@ Query::addWildcardProof(ZoneFinder& finder) {
}
void
+Query::addWildcardNXRRSETProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+ // There should be one NSEC RR which was found in the zone to prove
+ // that there is not matched <QNAME,QTYPE> via wildcard expansion.
+ if (nsec->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "NSEC for WILDCARD_NXRRSET is empty");
+ }
+ // Add this NSEC RR to authority section.
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(nsec), dnssec_);
+
+ const ZoneFinder::FindResult fresult =
+ finder.find(qname_, RRType::NSEC(), NULL,
+ dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+ if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+ fresult.rrset->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "Unexpected result for no match QNAME proof");
+ }
+
+ if (nsec->getName() != fresult.rrset->getName()) {
+ // one NSEC RR proves wildcard_nxrrset that no matched QNAME.
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(fresult.rrset),
+ dnssec_);
+ }
+}
+
+void
Query::addAuthAdditional(ZoneFinder& finder) {
// Fill in authority and addtional sections.
ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
@@ -355,6 +379,12 @@ Query::process() {
dnssec_);
}
break;
+ case ZoneFinder::WILDCARD_NXRRSET:
+ addSOA(*result.zone_finder);
+ if (dnssec_ && db_result.rrset) {
+ addWildcardNXRRSETProof(zfinder, db_result.rrset);
+ }
+ break;
default:
// This is basically a bug of the data source implementation,
// but could also happen in the middle of development where
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index 3282c0d..43a8b6b 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -82,6 +82,18 @@ private:
/// This corresponds to Section 3.1.3.3 of RFC 4035.
void addWildcardProof(isc::datasrc::ZoneFinder& finder);
+ /// \brief Adds one NSEC RR proved no matched QNAME,one NSEC RR proved no
+ /// matched <QNAME,QTYPE> through wildcard extension.
+ ///
+ /// Add NSEC RRs that prove an WILDCARD_NXRRSET result.
+ /// This corresponds to Section 3.1.3.4 of RFC 4035.
+ /// \param finder The ZoneFinder through which the authority data for the
+ /// query is to be found.
+ /// \param nsec The RRset (NSEC RR) which proved that there is no matched
+ /// <QNAME,QTTYPE>.
+ void addWildcardNXRRSETProof(isc::datasrc::ZoneFinder& finder,
+ isc::dns::ConstRRsetPtr nsec);
+
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records) and add them to the additional section.
///
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 4698588..ac25cd6 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -229,7 +229,8 @@ TEST_F(AuthSrvTest, AXFROverUDP) {
TEST_F(AuthSrvTest, AXFRSuccess) {
EXPECT_FALSE(xfrout.isConnected());
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
// On success, the AXFR query has been passed to a separate process,
// so we shouldn't have to respond.
@@ -245,7 +246,8 @@ TEST_F(AuthSrvTest, TSIGSigned) {
const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Run the message through the server
@@ -278,7 +280,8 @@ TEST_F(AuthSrvTest, TSIGSignedBadKey) {
TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Process the message, but use a different key there
@@ -309,7 +312,8 @@ TEST_F(AuthSrvTest, TSIGBadSig) {
TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Process the message, but use a different key there
@@ -375,7 +379,8 @@ TEST_F(AuthSrvTest, AXFRConnectFail) {
EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
xfrout.disableConnect();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -388,7 +393,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
// first send a valid query, making the connection with the xfr process
// open.
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(xfrout.isConnected());
@@ -397,7 +403,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
parse_message->clear(Message::PARSE);
response_obuffer->clear();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -414,7 +421,66 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
xfrout.disableSend();
xfrout.disableDisconnect();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ EXPECT_THROW(server.processMessage(*io_message, parse_message,
+ response_obuffer, &dnsserv),
+ XfroutError);
+ EXPECT_TRUE(xfrout.isConnected());
+ // XXX: we need to re-enable disconnect. otherwise an exception would be
+ // thrown via the destructor of the server.
+ xfrout.enableDisconnect();
+}
+
+TEST_F(AuthSrvTest, IXFRConnectFail) {
+ EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
+ xfrout.disableConnect();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRSendFail) {
+ // first send a valid query, making the connection with the xfr process
+ // open.
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(xfrout.isConnected());
+
+ xfrout.disableSend();
+ parse_message->clear(Message::PARSE);
+ response_obuffer->clear();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+
+ // The connection should have been closed due to the send failure.
+ EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRDisconnectFail) {
+ // In our usage disconnect() shouldn't fail. So we'll see the exception
+ // should it be thrown.
+ xfrout.disableSend();
+ xfrout.disableDisconnect();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
createRequestPacket(request_message, IPPROTO_TCP);
EXPECT_THROW(server.processMessage(*io_message, parse_message,
response_obuffer, &dnsserv),
@@ -426,8 +492,9 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
}
TEST_F(AuthSrvTest, notify) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -458,8 +525,9 @@ TEST_F(AuthSrvTest, notify) {
TEST_F(AuthSrvTest, notifyForCHClass) {
// Same as the previous test, but for the CH RRClass.
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::CH(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::CH(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -487,8 +555,9 @@ TEST_F(AuthSrvTest, notifyEmptyQuestion) {
}
TEST_F(AuthSrvTest, notifyMultiQuestions) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
// add one more SOA question
request_message.addQuestion(Question(Name("example.com"), RRClass::IN(),
RRType::SOA()));
@@ -501,8 +570,9 @@ TEST_F(AuthSrvTest, notifyMultiQuestions) {
}
TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::NS());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::NS());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -513,8 +583,9 @@ TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
TEST_F(AuthSrvTest, notifyWithoutAA) {
// implicitly leave the AA bit off. our implementation will accept it.
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -523,8 +594,9 @@ TEST_F(AuthSrvTest, notifyWithoutAA) {
}
TEST_F(AuthSrvTest, notifyWithErrorRcode) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
request_message.setRcode(Rcode::SERVFAIL());
createRequestPacket(request_message, IPPROTO_UDP);
@@ -537,8 +609,9 @@ TEST_F(AuthSrvTest, notifyWithErrorRcode) {
TEST_F(AuthSrvTest, notifyWithoutSession) {
server.setXfrinSession(NULL);
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
@@ -551,8 +624,9 @@ TEST_F(AuthSrvTest, notifyWithoutSession) {
TEST_F(AuthSrvTest, notifySendFail) {
notify_session.disableSend();
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
@@ -563,8 +637,9 @@ TEST_F(AuthSrvTest, notifySendFail) {
TEST_F(AuthSrvTest, notifyReceiveFail) {
notify_session.disableReceive();
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -574,8 +649,9 @@ TEST_F(AuthSrvTest, notifyReceiveFail) {
TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
notify_session.setMessage(Element::fromJSON("{\"foo\": 1}"));
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -586,8 +662,9 @@ TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
notify_session.setMessage(
Element::fromJSON("{\"result\": [1, \"FAIL\"]}"));
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -737,12 +814,28 @@ TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
Name("example.com"), RRClass::IN(), RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
// On success, the AXFR query has been passed to a separate process,
- // so we shouldn't have to respond.
+ // so auth itself shouldn't respond.
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
// After processing TCP AXFR query, the counter should be 1.
EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
}
+// Submit TCP IXFR query and check query counter
+TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
+ // The counter should be initialized to 0.
+ EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(), RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ // On success, the IXFR query has been passed to a separate process,
+ // so auth itself shouldn't respond.
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
+ // After processing TCP IXFR query, the counter should be 1.
+ EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+}
+
// class for queryCounterUnexpected test
// getProtocol() returns IPPROTO_IP
class DummyUnknownSocket : public IOSocket {
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 16a2409..14067ab 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -100,6 +100,22 @@ const char* const cnamewild_txt =
"*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
"3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
+// Wildcard_nxrrset
+const char* const wild_txt_nxrrset =
+ "*.uwild.example.com. 3600 IN A 192.0.2.9\n";
+const char* const nsec_wild_txt_nxrrset =
+ "*.uwild.example.com. 3600 IN NSEC www.uwild.example.com. A NSEC RRSIG\n";
+const char* const wild_txt_next =
+ "www.uwild.example.com. 3600 IN A 192.0.2.11\n";
+const char* const nsec_wild_txt_next =
+ "www.uwild.example.com. 3600 IN NSEC *.wild.example.com. A NSEC RRSIG\n";
+// Wildcard empty
+const char* const empty_txt = "b.*.t.example.com. 3600 IN A 192.0.2.13\n";
+const char* const nsec_empty_txt =
+ "b.*.t.example.com. 3600 IN NSEC *.uwild.example.com. A NSEC RRSIG\n";
+const char* const empty_prev_txt = "t.example.com. 3600 IN A 192.0.2.15\n";
+const char* const nsec_empty_prev_txt =
+ "t.example.com. 3600 IN NSEC b.*.t.example.com. A NSEC RRSIG\n";
// Used in NXDOMAIN proof test. We are going to test some unusual case where
// the best possible wildcard is below the "next domain" of the NSEC RR that
// proves the NXDOMAIN, i.e.,
@@ -116,7 +132,6 @@ const char* const nsec_mx_txt =
"mx.example.com. 3600 IN NSEC ).no.example.com. MX NSEC RRSIG\n";
const char* const nsec_no_txt =
").no.example.com. 3600 IN NSEC nz.no.example.com. AAAA NSEC RRSIG\n";
-
// We'll also test the case where a single NSEC proves both NXDOMAIN and the
// non existence of wildcard. The following records will be used for that
// test.
@@ -179,7 +194,10 @@ public:
other_zone_rrs << no_txt << nz_txt <<
nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
- wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt;
+ wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt <<
+ wild_txt_nxrrset << nsec_wild_txt_nxrrset << wild_txt_next <<
+ nsec_wild_txt_next << empty_txt << nsec_empty_txt <<
+ empty_prev_txt << nsec_empty_prev_txt;
masterLoad(zone_stream, origin_, rrclass_,
boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -396,15 +414,47 @@ MockZoneFinder::find(const Name& name, const RRType& type,
// hardcoded specific cases, ignoring other details such as canceling
// due to the existence of closer name.
if ((options & NO_WILDCARD) == 0) {
- const Name wild_suffix("wild.example.com");
- if (name.compare(wild_suffix).getRelation() ==
- NameComparisonResult::SUBDOMAIN) {
- domain = domains_.find(Name("*").concatenate(wild_suffix));
- assert(domain != domains_.end());
- RRsetStore::const_iterator found_rrset = domain->second.find(type);
- assert(found_rrset != domain->second.end());
- return (FindResult(WILDCARD,
- substituteWild(*found_rrset->second, name)));
+ const Name wild_suffix(name.split(1));
+ // Unit Tests use those domains for Wildcard test.
+ if (name.equals(Name("www.wild.example.com"))||
+ name.equals(Name("www1.uwild.example.com"))||
+ name.equals(Name("a.t.example.com"))) {
+ if (name.compare(wild_suffix).getRelation() ==
+ NameComparisonResult::SUBDOMAIN) {
+ domain = domains_.find(Name("*").concatenate(wild_suffix));
+ // Matched the QNAME
+ if (domain != domains_.end()) {
+ RRsetStore::const_iterator found_rrset =
+ domain->second.find(type);
+ // Matched the QTYPE
+ if(found_rrset != domain->second.end()) {
+ return (FindResult(WILDCARD,
+ substituteWild(*found_rrset->second, name)));
+ } else {
+ // No matched QTYPE, this case is for WILDCARD_NXRRSET
+ found_rrset = domain->second.find(RRType::NSEC());
+ assert(found_rrset != domain->second.end());
+ Name newName = Name("*").concatenate(wild_suffix);
+ return (FindResult(WILDCARD_NXRRSET,
+ substituteWild(*found_rrset->second,newName)));
+ }
+ } else {
+ // This is empty non terminal name case on wildcard.
+ Name emptyName = Name("*").concatenate(wild_suffix);
+ for (Domains::reverse_iterator it = domains_.rbegin();
+ it != domains_.rend();
+ ++it) {
+ RRsetStore::const_iterator nsec_it;
+ if ((*it).first < emptyName &&
+ (nsec_it = (*it).second.find(RRType::NSEC()))
+ != (*it).second.end()) {
+ return (FindResult(WILDCARD_NXRRSET,
+ (*nsec_it).second));
+ }
+ }
+ }
+ return (FindResult(WILDCARD_NXRRSET,RRsetPtr()));
+ }
}
const Name cnamewild_suffix("cnamewild.example.com");
if (name.compare(cnamewild_suffix).getRelation() ==
@@ -924,6 +974,60 @@ TEST_F(QueryTest, badWildcardProof3) {
Query::BadNSEC);
}
+TEST_F(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
+ // WILDCARD_NXRRSET with DNSSEC proof. We should have SOA, NSEC that proves the
+ // NXRRSET and their RRSIGs. In this case we only need one NSEC,
+ // which proves both NXDOMAIN and the non-existence of RRsets at the wildcard.
+ Query(memory_client, Name("www.wild.example.com"), RRType::TXT(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_wild_txt) +
+ string("*.wild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")+"\n").c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
+ // WILDCARD_NXRRSET with DNSSEC proof. We should have SOA, NSEC that proves the
+ // NXRRSET and their RRSIGs. In this case we need two NSEC RRs,
+ // one proves NXDOMAIN and the other proves the non-existence of RRsets at the wildcard.
+ Query(memory_client, Name("www1.uwild.example.com"), RRType::TXT(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_wild_txt_nxrrset) +
+ string("*.uwild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")+"\n" +
+ string(nsec_wild_txt_next) +
+ string("www.uwild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n").c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardEmptyWithNSEC) {
+ // WILDCARD_EMPTY with DNSSEC proof. We should have SOA, NSEC that proves the
+ // NXDOMAIN and their RRSIGs. In this case we need two NSEC RRs,
+ // one proves NXDOMAIN and the other proves the non-existence of the wildcard.
+ Query(memory_client, Name("a.t.example.com"), RRType::A(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_empty_prev_txt) +
+ string("t.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")+"\n" +
+ string(nsec_empty_txt) +
+ string("b.*.t.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")+"\n").c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
/*
* This tests that when there's no SOA and we need a negative answer. It should
* throw in that case.
diff --git a/src/bin/bind10/TODO b/src/bin/bind10/TODO
index eb0abcd..6f50dbd 100644
--- a/src/bin/bind10/TODO
+++ b/src/bin/bind10/TODO
@@ -1,19 +1,13 @@
- Read msgq configuration from configuration manager (Trac #213)
https://bind10.isc.org/ticket/213
- Provide more administrator options:
- - Get process list
- Get information on a process (returns list of times started & stopped,
plus current information such as PID)
- - Add a component (not necessary for parking lot, but...)
- Stop a component
- Force-stop a component
- Mechanism to wait for child to start before continuing
-- Way to ask a child to die politely
-- Start statistics daemon
-- Statistics interaction (?)
- Use .spec file to define commands
- Rename "c-channel" stuff to msgq for clarity
-- Use logger
- Reply to shutdown message?
- Some sort of group creation so termination signals can be sent to
children of children processes (if any)
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index 0adcb70..c2e44e7 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,21 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: August 11, 2011
+.\" Date: November 23, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "BIND10" "8" "November 23, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -31,7 +22,7 @@
bind10 \- BIND 10 boss process
.SH "SYNOPSIS"
.HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
.SH "DESCRIPTION"
.PP
The
@@ -41,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
.PP
The arguments are as follows:
.PP
-\fB\-\-brittle\fR
-.RS 4
-Shutdown if any of the child processes of
-\fBbind10\fR
-exit\&. This is intended to help developers debug the server, and should not be used in production\&.
-.RE
-.PP
\fB\-c\fR \fIconfig\-filename\fR, \fB\-\-config\-file\fR \fIconfig\-filename\fR
.RS 4
The configuration filename to use\&. Can be either absolute or relative to data path\&. In case it is absolute, value of data path is not considered\&.
@@ -121,6 +105,204 @@ and its child processes\&.
.RS 4
Sets the amount of time that BIND 10 will wait for the configuration manager (a key component of BIND 10) to initialize itself before abandoning the start up and terminating with an error\&. The wait_time is specified in seconds and has a default value of 10\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The configuration provides settings for components for
+\fBbind10\fR
+to manage under
+\fI/Boss/components/\fR\&. The default elements are:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-auth\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-cmdctl\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/setuid\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\-httpd\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrin\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrout\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-zonemgr\fR
+.RE
+.PP
+(Note that the startup of
+\fBb10\-sockcreator\fR,
+\fBb10\-cfgmgr\fR, and
+\fBb10\-msgq\fR
+is not configurable\&. It is hardcoded and
+\fBbind10\fR
+will not run without them\&.)
+.PP
+These named sets (listed above) contain the following settings:
+.PP
+\fIaddress\fR
+.RS 4
+The name used for communicating to it on the message bus\&.
+.RE
+.PP
+\fIkind\fR
+.RS 4
+This defines how required a component is\&. The possible settings for
+\fIkind\fR
+are:
+\fIcore\fR
+(system won\'t start if it won\'t start and
+\fBbind10\fR
+will shutdown if a
+\(lqcore\(rq
+component crashes),
+\fIdispensable\fR
+(\fBbind10\fR
+will restart failing component), and
+\fIneeded\fR
+(\fBbind10\fR
+will shutdown if component won\'t initially start, but if it crashes later, it will attempt to restart)\&. This setting is required\&.
+.RE
+.PP
+\fIpriority\fR
+.RS 4
+This is an integer\&.
+\fBbind10\fR
+will start the components with largest priority numbers first\&.
+.RE
+.PP
+\fIprocess\fR
+.RS 4
+This is the filename of the executable to be started\&. If not defined, then
+\fBbind10\fR
+will use the component name instead\&.
+.RE
+.PP
+\fIspecial\fR
+.RS 4
+This defines if the component is started a special way\&.
+.RE
+.PP
+The
+\fIBoss\fR
+configuration commands are:
+.PP
+
+\fBgetstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon\&. This is an internal command and not exposed to the administrator\&.
+
+.PP
+
+\fBping\fR
+is used to check the connection with the
+\fBbind10\fR
+daemon\&. It returns the text
+\(lqpong\(rq\&.
+.PP
+
+\fBsendstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon immediately\&.
+.PP
+
+\fBshow_processes\fR
+lists the current processes managed by
+\fBbind10\fR\&. The output is an array in JSON format containing the process ID and the name for each\&.
+
+
+.PP
+
+\fBshutdown\fR
+tells
+\fBbind10\fR
+to shutdown the BIND 10 servers\&. It will tell each process it manages to shutdown and, when complete,
+\fBbind10\fR
+will exit\&.
.SH "STATISTICS DATA"
.PP
The statistics data collected by the
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 6de0947..6705760 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>August 11, 2011</date>
+ <date>November 23, 2011</date>
</refentryinfo>
<refmeta>
@@ -51,7 +51,6 @@
<arg><option>-u <replaceable>user</replaceable></option></arg>
<arg><option>-v</option></arg>
<arg><option>-w <replaceable>wait_time</replaceable></option></arg>
- <arg><option>--brittle</option></arg>
<arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
<arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
<arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
@@ -92,20 +91,6 @@
<varlistentry>
<term>
- <option>--brittle</option>
- </term>
- <listitem>
- <para>
- Shutdown if any of the child processes of
- <command>bind10</command> exit. This is intended to
- help developers debug the server, and should not be
- used in production.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
<option>-c</option> <replaceable>config-filename</replaceable>,
<option>--config-file</option> <replaceable>config-filename</replaceable>
</term>
@@ -233,6 +218,204 @@ TODO: configuration section
-->
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The configuration provides settings for components for
+ <command>bind10</command> to manage under
+ <varname>/Boss/components/</varname>.
+ The default elements are:
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-auth</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/setuid</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-stats</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-xfrin</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-xfrout</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
+ </listitem>
+
+ </itemizedlist>
+
+ <para>
+ (Note that the startup of <command>b10-sockcreator</command>,
+ <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+ is not configurable. It is hardcoded and <command>bind10</command>
+ will not run without them.)
+ </para>
+
+ <para>
+ These named sets (listed above) contain the following settings:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><varname>address</varname></term>
+ <listitem>
+ <para>The name used for communicating to it on the message
+ bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><varname>kind</varname></term>
+ <listitem>
+ <para>
+ This defines how required a component is.
+ The possible settings for <varname>kind</varname> are:
+ <varname>core</varname> (system won't start if it won't
+ start and <command>bind10</command> will shutdown if
+ a <quote>core</quote> component crashes),
+ <varname>dispensable</varname> (<command>bind10</command>
+ will restart failing component),
+ and
+ <varname>needed</varname> (<command>bind10</command>
+ will shutdown if component won't initially start, but
+ if it crashes later, it will attempt to restart).
+ This setting is required.
+<!-- TODO: formatting -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+<!--
+TODO: currently not used
+ <varlistentry>
+ <term> <varname>params</varname> </term>
+ <listitem>
+ <para>
+list
+</para>
+ </listitem>
+ </varlistentry>
+-->
+
+ <varlistentry>
+ <term> <varname>priority</varname> </term>
+ <listitem>
+ <para>This is an integer. <command>bind10</command>
+ will start the components with largest priority numbers first.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>process</varname> </term>
+ <listitem>
+ <para>This is the filename of the executable to be started.
+ If not defined, then <command>bind10</command> will
+ use the component name instead.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>special</varname> </term>
+ <listitem>
+ <para>
+ This defines if the component is started a special
+ way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+setuid
+sockcreator
+xfrin
+-->
+
+</para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+<!-- TODO: formatting -->
+ <para>
+ The <varname>Boss</varname> configuration commands are:
+ </para>
+<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
+
+ <para>
+ <command>getstats</command> tells <command>bind10</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon.
+ This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+<!-- TODO: explain difference with sendstat -->
+ </para>
+
+ <para>
+ <command>ping</command> is used to check the connection with the
+ <command>bind10</command> daemon.
+ It returns the text <quote>pong</quote>.
+ </para>
+
+ <para>
+ <command>sendstats</command> tells <command>bind10</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon immediately.
+<!-- TODO: compare with internal command getstats? -->
+ </para>
+
+ <para>
+ <command>show_processes</command> lists the current processes
+ managed by <command>bind10</command>.
+ The output is an array in JSON format containing the process
+ ID and the name for each.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+ </para>
+
+ <para>
+ <command>shutdown</command> tells <command>bind10</command>
+ to shutdown the BIND 10 servers.
+ It will tell each process it manages to shutdown and, when
+ complete, <command>bind10</command> will exit.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>STATISTICS DATA</title>
<para>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 2769aa9..79635fd 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -20,18 +20,72 @@ The boss process is starting up and will now check if the message bus
daemon is already running. If so, it will not be able to start, as it
needs a dedicated message bus.
-% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-
-% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
-This message shows whether or not the resolver should be
-started according to the configuration.
-
% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
An error was encountered when the boss module specified
statistics data which is invalid for the boss specification file.
+% BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+
+% BIND10_COMPONENT_RESTART component %1 is about to restart
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+
+% BIND10_COMPONENT_START component %1 is starting
+The named component is about to be started by the boss process.
+
+% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+
+% BIND10_COMPONENT_STOP component %1 is being stopped
+A component is about to be asked to stop willingly by the boss.
+
+% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+
+% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
+A debug message. This indicates that the configurator is building a plan
+how to change configuration from the older one to newer one. This does no
+real work yet, it just does the planning what needs to be done.
+
+% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+
+% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+
+% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+
+% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+
+% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+
+% BIND10_CONFIGURATOR_TASK performing task %1 on %2
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+
% BIND10_INVALID_USER invalid user: %1
The boss process was started with the -u option, to drop root privileges
and continue running as the specified user, but the user is unknown.
@@ -45,33 +99,32 @@ The boss module is sending a kill signal to process with the given name,
as part of the process of killing all started processes during a failed
startup, as described for BIND10_KILLING_ALL_PROCESSES
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and bind10 process can release them as well, unless the
+same sockets are used by yet another application.
+
% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
another instance of BIND10, with the same msgq domain socket, is
running, which needs to be stopped.
-% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-
% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
-% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned; the most common reason is that the
+request is invalid and may not come from the bind10 process at all.
-% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates that a previously started process terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This doesn't distinguish whether the process was supposed to terminate or not.
% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
The boss process is starting up, and will now process the initial
@@ -107,6 +160,9 @@ The boss module is sending a SIGKILL signal to the given process.
% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
The boss module is sending a SIGTERM signal to the given process.
+% BIND10_SETUID setting UID to %1
+The boss switches the user it runs as to the given UID.
+
% BIND10_SHUTDOWN stopping the server
The boss process received a command or signal telling it to shut down.
It will send a shutdown command to each process. The processes that do
@@ -125,11 +181,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
The boss requested a socket from the creator, but the answer is unknown. This
looks like a programmer error.
-% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-
% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
There should be more data from the socket creator, but it closed the socket.
It probably crashed.
@@ -208,8 +259,15 @@ During the startup process, a number of messages are exchanged between the
Boss process and the processes it starts. This error is output when a
message received by the Boss process is not recognised.
-% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
-The given module is being started or restarted without root privileges.
+% BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 4bcd778..00858d8 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -70,7 +70,11 @@ import isc.util.process
import isc.net.parse
import isc.log
from isc.log_messages.bind10_messages import *
-import isc.bind10.sockcreator
+import isc.bind10.component
+import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
isc.log.init("b10-boss")
logger = isc.log.Logger("boss")
@@ -80,6 +84,10 @@ logger = isc.log.Logger("boss")
DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+# Messages sent over the unix domain socket to indicate if it is followed by a real socket
+CREATOR_SOCKET_OK = "1\n"
+CREATOR_SOCKET_UNAVAILABLE = "0\n"
+
# Assign this process some longer name
isc.util.process.rename(sys.argv[0])
@@ -91,51 +99,6 @@ VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
# This is for boot_time of Boss
_BASETIME = time.gmtime()
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process was been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
class ProcessInfoError(Exception): pass
class ProcessInfo:
@@ -150,7 +113,6 @@ class ProcessInfo:
self.env = env
self.dev_null_stdout = dev_null_stdout
self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
self.uid = uid
self.username = username
self.process = None
@@ -199,7 +161,6 @@ class ProcessInfo:
env=spawn_env,
preexec_fn=self._preexec_work)
self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
# spawn() and respawn() are the same for now, but in the future they
# may have different functionality
@@ -218,7 +179,7 @@ class BoB:
def __init__(self, msgq_socket_file=None, data_path=None,
config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False, wait_time=10):
+ username=None, cmdctl_port=None, wait_time=10):
"""
Initialize the Boss of BIND. This is a singleton (only one can run).
@@ -232,27 +193,24 @@ class BoB:
The cmdctl_port is passed to cmdctl and specify on which port it
should listen.
- brittle is a debug option that controls whether the Boss shuts down
- after any process dies.
-
wait_time controls the amount of time (in seconds) that Boss waits
for selected processes to initialize before continuing with the
initialization. Currently this is only the configuration manager.
"""
self.cc_session = None
self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.cfg_start_dhcp6 = False
- self.cfg_start_dhcp4 = False
- self.started_auth_family = False
- self.started_resolver_family = False
self.curproc = None
- self.dead_processes = {}
self.msgq_socket_file = msgq_socket_file
self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
+ self.component_config = {}
+        # Some time in the future, it may happen that a single component
+        # has multiple processes. If that happens, the name "components"
+        # may be inappropriate. But as the code probably isn't completely
+        # ready for it, we leave it as components for now.
+ self.components = {}
+        # Simple list of components that died and need to wait for a
+        # restart. Components manage their own restart schedule now.
+ self.components_to_restart = []
self.runnable = False
self.uid = setuid
self.username = username
@@ -260,68 +218,73 @@ class BoB:
self.data_path = data_path
self.config_filename = config_filename
self.cmdctl_port = cmdctl_port
- self.brittle = brittle
self.wait_time = wait_time
- self.sockcreator = None
+ self._component_configurator = isc.bind10.component.Configurator(self,
+ isc.bind10.special_component.get_specials())
+ # The priorities here make them start in the correct order. First
+ # the socket creator (which would drop root privileges by then),
+        # then the message queue and after that the config manager (which
+        # uses the message queue)
+ self.__core_components = {
+ 'sockcreator': {
+ 'kind': 'core',
+ 'special': 'sockcreator',
+ 'priority': 200
+ },
+ 'msgq': {
+ 'kind': 'core',
+ 'special': 'msgq',
+ 'priority': 199
+ },
+ 'cfgmgr': {
+ 'kind': 'core',
+ 'special': 'cfgmgr',
+ 'priority': 198
+ }
+ }
+ self.__started = False
+ self.exitcode = 0
# If -v was set, enable full debug logging.
if self.verbose:
logger.set_severity("DEBUG", 99)
+ # This is set in init_socket_srv
+ self._socket_path = None
+ self._socket_cache = None
+ self._tmpdir = None
+ self._srv_socket = None
+ self._unix_sockets = {}
+
+ def __propagate_component_config(self, config):
+ comps = dict(config)
+ # Fill in the core components, so they stay alive
+ for comp in self.__core_components:
+ if comp in comps:
+ raise Exception(comp + " is core component managed by " +
+ "bind10 boss, do not set it")
+ comps[comp] = self.__core_components[comp]
+ # Update the configuration
+ self._component_configurator.reconfigure(comps)
def config_handler(self, new_config):
# If this is initial update, don't do anything now, leave it to startup
if not self.runnable:
return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- logger.info(BIND10_START_AS_NON_ROOT, name)
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
new_config)
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
+ try:
+ if 'components' in new_config:
+ self.__propagate_component_config(new_config['components'])
+ return isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
def get_processes(self):
- pids = list(self.processes.keys())
+ pids = list(self.components.keys())
pids.sort()
process_list = [ ]
for pid in pids:
- process_list.append([pid, self.processes[pid].name])
+ process_list.append([pid, self.components[pid].name()])
return process_list
def _get_stats_data(self):
@@ -365,28 +328,24 @@ class BoB:
elif command == "show_processes":
answer = isc.config.ccsession. \
create_answer(0, self.get_processes())
+ elif command == "get_socket":
+ answer = self._get_socket(args)
+ elif command == "drop_socket":
+ if "token" not in args:
+ answer = isc.config.ccsession. \
+ create_answer(1, "Missing token parameter")
+ else:
+ try:
+ self._socket_cache.drop_socket(args["token"])
+ answer = isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ answer = isc.config.ccsession.create_answer(1, str(e))
else:
answer = isc.config.ccsession.create_answer(1,
"Unknown command")
return answer
- def start_creator(self):
- self.curproc = 'b10-sockcreator'
- creator_path = os.environ['PATH']
- if ADD_LIBEXEC_PATH:
- creator_path = "@@LIBEXECDIR@@:" + creator_path
- self.sockcreator = isc.bind10.sockcreator.Creator(creator_path)
-
- def stop_creator(self, kill=False):
- if self.sockcreator is None:
- return
- if kill:
- self.sockcreator.kill()
- else:
- self.sockcreator.terminate()
- self.sockcreator = None
-
- def kill_started_processes(self):
+ def kill_started_components(self):
"""
Called as part of the exception handling when a process fails to
start, this runs through the list of started processes, killing
@@ -394,31 +353,25 @@ class BoB:
"""
logger.info(BIND10_KILLING_ALL_PROCESSES)
- self.stop_creator(True)
+ for pid in self.components:
+ logger.info(BIND10_KILL_PROCESS, self.components[pid].name())
+ self.components[pid].kill(True)
+ self.components = {}
- for pid in self.processes:
- logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
-
- def read_bind10_config(self):
+ def _read_bind10_config(self):
"""
Reads the parameters associated with the BoB module itself.
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
+ This means the list of components we should start now.
+
+ This could easily be combined into start_all_processes, but
+ it stays because of historical reasons and because the tests
+ replace the method sometimes.
"""
logger.info(BIND10_READING_BOSS_CONFIGURATION)
config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
- logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+ self.__propagate_component_config(config_data['components'])
def log_starting(self, process, port = None, address = None):
"""
@@ -480,17 +433,16 @@ class BoB:
# raised which is caught by the caller of start_all_processes(); this kills
# processes started up to that point before terminating the program.
- def start_msgq(self, c_channel_env):
+ def start_msgq(self):
"""
Start the message queue and connect to the command channel.
"""
self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
+ msgq_proc = ProcessInfo("b10-msgq", ["b10-msgq"], self.c_channel_env,
True, not self.verbose, uid=self.uid,
username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
+ msgq_proc.spawn()
+ self.log_started(msgq_proc.pid)
# Now connect to the c-channel
cc_connect_start = time.time()
@@ -509,7 +461,9 @@ class BoB:
# on this channel are once relating to process startup.
self.cc_session.group_subscribe("Boss")
- def start_cfgmgr(self, c_channel_env):
+ return msgq_proc
+
+ def start_cfgmgr(self):
"""
Starts the configuration manager process
"""
@@ -520,10 +474,9 @@ class BoB:
if self.config_filename is not None:
args.append("--config-filename=" + self.config_filename)
bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
+ self.c_channel_env, uid=self.uid,
username=self.username)
bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
self.log_started(bind_cfgd.pid)
# Wait for the configuration manager to start up as subsequent initialization
@@ -539,6 +492,8 @@ class BoB:
if not self.process_running(msg, "ConfigManager"):
raise ProcessStartError("Configuration manager process has not started")
+ return bind_cfgd
+
def start_ccsession(self, c_channel_env):
"""
Start the CC Session
@@ -570,10 +525,20 @@ class BoB:
self.log_starting(name, port, address)
newproc = ProcessInfo(name, args, c_channel_env)
newproc.spawn()
- self.processes[newproc.pid] = newproc
self.log_started(newproc.pid)
+ return newproc
+
+ def register_process(self, pid, component):
+ """
+ Put another process into boss to watch over it. When the process
+ dies, the component.failed() is called with the exit code.
- def start_simple(self, name, c_channel_env, port=None, address=None):
+        It is expected the component is an isc.bind10.component.BaseComponent
+        subclass (or anything having the same interface).
+ """
+ self.components[pid] = component
+
+ def start_simple(self, name):
"""
Most of the BIND-10 processes are started with the command:
@@ -590,7 +555,7 @@ class BoB:
args += ['-v']
# ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
+ return self.start_process(name, args, self.c_channel_env)
# The next few methods start up the rest of the BIND-10 processes.
# Although many of these methods are little more than a call to
@@ -598,10 +563,12 @@ class BoB:
# where modifications can be made if the process start-up sequence changes
# for a given process.
- def start_auth(self, c_channel_env):
+ def start_auth(self):
"""
Start the Authoritative server
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_AUTH)
authargs = ['b10-auth']
if self.nocache:
authargs += ['-n']
@@ -611,14 +578,16 @@ class BoB:
authargs += ['-v']
# ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
+ return self.start_process("b10-auth", authargs, self.c_channel_env)
- def start_resolver(self, c_channel_env):
+ def start_resolver(self):
"""
Start the Resolver. At present, all these arguments and switches
are pure speculation. As with the auth daemon, they should be
read from the configuration database.
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_RESOLVER)
self.curproc = "b10-resolver"
# XXX: this must be read from the configuration manager in the future
resargs = ['b10-resolver']
@@ -628,47 +597,9 @@ class BoB:
resargs += ['-v']
# ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
-
- def start_xfrout(self, c_channel_env):
- self.start_simple("b10-xfrout", c_channel_env)
-
- def start_xfrin(self, c_channel_env):
- # XXX: a quick-hack workaround. xfrin will implicitly use dynamically
- # loadable data source modules, which will be installed in $(libdir).
- # On some OSes (including MacOS X and *BSDs) the main process (python)
- # cannot find the modules unless they are located in a common shared
- # object path or a path in the (DY)LD_LIBRARY_PATH. We should seek
- # a cleaner solution, but for a short term workaround we specify the
- # path here, unconditionally, and without even bothering which
- # environment variable should be used.
- #
- # We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
- # do this, as the conditions that make this workaround needed are
- # the same as for the libexec path addition
- if ADD_LIBEXEC_PATH:
- cur_path = os.getenv('DYLD_LIBRARY_PATH')
- cur_path = '' if cur_path is None else ':' + cur_path
- c_channel_env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-
- cur_path = os.getenv('LD_LIBRARY_PATH')
- cur_path = '' if cur_path is None else ':' + cur_path
- c_channel_env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
- self.start_simple("b10-xfrin", c_channel_env)
-
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
-
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
-
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
-
- def start_dhcp6(self, c_channel_env):
- self.start_simple("b10-dhcp6", c_channel_env)
+ return self.start_process("b10-resolver", resargs, self.c_channel_env)
- def start_cmdctl(self, c_channel_env):
+ def start_cmdctl(self):
"""
Starts the command control process
"""
@@ -677,59 +608,27 @@ class BoB:
args.append("--port=" + str(self.cmdctl_port))
if self.verbose:
args.append("-v")
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+ return self.start_process("b10-cmdctl", args, self.c_channel_env,
+ self.cmdctl_port)
- def start_all_processes(self):
+ def start_all_components(self):
"""
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
+ Starts up all the components. Any exception generated during the
+ starting of the components is handled by the caller.
"""
- # The socket creator first, as it is the only thing that needs root
- self.start_creator()
- # TODO: Once everything uses the socket creator, we can drop root
- # privileges right now
+ # Start the real core (sockcreator, msgq, cfgmgr)
+ self._component_configurator.startup(self.__core_components)
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
+ # Connect to the msgq. This is not a process, so it's not handled
+ # inside the configurator.
+ self.start_ccsession(self.c_channel_env)
# Extract the parameters associated with Bob. This can only be
# done after the CC Session is started. Note that the logging
# configuration may override the "-v" switch set on the command line.
- self.read_bind10_config()
+ self._read_bind10_config()
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- if self.cfg_start_dhcp6:
- self.start_dhcp6(c_channel_env)
+ # TODO: Return the dropping of privileges
def startup(self):
"""
@@ -753,99 +652,81 @@ class BoB:
# this is the case we want, where the msgq is not running
pass
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
+ # Start all components. If any one fails to start, kill all started
+ # components and exit with an error indication.
try:
self.c_channel_env = c_channel_env
- self.start_all_processes()
+ self.start_all_components()
except Exception as e:
- self.kill_started_processes()
+ self.kill_started_components()
return "Unable to start " + self.curproc + ": " + str(e)
# Started successfully
self.runnable = True
+ self.__started = True
return None
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
- # Terminate the creator last
- self.stop_creator()
-
def stop_process(self, process, recipient):
"""
Stop the given process, friendly-like. The process is the name it has
(in logs, etc), the recipient is the address on msgq.
"""
logger.info(BIND10_STOP_PROCESS, process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
recipient)
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
+ def component_shutdown(self, exitcode=0):
+ """
+        Stop the Boss instance from a component's request. The exitcode
+ indicates the desired exit code.
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
+ If we did not start yet, it raises an exception, which is meant
+ to propagate through the component and configurator to the startup
+ routine and abort the startup immediately. If it is started up already,
+ we just mark it so we terminate soon.
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
+ It does set the exit code in both cases.
+ """
+ self.exitcode = exitcode
+ if not self.__started:
+ raise Exception("Component failed during startup");
+ else:
+ self.runnable = False
def shutdown(self):
"""Stop the BoB instance."""
logger.info(BIND10_SHUTDOWN)
# first try using the BIND 10 request to stop
try:
- self.stop_all_processes()
+ self._component_configurator.shutdown()
except:
pass
# XXX: some delay probably useful... how much is uncertain
# I have changed the delay from 0.5 to 1, but sometime it's
# still not enough.
- time.sleep(1)
+ time.sleep(1)
self.reap_children()
# next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGTERM, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
try:
- proc_info.process.terminate()
+ component.kill()
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
pass
# finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
+ while self.components:
# XXX: some delay probably useful... how much is uncertain
time.sleep(0.1)
self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGKILL, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, component.name(),
+ component.pid())
try:
- proc_info.process.kill()
+ component.kill(True)
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
@@ -867,40 +748,20 @@ class BoB:
# XXX: should be impossible to get any other error here
raise
if pid == 0: break
- if self.sockcreator is not None and self.sockcreator.pid() == pid:
- # This is the socket creator, started and terminated
- # differently. This can't be restarted.
- if self.runnable:
- logger.fatal(BIND10_SOCKCREATOR_CRASHED)
- self.sockcreator = None
- self.runnable = False
- elif pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
- proc_info.name, proc_info.pid)
- else:
- logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
- proc_info.name, proc_info.pid,
- exit_status)
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
+ if pid in self.components:
+ # One of the components we know about. Get information on it.
+ component = self.components.pop(pid)
+ logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
+ exit_status)
+ if component.running() and self.runnable:
+ # Tell it it failed. But only if it matters (we are
+ # not shutting down and the component considers itself
+                    # to be running).
+ component_restarted = component.failed(exit_status);
+ # if the process wants to be restarted, but not just yet,
+ # it returns False
+ if not component_restarted:
+ self.components_to_restart.append(component)
else:
logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
@@ -914,36 +775,227 @@ class BoB:
The values returned can be safely passed into select() as the
timeout value.
+
"""
- next_restart = None
- # if we're shutting down, then don't restart
if not self.runnable:
return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
+ still_dead = []
+ # keep track of the first time we need to check this queue again,
+ # if at all
+ next_restart_time = None
now = time.time()
- for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
+ for component in self.components_to_restart:
+ if not component.restart(now):
+ still_dead.append(component)
+ if next_restart_time is None or\
+ next_restart_time > component.get_restart_time():
+ next_restart_time = component.get_restart_time()
+ self.components_to_restart = still_dead
+
+ return next_restart_time
+
+ def _get_socket(self, args):
+ """
+ Implementation of the get_socket CC command. It asks the cache
+ to provide the token and sends the information back.
+ """
+ try:
+ try:
+ addr = isc.net.parse.addr_parse(args['address'])
+ port = isc.net.parse.port_parse(args['port'])
+ protocol = args['protocol']
+ if protocol not in ['UDP', 'TCP']:
+ raise ValueError("Protocol must be either UDP or TCP")
+ share_mode = args['share_mode']
+ if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+ raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+ " or NO")
+ share_name = args['share_name']
+ except KeyError as ke:
+ return \
+ isc.config.ccsession.create_answer(1,
+ "Missing parameter " +
+ str(ke))
+
+ # FIXME: This call contains blocking IPC. It is expected to be
+ # short, but if it turns out to be problem, we'll need to do
+ # something about it.
+ token = self._socket_cache.get_token(protocol, addr, port,
+ share_mode, share_name)
+ return isc.config.ccsession.create_answer(0, {
+ 'token': token,
+ 'path': self._socket_path
+ })
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def socket_request_handler(self, token, unix_socket):
+ """
+ This function handles a token that comes over a unix_domain socket.
+ The function looks into the _socket_cache and sends the socket
+ identified by the token back over the unix_socket.
+ """
+ try:
+ fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+ # FIXME: These two calls are blocking in their nature. An OS-level
+ # buffer is likely to be large enough to hold all these data, but
+ # if it wasn't and the remote application got stuck, we would have
+ # a problem. If there appear such problems, we should do something
+ # about it.
+ unix_socket.sendall(CREATOR_SOCKET_OK)
+ libutil_io_python.send_fd(unix_socket.fileno(), fd)
+ except Exception as e:
+ logger.info(BIND10_NO_SOCKET, token, e)
+ unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+ def socket_consumer_dead(self, unix_socket):
+ """
+ This function handles when a unix_socket closes. This means all
+ sockets sent to it are to be considered closed. This function signals
+ so to the _socket_cache.
+ """
+ logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+ try:
+ self._socket_cache.drop_application(unix_socket.fileno())
+ except ValueError:
+ # This means the application holds no sockets. It's harmless, as it
+ # can happen in real life - for example, it requests a socket, but
+ # get_socket doesn't find it, so the application dies. It should be
+ # rare, though.
+ pass
+
+ def set_creator(self, creator):
+ """
+        Registers a socket creator into the boss. The socket creator is not
+ used directly, but through a cache. The cache is created in this
+ method.
+
+ If called more than once, it raises a ValueError.
+ """
+ if self._socket_cache is not None:
+ raise ValueError("A creator was inserted previously")
+ self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+ def init_socket_srv(self):
+ """
+ Creates and listens on a unix-domain socket to be able to send out
+ the sockets.
+
+ This method should be called after switching user, or the switched
+ applications won't be able to access the socket.
+ """
+ self._srv_socket = socket.socket(socket.AF_UNIX)
+ # We create a temporary directory somewhere safe and unique, to avoid
+        # the need to find the place ourselves or bother users. Also, this
+ # secures the socket on some platforms, as it creates a private
+ # directory.
+ self._tmpdir = tempfile.mkdtemp()
+ # Get the name
+ self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+ # And bind the socket to the name
+ self._srv_socket.bind(self._socket_path)
+ self._srv_socket.listen(5)
+
+ def remove_socket_srv(self):
+ """
+ Closes and removes the listening socket and the directory where it
+ lives, as we created both.
+
+ It does nothing if the _srv_socket is not set (eg. it was not yet
+ initialized).
+ """
+ if self._srv_socket is not None:
+ self._srv_socket.close()
+ os.remove(self._socket_path)
+ os.rmdir(self._tmpdir)
+
+ def _srv_accept(self):
+ """
+ Accept a socket from the unix domain socket server and put it to the
+ others we care about.
+ """
+ socket = self._srv_socket.accept()
+ self._unix_sockets[socket.fileno()] = (socket, b'')
+
+ def _socket_data(self, socket_fileno):
+ """
+ This is called when a socket identified by the socket_fileno needs
+ attention. We try to read data from there. If it is closed, we remove
+ it.
+ """
+ (sock, previous) = self._unix_sockets[socket_fileno]
+ while True:
+ try:
+ data = sock.recv(1, socket.MSG_DONTWAIT)
+ except socket.error as se:
+ # These two might be different on some systems
+ if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+ # No more data now. Oh, well, just store what we have.
+ self._unix_sockets[socket_fileno] = (sock, previous)
+ return
+ else:
+ data = b'' # Pretend it got closed
+            if len(data) == 0: # The socket got to its end
+ del self._unix_sockets[socket_fileno]
+ self.socket_consumer_dead(sock)
+ sock.close()
+ return
else:
- logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
- try:
- proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
- logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
+ if data == b"\n":
+ # Handle this token and clear it
+ self.socket_request_handler(previous, sock)
+ previous = b''
+ else:
+ previous += data
+
+ def run(self, wakeup_fd):
+ """
+ The main loop, waiting for sockets, commands and dead processes.
+ Runs as long as the runnable is true.
+
+ The wakeup_fd descriptor is the read end of pipe where CHLD signal
+ handler writes.
+ """
+ ccs_fd = self.ccs.get_socket().fileno()
+ while self.runnable:
+ # clean up any processes that exited
+ self.reap_children()
+ next_restart = self.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = \
+ select.select([wakeup_fd, ccs_fd,
+ self._srv_socket.fileno()] +
+ list(self._unix_sockets.keys()), [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ self.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ self.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+ elif fd == self._srv_socket.fileno():
+ self._srv_accept()
+ elif fd in self._unix_sockets:
+ self._socket_data(fd)
# global variables, needed for signal handlers
options = None
@@ -1006,8 +1058,6 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
parser.add_option("--pid-file", dest="pid_file", type="string",
default=None,
help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
parser.add_option("-w", "--wait", dest="wait_time", type="int",
default=10, help="Time (in seconds) to wait for config manager to start up")
@@ -1109,60 +1159,32 @@ def main():
# Block SIGPIPE, as we don't want it to end this process
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle,
- options.wait_time)
- startup_result = boss_of_bind.startup()
- if startup_result:
- logger.fatal(BIND10_STARTUP_ERROR, startup_result)
- sys.exit(1)
- logger.info(BIND10_STARTUP_COMPLETE)
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- logger.fatal(BIND10_SELECT_ERROR, err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- logger.fatal(BIND10_MSGQ_DISAPPEARED)
- self.runnable = False
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- unlink_pid_file(options.pid_file)
- sys.exit(0)
+ try:
+ # Go bob!
+ boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+ options.config_file, options.nocache,
+ options.verbose, setuid, username,
+ options.cmdctl_port, options.wait_time)
+ startup_result = boss_of_bind.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ boss_of_bind.init_socket_srv()
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # Let it run
+ boss_of_bind.run(wakeup_pipe[0])
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.shutdown()
+ finally:
+ # Clean up the filesystem
+ unlink_pid_file(options.pid_file)
+ if boss_of_bind is not None:
+ boss_of_bind.remove_socket_srv()
+ sys.exit(boss_of_bind.exitcode)
if __name__ == "__main__":
main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index b4cfac6..adc9798 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -4,16 +4,71 @@
"module_description": "Master process",
"config_data": [
{
- "item_name": "start_auth",
- "item_type": "boolean",
+ "item_name": "components",
+ "item_type": "named_set",
"item_optional": false,
- "item_default": true
- },
- {
- "item_name": "start_resolver",
- "item_type": "boolean",
- "item_optional": false,
- "item_default": false
+ "item_default": {
+ "b10-auth": { "special": "auth", "kind": "needed", "priority": 10 },
+ "setuid": {
+ "special": "setuid",
+ "priority": 5,
+ "kind": "dispensable"
+ },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-stats-httpd": {
+ "address": "StatsHttpd",
+ "kind": "dispensable"
+ },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ },
+ "named_set_item_spec": {
+ "item_name": "component",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": { },
+ "map_item_spec": [
+ {
+ "item_name": "special",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "process",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "kind",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": "dispensable"
+ },
+ {
+ "item_name": "address",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "params",
+ "item_optional": true,
+ "item_type": "list",
+ "list_item_spec": {
+ "item_name": "param",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": ""
+ }
+ },
+ {
+ "item_name": "priority",
+ "item_optional": true,
+ "item_type": "integer"
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 1bd6ab4..f9537fd 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,11 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the bind10_src.
from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import bind10_src
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -28,6 +32,8 @@ from isc.net.addr import IPAddr
import time
import isc
import isc.log
+import isc.bind10.socket_cache
+import errno
from isc.testutils.parse_args import TestOptParser, OptsError
@@ -97,6 +103,232 @@ class TestProcessInfo(unittest.TestCase):
self.assertTrue(type(pi.pid) is int)
self.assertNotEqual(pi.pid, old_pid)
+class TestCacheCommands(unittest.TestCase):
+ """
+ Test methods of boss related to the socket cache and socket handling.
+ """
+ def setUp(self):
+ """
+ Prepare the boss for some tests.
+
+ Also prepare some variables we need.
+ """
+ self.__boss = BoB()
+ # Fake the cache here so we can pretend it is us and hijack the
+ # calls to its methods.
+ self.__boss._socket_cache = self
+ self.__boss._socket_path = '/socket/path'
+ self.__raise_exception = None
+ self.__socket_args = {
+ "port": 53,
+ "address": "::",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # What was and wasn't called.
+ self.__drop_app_called = None
+ self.__get_socket_called = None
+ self.__send_fd_called = None
+ self.__get_token_called = None
+ self.__drop_socket_called = None
+ bind10_src.libutil_io_python.send_fd = self.__send_fd
+
+ def __send_fd(self, to, socket):
+ """
+ A function to hook the send_fd in the bind10_src.
+ """
+ self.__send_fd_called = (to, socket)
+
+ class FalseSocket:
+ """
+ A socket where we can fake methods we need instead of having a real
+ socket.
+ """
+ def __init__(self):
+ self.send = ""
+ def fileno(self):
+ """
+ The file number. Used for identifying the remote application.
+ """
+ return 42
+
+ def sendall(self, data):
+ """
+ Adds data to the self.send.
+ """
+ self.send += data
+
+ def drop_application(self, application):
+ """
+ Part of pretending to be the cache. Logs the parameter to
+ self.__drop_app_called.
+
+ In the case self.__raise_exception is set, the exception there
+ is raised instead.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_app_called = application
+
+ def test_consumer_dead(self):
+ """
+ Test that it calls the drop_application method of the cache.
+ """
+ self.__boss.socket_consumer_dead(self.FalseSocket())
+ self.assertEqual(42, self.__drop_app_called)
+
+ def test_consumer_dead_invalid(self):
+ """
+        Test that the boss doesn't crash in case the application is not
+        known to the cache, as this actually can happen in practice.
+ """
+ self.__raise_exception = ValueError("This application is unknown")
+ # This doesn't crash
+ self.__boss.socket_consumer_dead(self.FalseSocket())
+
+ def get_socket(self, token, application):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the call is logged
+ into __get_socket_called and a number is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_socket_called = (token, application)
+ return 13
+
+ def test_request_handler(self):
+ """
+ Test that a request for socket is forwarded and the socket is sent
+ back, if it returns a socket.
+ """
+ socket = self.FalseSocket()
+ # An exception from the cache
+ self.__raise_exception = ValueError("Test value error")
+ self.__boss.socket_request_handler("token", socket)
+ # It was called, but it threw, so it is not noted here
+ self.assertIsNone(self.__get_socket_called)
+ self.assertEqual("0\n", socket.send)
+ # It should not have sent any socket.
+ self.assertIsNone(self.__send_fd_called)
+ # Now prepare a valid scenario
+ self.__raise_exception = None
+ socket.send = ""
+ self.__boss.socket_request_handler("token", socket)
+ self.assertEqual("1\n", socket.send)
+ self.assertEqual((42, 13), self.__send_fd_called)
+ self.assertEqual(("token", 42), self.__get_socket_called)
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameters are
+ logged into __get_token_called and a token is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_token_called = (protocol, address, port, share_mode,
+ share_name)
+ return "token"
+
+ def test_get_socket_ok(self):
+ """
+ Test the successful scenario of getting a socket.
+ """
+ result = self.__boss._get_socket(self.__socket_args)
+ [code, answer] = result['result']
+ self.assertEqual(0, code)
+ self.assertEqual({
+ 'token': 'token',
+ 'path': '/socket/path'
+ }, answer)
+ addr = self.__get_token_called[1]
+ self.assertTrue(isinstance(addr, IPAddr))
+ self.assertEqual("::", str(addr))
+ self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+ self.__get_token_called)
+
+ def test_get_socket_error(self):
+ """
+ Test that bad inputs are handled correctly, etc.
+ """
+ def check_code(code, args):
+ """
+ Pass the args there and check if it returns success or not.
+
+ The rest is not tested, as it is already checked in the
+ test_get_socket_ok.
+ """
+ [rcode, ranswer] = self.__boss._get_socket(args)['result']
+ self.assertEqual(code, rcode)
+ if code == 1:
+ # This should be an error message. The exact formatting
+ # is unknown, but we check it is string at least
+ self.assertTrue(isinstance(ranswer, str))
+ def mod_args(name, value):
+ """
+ Override a parameter in the args.
+ """
+ result = dict(self.__socket_args)
+ result[name] = value
+ return result
+
+ # Port too large
+ check_code(1, mod_args('port', 65536))
+ # Not numeric address
+ check_code(1, mod_args('address', 'example.org.'))
+ # Some bad values of enum-like params
+ check_code(1, mod_args('protocol', 'BAD PROTO'))
+ check_code(1, mod_args('share_mode', 'BAD SHARE'))
+ # Check missing parameters
+ for param in self.__socket_args.keys():
+ args = dict(self.__socket_args)
+ del args[param]
+ check_code(1, args)
+ # These are OK values for the enum-like parameters
+ # The ones from test_get_socket_ok are not tested here
+ check_code(0, mod_args('protocol', 'TCP'))
+ check_code(0, mod_args('share_mode', 'SAMEAPP'))
+ check_code(0, mod_args('share_mode', 'NO'))
+ # If an exception is raised from within the cache, it is converted
+ # to an error, not propagated
+ self.__raise_exception = Exception("Test exception")
+ check_code(1, self.__socket_args)
+
+ def drop_socket(self, token):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameter is stored
+ in __drop_socket_called.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_socket_called = token
+
+ def test_drop_socket(self):
+ """
+ Check the drop_socket command. It should directly call the method
+ on the cache. Exceptions should be translated to error messages.
+ """
+ # This should be OK and just propagated to the call.
+ self.assertEqual({"result": [0]},
+ self.__boss.command_handler("drop_socket",
+ {"token": "token"}))
+ self.assertEqual("token", self.__drop_socket_called)
+ self.__drop_socket_called = None
+ # Missing parameter
+ self.assertEqual({"result": [1, "Missing token parameter"]},
+ self.__boss.command_handler("drop_socket", {}))
+ self.assertIsNone(self.__drop_socket_called)
+ # An exception is raised from within the cache
+ self.__raise_exception = ValueError("Test error")
+ self.assertEqual({"result": [1, "Test error"]},
+ self.__boss.command_handler("drop_socket",
+ {"token": "token"}))
+
+
class TestBoB(unittest.TestCase):
def test_init(self):
bob = BoB()
@@ -104,17 +336,27 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, None)
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
- self.assertEqual(bob.dead_processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
self.assertEqual(bob.username, None)
self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
+ self.assertIsNone(bob._socket_cache)
- self.assertEqual(bob.cfg_start_dhcp4, False)
- self.assertEqual(bob.cfg_start_dhcp6, False)
+ def test_set_creator(self):
+ """
+ Test the call to set_creator. First time, the cache is created
+ with the passed creator. The next time, it throws an exception.
+ """
+ bob = BoB()
+ # The cache doesn't use it at start, so just create an empty class
+ class Creator: pass
+ creator = Creator()
+ bob.set_creator(creator)
+ self.assertTrue(isinstance(bob._socket_cache,
+ isc.bind10.socket_cache.Cache))
+ self.assertEqual(creator, bob._socket_cache._creator)
+ self.assertRaises(ValueError, bob.set_creator, creator)
def test_init_alternate_socket(self):
bob = BoB("alt_socket_file")
@@ -122,16 +364,11 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
- self.assertEqual(bob.dead_processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
self.assertEqual(bob.username, None)
self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
- self.assertEqual(bob.cfg_start_dhcp4, False)
- self.assertEqual(bob.cfg_start_dhcp6, False)
def test_command_handler(self):
class DummySession():
@@ -194,6 +431,26 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("__UNKNOWN__", None),
isc.config.ccsession.create_answer(1, "Unknown command"))
+ # Fake the get_token of cache and test the command works
+ bob._socket_path = '/socket/path'
+ class cache:
+ def get_token(self, protocol, addr, port, share_mode, share_name):
+ return str(addr) + ':' + str(port)
+ bob._socket_cache = cache()
+ args = {
+ "port": 53,
+ "address": "0.0.0.0",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # at all and this is the easiest way to check.
+ self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+ 'path': '/socket/path'}]},
+ bob.command_handler("get_socket", args))
+ # The drop_socket is not tested here, but in TestCacheCommands.
+ # It needs the cache mocks to be in place and they are there.
+
# Class for testing the BoB without actually starting processes.
# This is used for testing the start/stop components routines and
# the BoB commands.
@@ -218,147 +475,186 @@ class MockBob(BoB):
self.stats = False
self.stats_httpd = False
self.cmdctl = False
+ self.dhcp6 = False
+ self.dhcp4 = False
self.c_channel_env = {}
- self.processes = { }
+ self.components = { }
self.creator = False
+ class MockSockCreator(isc.bind10.component.Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ isc.bind10.component.Component.__init__(self, process, boss,
+ kind, 'SockCreator')
+ self._start_func = boss.start_creator
+
+ specials = isc.bind10.special_component.get_specials()
+ specials['sockcreator'] = MockSockCreator
+ self._component_configurator = \
+ isc.bind10.component.Configurator(self, specials)
+
def start_creator(self):
self.creator = True
+ procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
+ procinfo.pid = 1
+ return procinfo
- def stop_creator(self, kill=False):
- self.creator = False
-
- def read_bind10_config(self):
+ def _read_bind10_config(self):
# Configuration options are set directly
pass
- def start_msgq(self, c_channel_env):
+ def start_msgq(self):
self.msgq = True
- self.processes[2] = ProcessInfo('b10-msgq', ['/bin/false'])
- self.processes[2].pid = 2
-
- def start_cfgmgr(self, c_channel_env):
- self.cfgmgr = True
- self.processes[3] = ProcessInfo('b10-cfgmgr', ['/bin/false'])
- self.processes[3].pid = 3
+ procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
+ procinfo.pid = 2
+ return procinfo
def start_ccsession(self, c_channel_env):
+ # this is not a process, don't have to do anything with procinfo
self.ccsession = True
- self.processes[4] = ProcessInfo('b10-ccsession', ['/bin/false'])
- self.processes[4].pid = 4
- def start_auth(self, c_channel_env):
+ def start_cfgmgr(self):
+ self.cfgmgr = True
+ procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
+ procinfo.pid = 3
+ return procinfo
+
+ def start_auth(self):
self.auth = True
- self.processes[5] = ProcessInfo('b10-auth', ['/bin/false'])
- self.processes[5].pid = 5
+ procinfo = ProcessInfo('b10-auth', ['/bin/false'])
+ procinfo.pid = 5
+ return procinfo
- def start_resolver(self, c_channel_env):
+ def start_resolver(self):
self.resolver = True
- self.processes[6] = ProcessInfo('b10-resolver', ['/bin/false'])
- self.processes[6].pid = 6
-
- def start_xfrout(self, c_channel_env):
+ procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
+ procinfo.pid = 6
+ return procinfo
+
+ def start_simple(self, name):
+ procmap = { 'b10-zonemgr': self.start_zonemgr,
+ 'b10-stats': self.start_stats,
+ 'b10-stats-httpd': self.start_stats_httpd,
+ 'b10-cmdctl': self.start_cmdctl,
+ 'b10-dhcp6': self.start_dhcp6,
+ 'b10-dhcp4': self.start_dhcp4,
+ 'b10-xfrin': self.start_xfrin,
+ 'b10-xfrout': self.start_xfrout }
+ return procmap[name]()
+
+ def start_xfrout(self):
self.xfrout = True
- self.processes[7] = ProcessInfo('b10-xfrout', ['/bin/false'])
- self.processes[7].pid = 7
+ procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
+ procinfo.pid = 7
+ return procinfo
- def start_xfrin(self, c_channel_env):
+ def start_xfrin(self):
self.xfrin = True
- self.processes[8] = ProcessInfo('b10-xfrin', ['/bin/false'])
- self.processes[8].pid = 8
+ procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
+ procinfo.pid = 8
+ return procinfo
- def start_zonemgr(self, c_channel_env):
+ def start_zonemgr(self):
self.zonemgr = True
- self.processes[9] = ProcessInfo('b10-zonemgr', ['/bin/false'])
- self.processes[9].pid = 9
+ procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
+ procinfo.pid = 9
+ return procinfo
- def start_stats(self, c_channel_env):
+ def start_stats(self):
self.stats = True
- self.processes[10] = ProcessInfo('b10-stats', ['/bin/false'])
- self.processes[10].pid = 10
+ procinfo = ProcessInfo('b10-stats', ['/bin/false'])
+ procinfo.pid = 10
+ return procinfo
- def start_stats_httpd(self, c_channel_env):
+ def start_stats_httpd(self):
self.stats_httpd = True
- self.processes[11] = ProcessInfo('b10-stats-httpd', ['/bin/false'])
- self.processes[11].pid = 11
+ procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
+ procinfo.pid = 11
+ return procinfo
- def start_cmdctl(self, c_channel_env):
+ def start_cmdctl(self):
self.cmdctl = True
- self.processes[12] = ProcessInfo('b10-cmdctl', ['/bin/false'])
- self.processes[12].pid = 12
+ procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
+ procinfo.pid = 12
+ return procinfo
- def start_dhcp6(self, c_channel_env):
+ def start_dhcp6(self):
self.dhcp6 = True
- self.processes[13] = ProcessInfo('b10-dhcp6', ['/bin/false'])
- self.processes[13]
+ procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ procinfo.pid = 13
+ return procinfo
- def start_dhcp4(self, c_channel_env):
+ def start_dhcp4(self):
self.dhcp4 = True
- self.processes[14] = ProcessInfo('b10-dhcp4', ['/bin/false'])
- self.processes[14]
-
- # We don't really use all of these stop_ methods. But it might turn out
- # someone would add some stop_ method to BoB and we want that one overriden
- # in case he forgets to update the tests.
+ procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ procinfo.pid = 14
+ return procinfo
+
+ def stop_process(self, process, recipient):
+ procmap = { 'b10-auth': self.stop_auth,
+ 'b10-resolver': self.stop_resolver,
+ 'b10-xfrout': self.stop_xfrout,
+ 'b10-xfrin': self.stop_xfrin,
+ 'b10-zonemgr': self.stop_zonemgr,
+ 'b10-stats': self.stop_stats,
+ 'b10-stats-httpd': self.stop_stats_httpd,
+ 'b10-cmdctl': self.stop_cmdctl }
+ procmap[process]()
+
+ # Some functions to pretend we stop processes, use by stop_process
def stop_msgq(self):
if self.msgq:
- del self.processes[2]
+ del self.components[2]
self.msgq = False
def stop_cfgmgr(self):
if self.cfgmgr:
- del self.processes[3]
+ del self.components[3]
self.cfgmgr = False
- def stop_ccsession(self):
- if self.ccssession:
- del self.processes[4]
- self.ccsession = False
-
def stop_auth(self):
if self.auth:
- del self.processes[5]
+ del self.components[5]
self.auth = False
def stop_resolver(self):
if self.resolver:
- del self.processes[6]
+ del self.components[6]
self.resolver = False
def stop_xfrout(self):
if self.xfrout:
- del self.processes[7]
+ del self.components[7]
self.xfrout = False
def stop_xfrin(self):
if self.xfrin:
- del self.processes[8]
+ del self.components[8]
self.xfrin = False
def stop_zonemgr(self):
if self.zonemgr:
- del self.processes[9]
+ del self.components[9]
self.zonemgr = False
def stop_stats(self):
if self.stats:
- del self.processes[10]
+ del self.components[10]
self.stats = False
def stop_stats_httpd(self):
if self.stats_httpd:
- del self.processes[11]
+ del self.components[11]
self.stats_httpd = False
def stop_cmdctl(self):
if self.cmdctl:
- del self.processes[12]
+ del self.components[12]
self.cmdctl = False
class TestStartStopProcessesBob(unittest.TestCase):
"""
- Check that the start_all_processes method starts the right combination
- of processes and that the right processes are started and stopped
+ Check that the start_all_components method starts the right combination
+ of components and that the right components are started and stopped
according to changes in configuration.
"""
def check_environment_unchanged(self):
@@ -392,7 +688,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_none(self, bob):
"""
Check that the situation is according to configuration where no servers
- should be started. Some processes still need to be running.
+ should be started. Some components still need to be running.
"""
self.check_started(bob, True, False, False)
self.check_environment_unchanged()
@@ -407,14 +703,14 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_auth(self, bob):
"""
- Check the set of processes needed to run auth only is started.
+ Check the set of components needed to run auth only is started.
"""
self.check_started(bob, True, True, False)
self.check_environment_unchanged()
def check_started_resolver(self, bob):
"""
- Check the set of processes needed to run resolver only is started.
+ Check the set of components needed to run resolver only is started.
"""
self.check_started(bob, True, False, True)
self.check_environment_unchanged()
@@ -423,80 +719,66 @@ class TestStartStopProcessesBob(unittest.TestCase):
"""
Check if proper combinations of DHCPv4 and DHCpv6 can be started
"""
- v4found = 0
- v6found = 0
-
- for pid in bob.processes:
- if (bob.processes[pid].name == "b10-dhcp4"):
- v4found += 1
- if (bob.processes[pid].name == "b10-dhcp6"):
- v6found += 1
-
- # there should be exactly one DHCPv4 daemon (if v4==True)
- # there should be exactly one DHCPv6 daemon (if v6==True)
- self.assertEqual(v4==True, v4found==1)
- self.assertEqual(v6==True, v6found==1)
+ self.assertEqual(v4, bob.dhcp4)
+ self.assertEqual(v6, bob.dhcp6)
self.check_environment_unchanged()
- # Checks the processes started when starting neither auth nor resolver
- # is specified.
- def test_start_none(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
- self.check_started_none(bob)
-
- # Checks the processes started when starting only the auth process
- def test_start_auth(self):
- # Create BoB and ensure correct initialization
+ def construct_config(self, start_auth, start_resolver):
+ # The things that are common, not turned on an off
+ config = {}
+ config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+ config['b10-stats-httpd'] = { 'kind': 'dispensable',
+ 'address': 'StatsHttpd' }
+ config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+ if start_auth:
+ config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+ config['b10-xfrout'] = { 'kind': 'dispensable',
+ 'address': 'Xfrout' }
+ config['b10-xfrin'] = { 'kind': 'dispensable',
+ 'address': 'Xfrin' }
+ config['b10-zonemgr'] = { 'kind': 'dispensable',
+ 'address': 'Zonemgr' }
+ if start_resolver:
+ config['b10-resolver'] = { 'kind': 'needed',
+ 'special': 'resolver' }
+ return {'components': config}
+
+ def config_start_init(self, start_auth, start_resolver):
+ """
+ Test the configuration is loaded at the startup.
+ """
bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ config = self.construct_config(start_auth, start_resolver)
+ class CC:
+ def get_full_config(self):
+ return config
+ # Provide the fake CC with data
+ bob.ccs = CC()
+ # And make sure it's not overwritten
+ def start_ccsession():
+ bob.ccsession = True
+ bob.start_ccsession = lambda _: start_ccsession()
+ # We need to return the original _read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ bob.start_all_components()
+ self.check_started(bob, True, start_auth, start_resolver)
+ self.check_environment_unchanged()
- self.check_started_auth(bob)
+ def test_start_none(self):
+ self.config_start_init(False, False)
- # Checks the processes started when starting only the resolver process
def test_start_resolver(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = True
+ self.config_start_init(False, True)
- bob.start_all_processes()
-
- self.check_started_resolver(bob)
+ def test_start_auth(self):
+ self.config_start_init(True, False)
- # Checks the processes started when starting both auth and resolver process
def test_start_both(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_both(bob)
+ self.config_start_init(True, True)
def test_config_start(self):
"""
- Test that the configuration starts and stops processes according
+ Test that the configuration starts and stops components according
to configuration changes.
"""
@@ -504,17 +786,13 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (nothing much should be started, as in
- # test_start_none)
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Enable both at once
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Not touched by empty change
@@ -522,11 +800,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_both(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Turn them both off again
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Not touched by empty change
@@ -534,47 +812,45 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_none(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop auth separately
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop resolver separately
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Alternate
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False, 'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_auth': True, 'start_resolver': False})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
def test_config_start_once(self):
"""
- Tests that a process is started only once.
+ Tests that a component is started only once.
"""
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (both)
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
+ bob.start_all_components()
- bob.start_all_processes()
bob.runnable = True
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
bob.start_auth = lambda: self.fail("Started auth again")
@@ -584,12 +860,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.start_resolver = lambda: self.fail("Started resolver again")
# Send again we want to start them. Should not do it, as they are.
- bob.config_handler({'start_auth': True})
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
def test_config_not_started_early(self):
"""
- Test that processes are not started by the config handler before
+ Test that components are not started by the config handler before
startup.
"""
bob = MockBob()
@@ -603,27 +878,29 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.config_handler({'start_auth': True, 'start_resolver': True})
- # Checks that DHCP (v4 and v6) processes are started when expected
+ # Checks that DHCP (v4 and v6) components are started when expected
def test_start_dhcp(self):
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # don't care about DNS stuff
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- # v4 and v6 disabled
- bob.cfg_start_dhcp6 = False
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.config_handler(self.construct_config(False, False))
self.check_started_dhcp(bob, False, False)
+ def test_start_dhcp_v6only(self):
+ # Create BoB and ensure correct initialization
+ bob = MockBob()
+ self.check_preconditions(bob)
# v6 only enabled
- bob.cfg_start_dhcp6 = True
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.runnable = True
+ bob._BoB_started = True
+ config = self.construct_config(False, False)
+ config['components']['b10-dhcp6'] = { 'kind': 'needed',
+ 'address': 'Dhcp6' }
+ bob.config_handler(config)
self.check_started_dhcp(bob, False, True)
# uncomment when dhcpv4 becomes implemented
@@ -637,6 +914,12 @@ class TestStartStopProcessesBob(unittest.TestCase):
#bob.cfg_start_dhcp4 = True
#self.check_started_dhcp(bob, True, True)
+class MockComponent:
+ def __init__(self, name, pid):
+ self.name = lambda: name
+ self.pid = lambda: pid
+
+
class TestBossCmd(unittest.TestCase):
def test_ping(self):
"""
@@ -646,7 +929,7 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("ping", None)
self.assertEqual(answer, {'result': [0, 'pong']})
- def test_show_processes(self):
+ def test_show_processes_empty(self):
"""
Confirm getting a list of processes works.
"""
@@ -654,23 +937,16 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("show_processes", None)
self.assertEqual(answer, {'result': [0, []]})
- def test_show_processes_started(self):
+ def test_show_processes(self):
"""
Confirm getting a list of processes works.
"""
bob = MockBob()
- bob.start_all_processes()
+ bob.register_process(1, MockComponent('first', 1))
+ bob.register_process(2, MockComponent('second', 2))
answer = bob.command_handler("show_processes", None)
- processes = [[2, 'b10-msgq'],
- [3, 'b10-cfgmgr'],
- [4, 'b10-ccsession'],
- [5, 'b10-auth'],
- [7, 'b10-xfrout'],
- [8, 'b10-xfrin'],
- [9, 'b10-zonemgr'],
- [10, 'b10-stats'],
- [11, 'b10-stats-httpd'],
- [12, 'b10-cmdctl']]
+ processes = [[1, 'first'],
+ [2, 'second']]
self.assertEqual(answer, {'result': [0, processes]})
class TestParseArgs(unittest.TestCase):
@@ -724,15 +1000,6 @@ class TestParseArgs(unittest.TestCase):
options = parse_args(['--cmdctl-port=1234'], TestOptParser)
self.assertEqual(1234, options.cmdctl_port)
- def test_brittle(self):
- """
- Test we can use the "brittle" flag.
- """
- options = parse_args([], TestOptParser)
- self.assertFalse(options.brittle)
- options = parse_args(['--brittle'], TestOptParser)
- self.assertTrue(options.brittle)
-
class TestPIDFile(unittest.TestCase):
def setUp(self):
self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
@@ -780,34 +1047,352 @@ class TestPIDFile(unittest.TestCase):
self.assertRaises(IOError, dump_pid,
'nonexistent_dir' + os.sep + 'bind10.pid')
-class TestBrittle(unittest.TestCase):
- def test_brittle_disabled(self):
- bob = MockBob()
- bob.start_all_processes()
- bob.runnable = True
+class TestBossComponents(unittest.TestCase):
+ """
+ Test the boss propagates component configuration properly to the
+ component configurator and acts sane.
+ """
+ def setUp(self):
+ self.__param = None
+ self.__called = False
+ self.__compconfig = {
+ 'comp': {
+ 'kind': 'needed',
+ 'process': 'cat'
+ }
+ }
+
+ def __unary_hook(self, param):
+ """
+ A hook function that stores the parameter for later examination.
+ """
+ self.__param = param
- bob.reap_children()
- self.assertTrue(bob.runnable)
+ def __nullary_hook(self):
+ """
+ A hook function that notes down it was called.
+ """
+ self.__called = True
- def simulated_exit(self):
- ret_val = self.exit_info
- self.exit_info = (0, 0)
- return ret_val
+ def __check_core(self, config):
+ """
+ A function checking that the config contains parts for the valid
+ core component configuration.
+ """
+ self.assertIsNotNone(config)
+ for component in ['sockcreator', 'msgq', 'cfgmgr']:
+ self.assertTrue(component in config)
+ self.assertEqual(component, config[component]['special'])
+ self.assertEqual('core', config[component]['kind'])
- def test_brittle_enabled(self):
+ def __check_extended(self, config):
+ """
+ This checks that the config contains the core and one more component.
+ """
+ self.__check_core(config)
+ self.assertTrue('comp' in config)
+ self.assertEqual('cat', config['comp']['process'])
+ self.assertEqual('needed', config['comp']['kind'])
+ self.assertEqual(4, len(config))
+
+ def test_correct_run(self):
+ """
+ Test the situation when we run in usual scenario, nothing fails,
+ we just start, reconfigure and then stop peacefully.
+ """
bob = MockBob()
- bob.start_all_processes()
+ # Start it
+ orig = bob._component_configurator.startup
+ bob._component_configurator.startup = self.__unary_hook
+ bob.start_all_components()
+ bob._component_configurator.startup = orig
+ self.__check_core(self.__param)
+ self.assertEqual(3, len(self.__param))
+
+ # Reconfigure it
+ self.__param = None
+ orig = bob._component_configurator.reconfigure
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # Otherwise it does not work
bob.runnable = True
+ bob.config_handler({'components': self.__compconfig})
+ self.__check_extended(self.__param)
+ currconfig = self.__param
+ # If we reconfigure it, but it does not contain the components part,
+ # nothing is called
+ bob.config_handler({})
+ self.assertEqual(self.__param, currconfig)
+ self.__param = None
+ bob._component_configurator.reconfigure = orig
+ # Check a configuration that messes up the core components is rejected.
+ compconf = dict(self.__compconfig)
+ compconf['msgq'] = { 'process': 'echo' }
+ result = bob.config_handler({'components': compconf})
+ # Check it rejected it
+ self.assertEqual(1, result['result'][0])
+
+ # We can't call shutdown, that one relies on the stuff in main
+ # We check somewhere else that the shutdown is actually called
+ # from there (the test_kills).
+
+ def test_kills(self):
+ """
+ Test that the boss kills components which don't want to stop.
+ """
+ bob = MockBob()
+ killed = []
+ class ImmortalComponent:
+ """
+ An immortal component. It does not stop when it is told so
+ (anyway it is not told so). It does not die if it is killed
+ the first time. It dies only when killed forcefully.
+ """
+ def kill(self, forceful=False):
+ killed.append(forceful)
+ if forceful:
+ bob.components = {}
+ def pid(self):
+ return 1
+ def name(self):
+ return "Immortal"
+ bob.components = {}
+ bob.register_process(1, ImmortalComponent())
+
+ # While at it, we check the configurator shutdown is actually called
+ orig = bob._component_configurator.shutdown
+ bob._component_configurator.shutdown = self.__nullary_hook
+ self.__called = False
+
+ bob.shutdown()
+
+ self.assertEqual([False, True], killed)
+ self.assertTrue(self.__called)
+
+ bob._component_configurator.shutdown = orig
+
+ def test_component_shutdown(self):
+ """
+ Test the component_shutdown sets all variables accordingly.
+ """
+ bob = MockBob()
+ self.assertRaises(Exception, bob.component_shutdown, 1)
+ self.assertEqual(1, bob.exitcode)
+ bob._BoB__started = True
+ bob.component_shutdown(2)
+ self.assertEqual(2, bob.exitcode)
+ self.assertFalse(bob.runnable)
- bob.brittle = True
- self.exit_info = (5, 0)
- bob._get_process_exit_status = self.simulated_exit
+ def test_init_config(self):
+ """
+ Test initial configuration is loaded.
+ """
+ bob = MockBob()
+ # Start it
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # We need to return the original read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ # And provide a session to read the data from
+ class CC:
+ pass
+ bob.ccs = CC()
+ bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
+ bob.start_all_components()
+ self.__check_extended(self.__param)
+
+class SocketSrvTest(unittest.TestCase):
+ """
+ This tests some methods of boss related to the unix domain sockets used
+ to transfer other sockets to applications.
+ """
+ def setUp(self):
+ """
+ Create the boss to test, testdata and backup some functions.
+ """
+ self.__boss = BoB()
+ self.__select_backup = bind10_src.select.select
+ self.__select_called = None
+ self.__socket_data_called = None
+ self.__consumer_dead_called = None
+ self.__socket_request_handler_called = None
- old_stdout = sys.stdout
- sys.stdout = open("/dev/null", "w")
- bob.reap_children()
- sys.stdout = old_stdout
- self.assertFalse(bob.runnable)
+ def tearDown(self):
+ """
+ Restore functions.
+ """
+ bind10_src.select.select = self.__select_backup
+
+ class __FalseSocket:
+ """
+ A mock socket for the select and accept and stuff like that.
+ """
+ def __init__(self, owner, fileno=42):
+ self.__owner = owner
+ self.__fileno = fileno
+ self.data = None
+ self.closed = False
+
+ def fileno(self):
+ return self.__fileno
+
+ def accept(self):
+ return self.__class__(self.__owner, 13)
+
+ def recv(self, bufsize, flags=0):
+ self.__owner.assertEqual(1, bufsize)
+ self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+ if isinstance(self.data, socket.error):
+ raise self.data
+ elif self.data is not None:
+ if len(self.data):
+ result = self.data[0:1]
+ self.data = self.data[1:]
+ return result
+ else:
+ raise socket.error(errno.EAGAIN, "Would block")
+ else:
+ return b''
+
+ def close(self):
+ self.closed = True
+
+ class __CCS:
+ """
+ A mock CCS, just to provide the socket file number.
+ """
+ class __Socket:
+ def fileno(self):
+ return 1
+ def get_socket(self):
+ return self.__Socket()
+
+ def __select_accept(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([42], [], [])
+
+ def __select_data(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([13], [], [])
+
+ def __accept(self):
+ """
+ Hijact the accept method of the boss.
+
+ Notes down it was called and stops the boss.
+ """
+ self.__accept_called = True
+ self.__boss.runnable = False
+
+ def test_srv_accept_called(self):
+ """
+ Test that the _srv_accept method of boss is called when the listening
+ socket is readable.
+ """
+ self.__boss.runnable = True
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._srv_accept = self.__accept
+ self.__boss.ccs = self.__CCS()
+ bind10_src.select.select = self.__select_accept
+ self.__boss.run(2)
+ # It called the accept
+ self.assertTrue(self.__accept_called)
+ # And the select had the right parameters
+ self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+ def test_srv_accept(self):
+ """
+ Test how the _srv_accept method works.
+ """
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._srv_accept()
+ # After we accepted, a new socket is added there
+ socket = self.__boss._unix_sockets[13][0]
+ # The socket is properly stored there
+ self.assertTrue(isinstance(socket, self.__FalseSocket))
+ # And the buffer (yet empty) is there
+ self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
+
+ def __socket_data(self, socket):
+ self.__boss.runnable = False
+ self.__socket_data_called = socket
+
+ def test_socket_data(self):
+ """
+ Test that a socket that wants attention gets it.
+ """
+ self.__boss._srv_socket = self.__FalseSocket(self)
+ self.__boss._socket_data = self.__socket_data
+ self.__boss.ccs = self.__CCS()
+ self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+ self.__boss.runnable = True
+ bind10_src.select.select = self.__select_data
+ self.__boss.run(2)
+ self.assertEqual(13, self.__socket_data_called)
+ self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+ def __prepare_data(self, data):
+ socket = self.__FalseSocket(self, 13)
+ self.__boss._unix_sockets = {13: (socket, b'')}
+ socket.data = data
+ self.__boss.socket_consumer_dead = self.__consumer_dead
+ self.__boss.socket_request_handler = self.__socket_request_handler
+ return socket
+
+ def __consumer_dead(self, socket):
+ self.__consumer_dead_called = socket
+
+ def __socket_request_handler(self, token, socket):
+ self.__socket_request_handler_called = (token, socket)
+
+ def test_socket_closed(self):
+ """
+ Test that a socket is removed and the socket_consumer_dead is called
+ when it is closed.
+ """
+ socket = self.__prepare_data(None)
+ self.__boss._socket_data(13)
+ self.assertEqual(socket, self.__consumer_dead_called)
+ self.assertEqual({}, self.__boss._unix_sockets)
+ self.assertTrue(socket.closed)
+
+ def test_socket_short(self):
+ """
+ Test that if there's not enough data to get the whole socket, it is
+ kept there, but nothing is called.
+ """
+ socket = self.__prepare_data(b'tok')
+ self.__boss._socket_data(13)
+ self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertIsNone(self.__socket_request_handler_called)
+
+ def test_socket_continue(self):
+ """
+ Test that we call the token handling function when the whole token
+ comes. This test pretends to continue reading where the previous one
+ stopped.
+ """
+ socket = self.__prepare_data(b"en\nanothe")
+ # The data to finish
+ self.__boss._unix_sockets[13] = (socket, b'tok')
+ self.__boss._socket_data(13)
+ self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertEqual((b'token', socket),
+ self.__socket_request_handler_called)
+
+ def test_broken_socket(self):
+ """
+ If the socket raises an exception during the read other than EAGAIN,
+ it is broken and we remove it.
+ """
+ sock = self.__prepare_data(socket.error(errno.ENOMEM,
+ "There's more memory available, but not for you"))
+ self.__boss._socket_data(13)
+ self.assertEqual(sock, self.__consumer_dead_called)
+ self.assertEqual({}, self.__boss._unix_sockets)
+ self.assertTrue(sock.closed)
if __name__ == '__main__':
# store os.environ for test_unchanged_environment
diff --git a/src/bin/dhcp6/.gitignore b/src/bin/dhcp6/.gitignore
index 6a6060b..e4e8f2d 100644
--- a/src/bin/dhcp6/.gitignore
+++ b/src/bin/dhcp6/.gitignore
@@ -7,3 +7,4 @@ Makefile.in
b10-dhcp6
spec_config.h
spec_config.h.pre
+tests/dhcp6_unittests
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index ba5afec..d5a969f 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -12,26 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include "dhcp/dhcp6.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
-#include "dhcp6/dhcp6_srv.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
-#include "asiolink/io_address.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp6/dhcp6_srv.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+#include <asiolink/io_address.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
using namespace isc::dhcp;
using namespace isc::asiolink;
-Dhcpv6Srv::Dhcpv6Srv() {
+Dhcpv6Srv::Dhcpv6Srv(uint16_t port) {
+
+//void Dhcpv6Srv::Dhcpv6Srv_impl(uint16_t port) {
cout << "Initialization" << endl;
- // first call to instance() will create IfaceMgr (it's a singleton)
- // it may throw something if things go wrong
+ // First call to instance() will create IfaceMgr (it's a singleton).
+ // It may throw something if things go wrong.
IfaceMgr::instance();
+ // Now try to open IPv6 sockets on detected interfaces.
+ IfaceMgr::instance().openSockets(port);
+
/// @todo: instantiate LeaseMgr here once it is imlpemented.
setServerID();
@@ -41,6 +47,8 @@ Dhcpv6Srv::Dhcpv6Srv() {
Dhcpv6Srv::~Dhcpv6Srv() {
cout << "DHCPv6 Srv shutdown." << endl;
+
+ IfaceMgr::instance().closeSockets();
}
bool
@@ -49,7 +57,7 @@ Dhcpv6Srv::run() {
boost::shared_ptr<Pkt6> query; // client's message
boost::shared_ptr<Pkt6> rsp; // server's response
- query = IfaceMgr::instance().receive();
+ query = IfaceMgr::instance().receive6();
if (query) {
if (!query->unpack()) {
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 4daef3a..bcc7818 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -17,8 +17,9 @@
#include <boost/shared_ptr.hpp>
#include <boost/noncopyable.hpp>
-#include "dhcp/pkt6.h"
-#include "dhcp/option.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/option.h>
#include <iostream>
namespace isc {
@@ -41,10 +42,12 @@ public:
/// In particular, creates IfaceMgr that will be responsible for
/// network interaction. Will instantiate lease manager, and load
/// old or create new DUID.
- Dhcpv6Srv();
+ ///
+ /// @param port port on will all sockets will listen
+ Dhcpv6Srv(uint16_t port = DHCP6_SERVER_PORT);
/// @brief Destructor. Used during DHCPv6 service shutdown.
- ~Dhcpv6Srv();
+ virtual ~Dhcpv6Srv();
/// @brief Returns server-intentifier option
///
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
index a96db07..de2b93c 100644
--- a/src/bin/dhcp6/iface_mgr.cc
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -18,9 +18,9 @@
#include <netinet/in.h>
#include <arpa/inet.h>
-#include "dhcp/dhcp6.h"
-#include "dhcp6/iface_mgr.h"
-#include "exceptions/exceptions.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp6/iface_mgr.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
@@ -79,6 +79,30 @@ IfaceMgr::Iface::getPlainMac() const {
return (tmp.str());
}
+bool IfaceMgr::Iface::delAddress(const isc::asiolink::IOAddress& addr) {
+
+ // Let's delete all addresses that match. It really shouldn't matter
+ // if we delete first or all, as the OS should allow to add a single
+ // address to an interface only once. If OS allows multiple instances
+ // of the same address added, we are in deep problems anyway.
+ size_t size = addrs_.size();
+ addrs_.erase(remove(addrs_.begin(), addrs_.end(), addr), addrs_.end());
+ return (addrs_.size() < size);
+}
+
+bool IfaceMgr::Iface::delSocket(uint16_t sockfd) {
+ list<SocketInfo>::iterator sock = sockets_.begin();
+ while (sock!=sockets_.end()) {
+ if (sock->sockfd_ == sockfd) {
+ close(sockfd);
+ sockets_.erase(sock);
+ return (true); //socket found
+ }
+ ++sock;
+ }
+ return (false); // socket not found
+}
+
IfaceMgr::IfaceMgr()
:control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
control_buf_(new char[control_buf_len_])
@@ -95,9 +119,6 @@ IfaceMgr::IfaceMgr()
detectIfaces();
- if (!openSockets()) {
- isc_throw(Unexpected, "Failed to open/bind sockets.");
- }
} catch (const std::exception& ex) {
cout << "IfaceMgr creation failed:" << ex.what() << endl;
@@ -109,7 +130,23 @@ IfaceMgr::IfaceMgr()
}
}
+void IfaceMgr::closeSockets() {
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+
+ for (SocketCollection::iterator sock = iface->sockets_.begin();
+ sock != iface->sockets_.end(); ++sock) {
+ cout << "Closing socket " << sock->sockfd_ << endl;
+ close(sock->sockfd_);
+ }
+ iface->sockets_.clear();
+ }
+
+}
+
IfaceMgr::~IfaceMgr() {
+ closeSockets();
+
// control_buf_ is deleted automatically (scoped_ptr)
control_buf_len_ = 0;
}
@@ -139,8 +176,8 @@ IfaceMgr::detectIfaces() {
Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
IOAddress addr(linkLocal);
- iface.addrs_.push_back(addr);
- ifaces_.push_back(iface);
+ iface.addAddress(addr);
+ addInterface(iface);
interfaces.close();
} catch (const std::exception& ex) {
// TODO: deallocate whatever memory we used
@@ -154,51 +191,55 @@ IfaceMgr::detectIfaces() {
}
}
-bool
-IfaceMgr::openSockets() {
- int sock;
+void
+IfaceMgr::openSockets(uint16_t port) {
+ int sock1, sock2;
+
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
+ AddressCollection addrs = iface->getAddresses();
- for (Addr6Lst::iterator addr=iface->addrs_.begin();
- addr!=iface->addrs_.end();
+ for (AddressCollection::iterator addr = addrs.begin();
+ addr != addrs.end();
++addr) {
- sock = openSocket(iface->name_, *addr,
- DHCP6_SERVER_PORT);
- if (sock<0) {
- cout << "Failed to open unicast socket." << endl;
- return (false);
+ sock1 = openSocket(iface->getName(), *addr, port);
+ if (sock1 < 0) {
+ isc_throw(Unexpected, "Failed to open unicast socket on "
+ << " interface " << iface->getFullName());
}
- sendsock_ = sock;
-
- sock = openSocket(iface->name_,
- IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
- DHCP6_SERVER_PORT);
- if (sock<0) {
- cout << "Failed to open multicast socket." << endl;
- close(sendsock_);
- return (false);
+
+ if ( !joinMcast(sock1, iface->getName(),
+ string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+ close(sock1);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
+ }
+
+ // this doesn't work too well on NetBSD
+ sock2 = openSocket(iface->getName(),
+ IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+ port);
+ if (sock2 < 0) {
+ isc_throw(Unexpected, "Failed to open multicast socket on "
+ << " interface " << iface->getFullName());
+ iface->delSocket(sock1); // delete previously opened socket
}
- recvsock_ = sock;
}
}
-
- return (true);
}
void
IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
- for (IfaceLst::const_iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
+ for (IfaceCollection::const_iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
out << "Detected interface " << iface->getFullName() << endl;
- out << " " << iface->addrs_.size() << " addr(s):" << endl;
- for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
- addr != iface->addrs_.end();
- ++addr) {
+ out << " " << iface->getAddresses().size() << " addr(s):" << endl;
+ const AddressCollection addrs = iface->getAddresses();
+
+ for (AddressCollection::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
out << " " << addr->toText() << endl;
}
out << " mac: " << iface->getPlainMac() << endl;
@@ -207,11 +248,11 @@ IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
IfaceMgr::Iface*
IfaceMgr::getIface(int ifindex) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
- if (iface->ifindex_ == ifindex)
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+ if (iface->getIndex() == ifindex) {
return (&(*iface));
+ }
}
return (NULL); // not found
@@ -219,29 +260,88 @@ IfaceMgr::getIface(int ifindex) {
IfaceMgr::Iface*
IfaceMgr::getIface(const std::string& ifname) {
- for (IfaceLst::iterator iface=ifaces_.begin();
- iface!=ifaces_.end();
- ++iface) {
- if (iface->name_ == ifname)
+ for (IfaceCollection::iterator iface = ifaces_.begin();
+ iface != ifaces_.end(); ++iface) {
+ if (iface->getName() == ifname) {
return (&(*iface));
+ }
}
return (NULL); // not found
}
int
-IfaceMgr::openSocket(const std::string& ifname,
- const IOAddress& addr,
+IfaceMgr::openSocket(const std::string& ifname, const IOAddress& addr,
int port) {
- struct sockaddr_in6 addr6;
+ Iface* iface = getIface(ifname);
+ if (!iface) {
+ isc_throw(BadValue, "There is no " << ifname << " interface present.");
+ }
+ switch (addr.getFamily()) {
+ case AF_INET:
+ return openSocket4(*iface, addr, port);
+ case AF_INET6:
+ return openSocket6(*iface, addr, port);
+ default:
+ isc_throw(BadValue, "Failed to detect family of address: "
+ << addr.toText());
+ }
+}
+
+int
+IfaceMgr::openSocket4(Iface& iface, const IOAddress& addr, int port) {
+
+ cout << "Creating UDP4 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in addr4;
+ // Zero the whole sockaddr_in we actually use; sizeof(sockaddr) is the
+ // generic struct's size and is not guaranteed to match sockaddr_in.
+ memset(&addr4, 0, sizeof(addr4));
+ addr4.sin_family = AF_INET;
+ addr4.sin_port = htons(port);
+ memcpy(&addr4.sin_addr, addr.getAddress().to_v4().to_bytes().data(),
+ sizeof(addr4.sin_addr));
+
+ int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ // This is the IPv4 variant; report UDP4, not UDP6.
+ isc_throw(Unexpected, "Failed to create UDP4 socket.");
+ }
+
+ if (bind(sock, (struct sockaddr *)&addr4, sizeof(addr4)) < 0) {
+ close(sock);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
+ }
+
+ // If there is no support for IP_PKTINFO, we are really out of luck.
+ // It will be difficult to understand where this packet came from.
+#if defined(IP_PKTINFO)
+ int flag = 1;
+ if (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &flag, sizeof(flag)) != 0) {
+ close(sock);
+ isc_throw(Unexpected, "setsockopt: IP_PKTINFO: failed.");
+ }
+#endif
+
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+ addr.toText() << "/port=" << port << endl;
- cout << "Creating socket on " << ifname << "/" << addr.toText()
- << "/port=" << port << endl;
+ iface.addSocket(SocketInfo(sock, addr, port));
+ return (sock);
+}
+
+int
+IfaceMgr::openSocket6(Iface& iface, const IOAddress& addr, int port) {
+
+ cout << "Creating UDP6 socket on " << iface.getFullName()
+ << " " << addr.toText() << "/port=" << port << endl;
+
+ struct sockaddr_in6 addr6;
memset(&addr6, 0, sizeof(addr6));
addr6.sin6_family = AF_INET6;
addr6.sin6_port = htons(port);
- addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+ if (addr.toText() != "::1")
+ addr6.sin6_scope_id = if_nametoindex(iface.getName().c_str());
memcpy(&addr6.sin6_addr,
addr.getAddress().to_v6().to_bytes().data(),
@@ -255,61 +355,58 @@ IfaceMgr::openSocket(const std::string& ifname,
// make a socket
int sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
- cout << "Failed to create UDP6 socket." << endl;
- return (-1);
+ isc_throw(Unexpected, "Failed to create UDP6 socket.");
}
- /* Set the REUSEADDR option so that we don't fail to start if
- we're being restarted. */
+ // Set the REUSEADDR option so that we don't fail to start if
+ // we're being restarted.
int flag = 1;
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&flag, sizeof(flag)) < 0) {
- cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Can't set SO_REUSEADDR option on dhcpv6 socket.");
}
if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
- cout << "Failed to bind socket " << sock << " to " << addr.toText()
- << "/port=" << port << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port);
}
#ifdef IPV6_RECVPKTINFO
- /* RFC3542 - a new way */
+ // RFC3542 - a new way
if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
&flag, sizeof(flag)) != 0) {
- cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "setsockopt: IPV6_RECVPKTINFO failed.");
}
#else
- /* RFC2292 - an old way */
+ // RFC2292 - an old way
if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
&flag, sizeof(flag)) != 0) {
- cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
close(sock);
- return (-1);
+ isc_throw(Unexpected, "setsockopt: IPV6_PKTINFO: failed.");
}
#endif
// multicast stuff
-
if (addr.getAddress().to_v6().is_multicast()) {
// both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
// are link and site-scoped, so there is no sense to join those groups
// with global addresses.
- if ( !joinMcast( sock, ifname,
+ if ( !joinMcast( sock, iface.getName(),
string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
close(sock);
- return (-1);
+ isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+ << " multicast group.");
}
}
- cout << "Created socket " << sock << " on " << ifname << "/" <<
+ cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
addr.toText() << "/port=" << port << endl;
+ iface.addSocket(SocketInfo(sock, addr, port));
+
return (sock);
}
@@ -345,16 +442,19 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
int result;
struct in6_pktinfo *pktinfo;
struct cmsghdr *cmsg;
+
+ Iface* iface = getIface(pkt->iface_);
+ if (!iface) {
+ isc_throw(BadValue, "Unable to send Pkt6. Invalid interface ("
+ << pkt->iface_ << ") specified.");
+ }
+
memset(&control_buf_[0], 0, control_buf_len_);
- /*
- * Initialize our message header structure.
- */
+ // Initialize our message header structure.
memset(&m, 0, sizeof(m));
- /*
- * Set the target address we're sending to.
- */
+ // Set the target address we're sending to.
sockaddr_in6 to;
memset(&to, 0, sizeof(to));
to.sin6_family = AF_INET6;
@@ -367,24 +467,20 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
m.msg_name = &to;
m.msg_namelen = sizeof(to);
- /*
- * Set the data buffer we're sending. (Using this wacky
- * "scatter-gather" stuff... we only have a single chunk
- * of data to send, so we declare a single vector entry.)
- */
+ // Set the data buffer we're sending. (Using this wacky
+ // "scatter-gather" stuff... we only have a single chunk
+ // of data to send, so we declare a single vector entry.)
v.iov_base = (char *) &pkt->data_[0];
v.iov_len = pkt->data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
- /*
- * Setting the interface is a bit more involved.
- *
- * We have to create a "control message", and set that to
- * define the IPv6 packet information. We could set the
- * source address if we wanted, but we can safely let the
- * kernel decide what that should be.
- */
+ // Setting the interface is a bit more involved.
+ //
+ // We have to create a "control message", and set that to
+ // define the IPv6 packet information. We could set the
+ // source address if we wanted, but we can safely let the
+ // kernel decide what that should be.
m.msg_control = &control_buf_[0];
m.msg_controllen = control_buf_len_;
cmsg = CMSG_FIRSTHDR(&m);
@@ -396,14 +492,12 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
pktinfo->ipi6_ifindex = pkt->ifindex_;
m.msg_controllen = cmsg->cmsg_len;
- result = sendmsg(sendsock_, &m, 0);
+ result = sendmsg(getSocket(*pkt), &m, 0);
if (result < 0) {
cout << "Send packet failed." << endl;
}
- cout << "Sent " << result << " bytes." << endl;
-
- cout << "Sent " << pkt->data_len_ << " bytes over "
- << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+ cout << "Sent " << pkt->data_len_ << " bytes over socket " << getSocket(*pkt)
+ << " on " << iface->getFullName() << " interface: "
<< " dst=" << pkt->remote_addr_.toText()
<< ", src=" << pkt->local_addr_.toText()
<< endl;
@@ -411,8 +505,24 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
return (result);
}
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt4>& )
+{
+ /// TODO: Implement this (ticket #1240)
+ isc_throw(NotImplemented, "Pkt4 send not implemented yet.");
+}
+
+
+boost::shared_ptr<Pkt4>
+IfaceMgr::receive4() {
+ isc_throw(NotImplemented, "Pkt4 reception not implemented yet.");
+
+ // TODO: To be implemented (ticket #1239)
+ return (boost::shared_ptr<Pkt4>()); // NULL
+}
+
boost::shared_ptr<Pkt6>
-IfaceMgr::receive() {
+IfaceMgr::receive6() {
struct msghdr m;
struct iovec v;
int result;
@@ -442,49 +552,66 @@ IfaceMgr::receive() {
memset(&from, 0, sizeof(from));
memset(&to_addr, 0, sizeof(to_addr));
- /*
- * Initialize our message header structure.
- */
+ // Initialize our message header structure.
memset(&m, 0, sizeof(m));
- /*
- * Point so we can get the from address.
- */
+ // Point so we can get the from address.
m.msg_name = &from;
m.msg_namelen = sizeof(from);
- /*
- * Set the data buffer we're receiving. (Using this wacky
- * "scatter-gather" stuff... but we that doesn't really make
- * sense for us, so we use a single vector entry.)
- */
+ // Set the data buffer we're receiving. (Using this wacky
+ // "scatter-gather" stuff... but that doesn't really make
+ // sense for us, so we use a single vector entry.)
v.iov_base = (void*)&pkt->data_[0];
v.iov_len = pkt->data_len_;
m.msg_iov = &v;
m.msg_iovlen = 1;
- /*
- * Getting the interface is a bit more involved.
- *
- * We set up some space for a "control message". We have
- * previously asked the kernel to give us packet
- * information (when we initialized the interface), so we
- * should get the destination address from that.
- */
+ // Getting the interface is a bit more involved.
+ //
+ // We set up some space for a "control message". We have
+ // previously asked the kernel to give us packet
+ // information (when we initialized the interface), so we
+ // should get the destination address from that.
m.msg_control = &control_buf_[0];
m.msg_controllen = control_buf_len_;
- result = recvmsg(recvsock_, &m, 0);
+ /// TODO: Need to move to select() and poll over
+ /// all available sockets. For now, we just take the
+ /// first interface and use first socket from it.
+ IfaceCollection::const_iterator iface = ifaces_.begin();
+ if (iface == ifaces_.end()) {
+ isc_throw(Unexpected, "No interfaces detected. Can't receive anything.");
+ }
+ SocketCollection::const_iterator s = iface->sockets_.begin();
+ const SocketInfo* candidate = 0;
+ while (s != iface->sockets_.end()) {
+ if (s->addr_.getAddress().to_v6().is_multicast()) {
+ candidate = &(*s);
+ break;
+ }
+ if (!candidate) {
+ candidate = &(*s); // it's not multicast, but it's better than none
+ }
+ ++s;
+ }
+ if (!candidate) {
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any sockets open.");
+ }
+
+ cout << "Trying to receive over socket " << candidate->sockfd_ << " bound to "
+ << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+ << iface->getFullName() << endl;
+ result = recvmsg(candidate->sockfd_, &m, 0);
if (result >= 0) {
- /*
- * If we did read successfully, then we need to loop
- * through the control messages we received and
- * find the one with our destination address.
- *
- * We also keep a flag to see if we found it. If we
- * didn't, then we consider this to be an error.
- */
+ // If we did read successfully, then we need to loop
+ // through the control messages we received and
+ // find the one with our destination address.
+ //
+ // We also keep a flag to see if we found it. If we
+ // didn't, then we consider this to be an error.
int found_pktinfo = 0;
cmsg = CMSG_FIRSTHDR(&m);
while (cmsg != NULL) {
@@ -520,7 +647,7 @@ IfaceMgr::receive() {
Iface* received = getIface(pkt->ifindex_);
if (received) {
- pkt->iface_ = received->name_;
+ pkt->iface_ = received->getName();
} else {
cout << "Received packet over unknown interface (ifindex="
<< pkt->ifindex_ << ")." << endl;
@@ -539,4 +666,60 @@ IfaceMgr::receive() {
return (pkt);
}
+// Selects a suitable socket for sending the given IPv6 packet: the first
+// non-multicast IPv6 socket open on the packet's outbound interface.
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt6 const& pkt) {
+ Iface* iface = getIface(pkt.iface_);
+ if (!iface) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.iface_);
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if (s->family_ != AF_INET6) {
+ // skip non-IPv6 (i.e. IPv4) sockets
+ continue;
+ }
+ if (s->addr_.getAddress().to_v6().is_multicast()) {
+ // don't use IPv6 sockets bound to multicast address
+ continue;
+ }
+ /// TODO: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+
+ return (s->sockfd_);
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv6 sockets open.");
+}
+
+// Selects a suitable socket for sending the given IPv4 packet: the first
+// IPv4 socket open on the packet's outbound interface.
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt4 const& pkt) {
+ Iface* iface = getIface(pkt.getIface());
+ if (!iface) {
+ isc_throw(BadValue, "Tried to find socket for non-existent interface "
+ << pkt.getIface());
+ }
+
+ SocketCollection::const_iterator s;
+ for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+ if (s->family_ != AF_INET) {
+ // skip non-IPv4 (i.e. IPv6) sockets; we need an IPv4 one here
+ continue;
+ }
+ /// TODO: Add more checks here later. If remote address is
+ /// not link-local, we can't use link local bound socket
+ /// to send data.
+
+ return (s->sockfd_);
+ }
+
+ isc_throw(Unexpected, "Interface " << iface->getFullName()
+ << " does not have any suitable IPv4 sockets open.");
+}
+
+
+
}
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
index 249c7ef..0aa2592 100644
--- a/src/bin/dhcp6/iface_mgr.h
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -19,8 +19,9 @@
#include <boost/shared_ptr.hpp>
#include <boost/scoped_array.hpp>
#include <boost/noncopyable.hpp>
-#include "asiolink/io_address.h"
-#include "dhcp/pkt6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/pkt6.h>
namespace isc {
@@ -34,26 +35,119 @@ namespace dhcp {
class IfaceMgr : public boost::noncopyable {
public:
/// type that defines list of addresses
- typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+ typedef std::vector<isc::asiolink::IOAddress> AddressCollection;
/// maximum MAC address length (Infiniband uses 20 bytes)
static const unsigned int MAX_MAC_LEN = 20;
+ /// Holds information about a single open socket.
+ struct SocketInfo {
+ uint16_t sockfd_; ///< socket descriptor
+ isc::asiolink::IOAddress addr_; ///< bound address
+ uint16_t port_; ///< socket port
+ uint16_t family_; ///< IPv4 or IPv6
+
+ /// @brief SocketInfo constructor.
+ ///
+ /// @param sockfd socket descriptor
+ /// @param addr an address the socket is bound to
+ /// @param port a port the socket is bound to
+ SocketInfo(uint16_t sockfd, const isc::asiolink::IOAddress& addr,
+ uint16_t port)
+ :sockfd_(sockfd), addr_(addr), port_(port), family_(addr.getFamily()) { }
+ };
+
+ /// type that holds a list of socket informations
+ typedef std::list<SocketInfo> SocketCollection;
+
/// @brief represents a single network interface
///
/// Iface structure represents network interface with all useful
/// information, like name, interface index, MAC address and
/// list of assigned addresses
- struct Iface {
- /// constructor
+ class Iface {
+ public:
+ /// @brief Iface constructor.
+ ///
+ /// Creates Iface object that represents network interface.
+ ///
+ /// @param name name of the interface
+ /// @param ifindex interface index (unique integer identifier)
Iface(const std::string& name, int ifindex);
- /// returns full interface name in format ifname/ifindex
+ /// @brief Returns full interface name as "ifname/ifindex" string.
+ ///
+ /// @return string with interface name
std::string getFullName() const;
- /// returns link-layer address a plain text
+ /// @brief Returns link-layer address a plain text.
+ ///
+ /// @return MAC address as a plain text (string)
std::string getPlainMac() const;
+ /// @brief Returns interface index.
+ ///
+ /// @return interface index
+ uint16_t getIndex() const { return ifindex_; }
+
+ /// @brief Returns interface name.
+ ///
+ /// @return interface name
+ std::string getName() const { return name_; };
+
+ /// @brief Returns all addresses available on an interface.
+ ///
+ /// Care should be taken to not use this collection after Iface object
+ /// ceases to exist. That is easy in most cases as Iface objects are
+ /// created by IfaceMgr that is a singleton and is expected to be
+ /// available at all time. We may revisit this if we ever decide to
+ /// implement dynamic interface detection, but such fancy feature would
+ /// mostly be useful for clients with wifi/vpn/virtual interfaces.
+ ///
+ /// @return collection of addresses
+ const AddressCollection& getAddresses() const { return addrs_; }
+
+ /// @brief Adds an address to an interface.
+ ///
+ /// This only adds an address to collection, it does not physically
+ /// configure address on actual network interface.
+ ///
+ /// @param addr address to be added
+ void addAddress(const isc::asiolink::IOAddress& addr) {
+ addrs_.push_back(addr);
+ }
+
+ /// @brief Deletes an address from an interface.
+ ///
+ /// This only deletes address from collection, it does not physically
+ /// remove address configuration from actual network interface.
+ ///
+ /// @param addr address to be removed.
+ ///
+ /// @return true if removal was successful (address was in collection),
+ /// false otherwise
+ bool delAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Adds socket descriptor to an interface.
+ ///
+ /// @param socket SocketInfo structure that describes socket.
+ void addSocket(const SocketInfo& sock)
+ { sockets_.push_back(sock); }
+
+ /// @brief Closes socket.
+ ///
+ /// Closes socket and removes corresponding SocketInfo structure
+ /// from an interface.
+ ///
+ /// @param sockfd socket descriptor to be closed/removed.
+ /// @return true if there was such socket, false otherwise
+ bool delSocket(uint16_t sockfd);
+
+ /// sockets open on this interface (used for sending and receiving data)
+ /// TODO: this should be protected
+ SocketCollection sockets_;
+
+ protected:
/// network interface name
std::string name_;
@@ -61,19 +155,13 @@ public:
int ifindex_;
/// list of assigned addresses
- Addr6Lst addrs_;
+ AddressCollection addrs_;
/// link-layer address
uint8_t mac_[MAX_MAC_LEN];
/// length of link-layer address (usually 6)
int mac_len_;
-
- /// socket used to sending data
- int sendsock_;
-
- /// socket used for receiving data
- int recvsock_;
};
// TODO performance improvement: we may change this into
@@ -81,7 +169,7 @@ public:
// also hide it (make it public make tests easier for now)
/// type that holds a list of interfaces
- typedef std::list<Iface> IfaceLst;
+ typedef std::list<Iface> IfaceCollection;
/// IfaceMgr is a singleton class. This method returns reference
/// to its sole instance.
@@ -109,27 +197,63 @@ public:
Iface*
getIface(const std::string& ifname);
+ /// @brief Return most suitable socket for transmitting specified IPv6 packet.
+ ///
+ /// This method takes Pkt6 (see overloaded implementation that takes
+ /// Pkt4) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt6& pkt);
+
+ /// @brief Return most suitable socket for transmitting specified IPv4 packet.
+ ///
+ /// This method takes Pkt4 (see overloaded implementation that takes
+ /// Pkt6) and chooses appropriate socket to send it. This method
+ /// may throw BadValue if specified packet does not have outbound
+ /// interface specified, no such interface exists, or specified
+ /// interface does not have any appropriate sockets open.
+ ///
+ /// @param pkt a packet to be transmitted
+ ///
+ /// @return a socket descriptor
+ uint16_t getSocket(const isc::dhcp::Pkt4& pkt);
+
/// debugging method that prints out all available interfaces
///
/// @param out specifies stream to print list of interfaces to
void
printIfaces(std::ostream& out = std::cout);
- /// @brief Sends a packet.
+ /// @brief Sends an IPv6 packet.
///
- /// Sends a packet. All parameters for actual transmission are specified in
+ /// Sends an IPv6 packet. All parameters for actual transmission are specified in
/// Pkt6 structure itself. That includes destination address, src/dst port
/// and interface over which data will be sent.
///
/// @param pkt packet to be sent
///
/// @return true if sending was successful
- bool
- send(boost::shared_ptr<Pkt6>& pkt);
+ bool send(boost::shared_ptr<Pkt6>& pkt);
- /// @brief Tries to receive packet over open sockets.
+ /// @brief Sends an IPv4 packet.
///
- /// Attempts to receive a single packet of any of the open sockets.
+ /// Sends an IPv4 packet. All parameters for actual transmission are specified
+ /// in Pkt4 structure itself. That includes destination address, src/dst
+ /// port and interface over which data will be sent.
+ ///
+ /// @param pkt a packet to be sent
+ ///
+ /// @return true if sending was successful
+ bool send(boost::shared_ptr<Pkt4>& pkt);
+
+ /// @brief Tries to receive IPv6 packet over open IPv6 sockets.
+ ///
+ /// Attempts to receive a single IPv6 packet of any of the open IPv6 sockets.
/// If reception is successful and all information about its sender
/// are obtained, Pkt6 object is created and returned.
///
@@ -138,7 +262,49 @@ public:
/// (e.g. remove expired leases)
///
/// @return Pkt6 object representing received packet (or NULL)
- boost::shared_ptr<Pkt6> receive();
+ boost::shared_ptr<Pkt6> receive6();
+
+ /// @brief Tries to receive IPv4 packet over open IPv4 sockets.
+ ///
+ /// Attempts to receive a single IPv4 packet of any of the open IPv4 sockets.
+ /// If reception is successful and all information about its sender
+ /// are obtained, Pkt4 object is created and returned.
+ ///
+ /// TODO Start using select() and add timeout to be able
+ /// to not wait infinitely, but rather do something useful
+ /// (e.g. remove expired leases)
+ ///
+ /// @return Pkt4 object representing received packet (or NULL)
+ boost::shared_ptr<Pkt4> receive4();
+
+ /// Opens UDP/IP socket and binds it to address, interface and port.
+ ///
+ /// Specific type of socket (UDP/IPv4 or UDP/IPv6) depends on passed addr
+ /// family.
+ ///
+ /// @param ifname name of the interface
+ /// @param addr address to be bound.
+ /// @param port UDP port.
+ ///
+ /// Method will throw if socket creation, socket binding or multicast
+ /// join fails.
+ ///
+ /// @return socket descriptor, if socket creation, binding and multicast
+ /// group join were all successful.
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr, int port);
+
+ /// Opens IPv6 sockets on detected interfaces.
+ ///
+ /// Will throw exception if socket creation fails.
+ ///
+ /// @param port specifies port number (usually DHCP6_SERVER_PORT)
+ void openSockets(uint16_t port);
+
+
+ /// @brief Closes all open sockets.
+ /// Is used in destructor, but also from Dhcpv4_srv and Dhcpv6_srv classes.
+ void closeSockets();
// don't use private, we need derived classes in tests
protected:
@@ -146,11 +312,44 @@ protected:
/// @brief Protected constructor.
///
/// Protected constructor. This is a singleton class. We don't want
- /// anyone to create instances of IfaceMgr. Use instance() method
+ /// anyone to create instances of IfaceMgr. Use instance() method instead.
IfaceMgr();
~IfaceMgr();
+ /// @brief Opens IPv4 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket4(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+ /// @brief Opens IPv6 socket.
+ ///
+ /// Please do not use this method directly. Use openSocket instead.
+ ///
+ /// This method may throw exception if socket creation fails.
+ ///
+ /// @param iface reference to interface structure.
+ /// @param addr an address the created socket should be bound to
+ /// @param port a port that created socket should be bound to
+ ///
+ /// @return socket descriptor
+ int openSocket6(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+ /// @brief Adds an interface to list of known interfaces.
+ ///
+ /// @param iface reference to Iface object.
+ void addInterface(const Iface& iface) {
+ ifaces_.push_back(iface);
+ }
+
/// @brief Detects network interfaces.
///
/// This method will eventually detect available interfaces. For now
@@ -159,24 +358,11 @@ protected:
void
detectIfaces();
- ///
- /// Opens UDP/IPv6 socket and binds it to address, interface and port.
- ///
- /// @param ifname name of the interface
- /// @param addr address to be bound.
- /// @param port UDP port.
- ///
- /// @return socket descriptor, if socket creation, binding and multicast
- /// group join were all successful. -1 otherwise.
- int openSocket(const std::string& ifname,
- const isc::asiolink::IOAddress& addr,
- int port);
-
// TODO: having 2 maps (ifindex->iface and ifname->iface would)
// probably be better for performance reasons
/// List of available interfaces
- IfaceLst ifaces_;
+ IfaceCollection ifaces_;
/// a pointer to a sole instance of this class (a singleton)
static IfaceMgr * instance_;
@@ -184,8 +370,9 @@ protected:
// TODO: Also keep this interface on Iface once interface detection
// is implemented. We may need it e.g. to close all sockets on
// specific interface
- int recvsock_; // TODO: should be fd_set eventually, but we have only
- int sendsock_; // 2 sockets for now. Will do for until next release
+ //int recvsock_; // TODO: should be fd_set eventually, but we have only
+ //int sendsock_; // 2 sockets for now. Will do for until next release
+
// we can't use the same socket, as receiving socket
// is bound to multicast address. And we all know what happens
// to people who try to use multicast as source address.
@@ -197,9 +384,6 @@ protected:
boost::scoped_array<char> control_buf_;
private:
- /// Opens sockets on detected interfaces.
- bool
- openSockets();
/// creates a single instance of this class (a singleton implementation)
static void
@@ -221,6 +405,7 @@ private:
bool
joinMcast(int sock, const std::string& ifname,
const std::string& mcast);
+
};
}; // namespace isc::dhcp
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 985368e..f37194c 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -25,8 +25,6 @@ check-local:
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
AM_CPPFLAGS += -I$(top_srcdir)/src/bin
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
@@ -57,8 +55,8 @@ dhcp6_unittests_LDADD = $(GTEST_LDADD)
dhcp6_unittests_LDADD += $(SQLITE_LIBS)
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index 72e48e4..50f37af 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -34,7 +34,7 @@ namespace test {
class NakedDhcpv6Srv: public Dhcpv6Srv {
// "naked" Interface Manager, exposes internal fields
public:
- NakedDhcpv6Srv() { }
+ NakedDhcpv6Srv():Dhcpv6Srv(DHCP6_SERVER_PORT + 10000) { }
boost::shared_ptr<Pkt6>
processSolicit(boost::shared_ptr<Pkt6>& request) {
@@ -53,30 +53,27 @@ public:
};
TEST_F(Dhcpv6SrvTest, basic) {
- // there's almost no code now. What's there provides echo capability
- // that is just a proof of concept and will be removed soon
- // No need to thoroughly test it
-
// srv has stubbed interface detection. It will read
// interfaces.txt instead. It will pretend to have detected
// fe80::1234 link-local address on eth0 interface. Obviously
// an attempt to bind this socket will fail.
- EXPECT_NO_THROW( {
- Dhcpv6Srv * srv = new Dhcpv6Srv();
-
- delete srv;
- });
+ Dhcpv6Srv* srv = 0;
+ ASSERT_NO_THROW( {
+ // open an unprivileged port
+ srv = new Dhcpv6Srv(DHCP6_SERVER_PORT + 10000);
+ });
+ delete srv;
}
TEST_F(Dhcpv6SrvTest, Solicit_basic) {
NakedDhcpv6Srv * srv = 0;
- EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+ ASSERT_NO_THROW( srv = new NakedDhcpv6Srv(); );
// a dummy content for client-id
boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
- for (int i=0; i<32; i++)
- clntDuid[i] = 100+i;
+ for (int i = 0; i < 32; i++)
+ clntDuid[i] = 100 + i;
boost::shared_ptr<Pkt6> sol =
boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
index f126e6a..0c54780 100644
--- a/src/bin/dhcp6/tests/iface_mgr_unittest.cc
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -20,9 +20,10 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
-#include "io_address.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp/dhcp4.h>
using namespace std;
using namespace isc;
@@ -39,16 +40,7 @@ class NakedIfaceMgr: public IfaceMgr {
// "naked" Interface Manager, exposes internal fields
public:
NakedIfaceMgr() { }
- IfaceLst & getIfacesLst() { return ifaces_; }
- void setSendSock(int sock) { sendsock_ = sock; }
- void setRecvSock(int sock) { recvsock_ = sock; }
-
- int openSocket(const std::string& ifname,
- const isc::asiolink::IOAddress& addr,
- int port) {
- return IfaceMgr::openSocket(ifname, addr, port);
- }
-
+ IfaceCollection & getIfacesLst() { return ifaces_; }
};
// dummy class for now, but this will be expanded when needed
@@ -56,6 +48,13 @@ class IfaceMgrTest : public ::testing::Test {
public:
IfaceMgrTest() {
}
+
+ void createLoInterfacesTxt() {
+ unlink(INTERFACE_FILE);
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << LOOPBACK << " ::1";
+ fakeifaces.close();
+ }
};
// We need some known interface to work reliably. Loopback interface
@@ -109,6 +108,7 @@ TEST_F(IfaceMgrTest, dhcp6Sniffer) {
while (true) {
pkt = ifacemgr->receive();
+ cout << "// this code is autogenerated. Do NOT edit." << endl;
cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
cout << " Pkt6* pkt;" << endl;
@@ -183,10 +183,10 @@ TEST_F(IfaceMgrTest, getIface) {
cout << "There are " << ifacemgr->getIfacesLst().size()
<< " interfaces." << endl;
- for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+ for (IfaceMgr::IfaceCollection::iterator iface=ifacemgr->getIfacesLst().begin();
iface != ifacemgr->getIfacesLst().end();
++iface) {
- cout << " " << iface->name_ << "/" << iface->ifindex_ << endl;
+ cout << " " << iface->getFullName() << endl;
}
@@ -195,15 +195,15 @@ TEST_F(IfaceMgrTest, getIface) {
// ASSERT_NE(NULL, tmp); is not supported. hmmmm.
ASSERT_TRUE( tmp != NULL );
- EXPECT_STREQ( "en3", tmp->name_.c_str() );
- EXPECT_EQ(5, tmp->ifindex_);
+ EXPECT_EQ( "en3", tmp->getName() );
+ EXPECT_EQ(5, tmp->getIndex());
// check that interface can be retrieved by name
tmp = ifacemgr->getIface("lo1");
ASSERT_TRUE( tmp != NULL );
- EXPECT_STREQ( "lo1", tmp->name_.c_str() );
- EXPECT_EQ(1, tmp->ifindex_);
+ EXPECT_EQ( "lo1", tmp->getName() );
+ EXPECT_EQ(1, tmp->getIndex());
// check that non-existing interfaces are not returned
EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
@@ -231,58 +231,51 @@ TEST_F(IfaceMgrTest, detectIfaces) {
IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
// there should be one address
- EXPECT_EQ(1, eth0->addrs_.size());
+ IfaceMgr::AddressCollection addrs = eth0->getAddresses();
+ ASSERT_EQ(1, addrs.size());
- IOAddress * addr = &(*eth0->addrs_.begin());
- ASSERT_TRUE( addr != NULL );
+ IOAddress addr = *addrs.begin();
- EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+ EXPECT_STREQ( "fe80::1234", addr.toText().c_str() );
delete ifacemgr;
}
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sockets) {
+TEST_F(IfaceMgrTest, sockets6) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
+ createLoInterfacesTxt();
+
NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
IOAddress loAddr("::1");
+ Pkt6 pkt6(128);
+ pkt6.iface_ = LOOPBACK;
+
// bind multicast socket to port 10547
int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
EXPECT_GT(socket1, 0); // socket > 0
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt6));
+
// bind unicast socket to port 10548
int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
EXPECT_GT(socket2, 0);
- // expect success. This address/port is already bound, but
- // we are using SO_REUSEADDR, so we can bind it twice
- int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-
- // rebinding succeeds on Linux, fails on BSD
- // TODO: add OS-specific defines here (or modify code to
- // behave the same way on all OSes, but that may not be
- // possible
- // EXPECT_GT(socket3, 0); // socket > 0
-
- // we now have 3 sockets open at the same time. Looks good.
+ // removed code for binding socket twice to the same address/port
+ // as it caused problems on some platforms (e.g. Mac OS X)
close(socket1);
close(socket2);
- close(socket3);
delete ifacemgr;
}
// TODO: disabled due to other naming on various systems
// (lo in Linux, lo0 in BSD systems)
-TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+TEST_F(IfaceMgrTest, DISABLED_sockets6Mcast) {
// testing socket operation in a portable way is tricky
// without interface detection implemented
@@ -311,27 +304,24 @@ TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
delete ifacemgr;
}
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+TEST_F(IfaceMgrTest, sendReceive6) {
+
// testing socket operation in a portable way is tricky
// without interface detection implemented
+ createLoInterfacesTxt();
- fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
- fakeifaces << LOOPBACK << " ::1";
- fakeifaces.close();
-
- NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
// let's assume that every supported OS have lo interface
IOAddress loAddr("::1");
- int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
- int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ int socket1 = 0, socket2 = 0;
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+ );
- ifacemgr->setSendSock(socket2);
- ifacemgr->setRecvSock(socket1);
+ EXPECT_GT(socket1, 0);
+ EXPECT_GT(socket2, 0);
boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
@@ -349,7 +339,7 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
EXPECT_EQ(true, ifacemgr->send(sendPkt));
- rcvPkt = ifacemgr->receive();
+ rcvPkt = ifacemgr->receive6();
ASSERT_TRUE( rcvPkt ); // received our own packet
@@ -359,7 +349,168 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
rcvPkt->data_len_) );
EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
- EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+ // since we opened 2 sockets on the same interface and none of them is multicast,
+ // none is preferred over the other for sending data, so we really should not
+ // assume the one or the other will always be chosen for sending data. Therefore
+ // we should accept both values as source ports.
+ EXPECT_TRUE( (rcvPkt->remote_port_ == 10546) || (rcvPkt->remote_port_ == 10547) );
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, socket4) {
+
+ createLoInterfacesTxt();
+ NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+ // Let's assume that every supported OS have lo interface.
+ IOAddress loAddr("127.0.0.1");
+ // Use unprivileged port (it's convenient for running tests as non-root).
+ int socket1 = 0;
+
+ EXPECT_NO_THROW(
+ socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+ );
+
+ EXPECT_GT(socket1, 0);
+
+ Pkt4 pkt(DHCPDISCOVER, 1234);
+ pkt.setIface(LOOPBACK);
+
+ // Expect that we get the socket that we just opened.
+ EXPECT_EQ(socket1, ifacemgr->getSocket(pkt));
+
+ close(socket1);
+
+ delete ifacemgr;
+}
+
+// Test the Iface structure itself
+TEST_F(IfaceMgrTest, iface) {
+ IfaceMgr::Iface* iface = 0;
+ EXPECT_NO_THROW(
+ iface = new IfaceMgr::Iface("eth0",1);
+ );
+
+ EXPECT_EQ("eth0", iface->getName());
+ EXPECT_EQ(1, iface->getIndex());
+ EXPECT_EQ("eth0/1", iface->getFullName());
+
+ // Let's make a copy of this address collection.
+ IfaceMgr::AddressCollection addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ IOAddress addr1("192.0.2.6");
+ iface->addAddress(addr1);
+
+ addrs = iface->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+ EXPECT_EQ("192.0.2.6", addrs.at(0).toText());
+
+ // No such address, should return false.
+ EXPECT_FALSE(iface->delAddress(IOAddress("192.0.8.9")));
+
+ // This address is present, delete it!
+ EXPECT_TRUE(iface->delAddress(IOAddress("192.0.2.6")));
+
+ // Not really necessary, previous reference still points to the same
+ // collection. Let's do it anyway, as test code may serve as example
+ // usage code as well.
+ addrs = iface->getAddresses();
+
+ EXPECT_EQ(0, addrs.size());
+
+ EXPECT_NO_THROW(
+ delete iface;
+ );
+}
+
+TEST_F(IfaceMgrTest, socketInfo) {
+
+ // check that socketinfo for IPv4 socket is functional
+ IfaceMgr::SocketInfo sock1(7, IOAddress("192.0.2.56"), DHCP4_SERVER_PORT + 7);
+ EXPECT_EQ(7, sock1.sockfd_);
+ EXPECT_EQ("192.0.2.56", sock1.addr_.toText());
+ EXPECT_EQ(AF_INET, sock1.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 7, sock1.port_);
+
+ // check that socketinfo for IPv6 socket is functional
+ IfaceMgr::SocketInfo sock2(9, IOAddress("2001:db8:1::56"), DHCP4_SERVER_PORT + 9);
+ EXPECT_EQ(9, sock2.sockfd_);
+ EXPECT_EQ("2001:db8:1::56", sock2.addr_.toText());
+ EXPECT_EQ(AF_INET6, sock2.family_);
+ EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_);
+
+ // now let's test if IfaceMgr handles socket info properly
+ createLoInterfacesTxt();
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+ IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK);
+ ASSERT_TRUE(loopback);
+ loopback->addSocket(sock1);
+ loopback->addSocket(sock2);
+
+ Pkt6 pkt6(100);
+
+ // pkt6 does not have the interface set yet
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // try to send over non-existing interface
+ pkt6.iface_ = "nosuchinterface45";
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ BadValue
+ );
+
+ // this will work
+ pkt6.iface_ = LOOPBACK;
+ EXPECT_EQ(9, ifacemgr->getSocket(pkt6));
+
+ bool deleted = false;
+ EXPECT_NO_THROW(
+ deleted = ifacemgr->getIface(LOOPBACK)->delSocket(9);
+ );
+ EXPECT_EQ(true, deleted);
+
+ // it should throw again, there's no usable socket anymore
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt6),
+ Unexpected
+ );
+
+ // repeat for pkt4
+ Pkt4 pkt4(DHCPDISCOVER, 1);
+
+ // pkt4 does not have interface set yet.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Try to send over non-existing interface.
+ pkt4.setIface("nosuchinterface45");
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ BadValue
+ );
+
+ // Socket info is set, packet has well defined interface. It should work.
+ pkt4.setIface(LOOPBACK);
+ EXPECT_EQ(7, ifacemgr->getSocket(pkt4));
+
+ EXPECT_NO_THROW(
+ ifacemgr->getIface(LOOPBACK)->delSocket(7);
+ );
+
+ // It should throw again, there's no usable socket anymore.
+ EXPECT_THROW(
+ ifacemgr->getSocket(pkt4),
+ Unexpected
+ );
delete ifacemgr;
}
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 12ddab3..4d407bb 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -45,9 +45,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
index d5846ad..ed91423 100644
--- a/src/bin/stats/stats-httpd-xml.tpl
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -1,24 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
- xmlns:stats="$xsd_namespace"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="$xsd_namespace $xsd_url_path">
- $xml_string
-</stats:stats_data>
+$xml_string
\ No newline at end of file
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
index 6ad1280..cc5578a 100644
--- a/src/bin/stats/stats-httpd-xsd.tpl
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -1,38 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
- xmlns="http://www.w3.org/2001/XMLSchema"
- xmlns:stats="$xsd_namespace">
- <annotation>
- <documentation xml:lang="en">XML schema of the statistics
- data in BIND 10</documentation>
- </annotation>
- <element name="stats_data">
- <annotation>
- <documentation>A set of statistics data</documentation>
- </annotation>
- <complexType>
- $xsd_string
- <attribute name="version" type="token" use="optional" default="1.0">
- <annotation>
- <documentation>Version number of syntax</documentation>
- </annotation>
- </attribute>
- </complexType>
- </element>
-</schema>
+$xsd_string
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index a1f6406..7c2e7ae 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -1,23 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
- xmlns:stats="$xsd_namespace">
+ xmlns:bind10="$xsd_namespace">
<xsl:output method="html" encoding="UTF-8"
doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
@@ -42,14 +26,7 @@ td.title {
</head>
<body>
<h1>BIND 10 Statistics</h1>
- <table>
- <tr>
- <th>Owner</th>
- <th>Title</th>
- <th>Value</th>
- </tr>
- <xsl:apply-templates />
- </table>
+ <xsl:apply-templates />
</body>
</html>
</xsl:template>
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index 3a7f47a..51c4e09 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -246,12 +246,12 @@ class Stats:
self.update_statistics_data()
if owner and name:
try:
- return self.statistics_data[owner][name]
+ return {owner:{name:self.statistics_data[owner][name]}}
except KeyError:
pass
elif owner:
try:
- return self.statistics_data[owner]
+ return {owner: self.statistics_data[owner]}
except KeyError:
pass
elif name:
@@ -360,9 +360,9 @@ class Stats:
if owner:
try:
if name:
- return isc.config.create_answer(0, schema_byname[owner][name])
+ return isc.config.create_answer(0, {owner:[schema_byname[owner][name]]})
else:
- return isc.config.create_answer(0, schema[owner])
+ return isc.config.create_answer(0, {owner:schema[owner]})
except KeyError:
pass
else:
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
index 042630d..f265abb 100644
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -29,6 +29,7 @@ import http.server
import socket
import string
import xml.etree.ElementTree
+import urllib.parse
import isc.cc
import isc.config
@@ -66,7 +67,7 @@ XML_URL_PATH = '/bind10/statistics/xml'
XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
-XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
+XSD_NAMESPACE = 'http://bind10.isc.org/bind10'
# Assign this process name
isc.util.process.rename()
@@ -85,14 +86,29 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
def send_head(self):
try:
- if self.path == XML_URL_PATH:
- body = self.server.xml_handler()
- elif self.path == XSD_URL_PATH:
- body = self.server.xsd_handler()
- elif self.path == XSL_URL_PATH:
- body = self.server.xsl_handler()
+ req_path = self.path
+ req_path = urllib.parse.urlsplit(req_path).path
+ req_path = urllib.parse.unquote(req_path)
+ req_path = os.path.normpath(req_path)
+ path_dirs = req_path.split('/')
+ path_dirs = [ d for d in filter(None, path_dirs) ]
+ req_path = '/'+"/".join(path_dirs)
+ module_name = None
+ item_name = None
+ # in case of /bind10/statistics/xxx/YYY/zzz
+ if len(path_dirs) >= 5:
+ item_name = path_dirs[4]
+ # in case of /bind10/statistics/xxx/YYY ...
+ if len(path_dirs) >= 4:
+ module_name = path_dirs[3]
+ if req_path == '/'.join([XML_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xml_handler(module_name, item_name)
+ elif req_path == '/'.join([XSD_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xsd_handler(module_name, item_name)
+ elif req_path == '/'.join([XSL_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xsl_handler(module_name, item_name)
else:
- if self.path == '/' and 'Host' in self.headers.keys():
+ if req_path == '/' and 'Host' in self.headers.keys():
# redirect to XML URL only when requested with '/'
self.send_response(302)
self.send_header(
@@ -104,6 +120,12 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
# Couldn't find HOST
self.send_error(404)
return None
+ except StatsHttpdDataError as err:
+ # Couldn't find either the specified module name or
+ # the specified item name
+ self.send_error(404)
+ logger.error(STATHTTPD_SERVER_DATAERROR, err)
+ return None
except StatsHttpdError as err:
self.send_error(500)
logger.error(STATHTTPD_SERVER_ERROR, err)
@@ -145,6 +167,12 @@ class StatsHttpdError(Exception):
main routine."""
pass
+class StatsHttpdDataError(Exception):
+ """Exception class for StatsHttpd class. The reason seems to be
+ due to the data. It is intended to be thrown from the the
+ StatsHttpd object to the HttpHandler object or main routine."""
+ pass
+
class StatsHttpd:
"""The main class of HTTP server of HTTP/XML interface for
statistics module. It handles HTTP requests, and command channel
@@ -334,12 +362,27 @@ class StatsHttpd:
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
- def get_stats_data(self):
+ def get_stats_data(self, owner=None, name=None):
"""Requests statistics data to the Stats daemon and returns
- the data which obtains from it"""
+ the data which obtains from it. The first argument is the
+ module name which owns the statistics data, the second
+ argument is one name of the statistics items which the
+ module owns. The second argument cannot be specified when the
+ first argument is not specified. It returns the statistics
+ data of the specified module or item. When the session timeout
+ or the session error is occurred, it raises
+ StatsHttpdError. When the stats daemon returns a non-zero
+ value, it raises StatsHttpdDataError."""
+ param = {}
+ if owner is None and name is None:
+ param = None
+ if owner is not None:
+ param['owner'] = owner
+ if name is not None:
+ param['name'] = name
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'), 'Stats')
+ isc.config.ccsession.create_command('show', param), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -351,131 +394,409 @@ class StatsHttpd:
if rcode == 0:
return value
else:
- raise StatsHttpdError("Stats module: %s" % str(value))
+ raise StatsHttpdDataError("Stats module: %s" % str(value))
- def get_stats_spec(self):
+ def get_stats_spec(self, owner=None, name=None):
"""Requests statistics data to the Stats daemon and returns
- the data which obtains from it"""
+ the data which obtains from it. The first argument is the
+ module name which owns the statistics data, the second
+ argument is one name of the statistics items which the
+ module owns. The second argument cannot be specified when the
+ first argument is not specified. It returns the statistics
+ specification of the specified module or item. When the
+ session timeout or the session error is occurred, it raises
+ StatsHttpdError. When the stats daemon returns a non-zero
+ value, it raises StatsHttpdDataError."""
+ param = {}
+ if owner is None and name is None:
+ param = None
+ if owner is not None:
+ param['owner'] = owner
+ if name is not None:
+ param['name'] = name
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('showschema'), 'Stats')
+ isc.config.ccsession.create_command('showschema', param), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
if rcode == 0:
return value
else:
- raise StatsHttpdError("Stats module: %s" % str(value))
+ raise StatsHttpdDataError("Stats module: %s" % str(value))
except (isc.cc.session.SessionTimeout,
isc.cc.session.SessionError) as err:
raise StatsHttpdError("%s: %s" %
(err.__class__.__name__, err))
- def xml_handler(self):
- """Handler which requests to Stats daemon to obtain statistics
- data and returns the body of XML document"""
- xml_list=[]
- for (mod, spec) in self.get_stats_data().items():
- if not spec: continue
- elem1 = xml.etree.ElementTree.Element(str(mod))
- for (k, v) in spec.items():
- elem2 = xml.etree.ElementTree.Element(str(k))
- elem2.text = str(v)
- elem1.append(elem2)
- # The coding conversion is tricky. xml..tostring() of Python 3.2
- # returns bytes (not string) regardless of the coding, while
- # tostring() of Python 3.1 returns a string. To support both
- # cases transparently, we first make sure tostring() returns
- # bytes by specifying utf-8 and then convert the result to a
- # plain string (code below assume it).
- xml_list.append(
- str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
- encoding='us-ascii'))
- xml_string = "".join(xml_list)
+
+ def xml_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics data and specification by
+ using the functions get_stats_data and get_stats_spec
+ respectively and loads the XML template file and returns the
+ string of the XML document. The first argument is the module
+ name which owns the statistics data, the second argument is
+ one name of the statistics items which the module
+ owns. The second argument cannot be specified when the first
+ argument is not specified."""
+
+ # TODO: Separate the following recursive function by type of
+ # the parameter. Because we should be sure what type there is
+ # when we call it recursively.
+ def stats_data2xml(stats_spec, stats_data, xml_elem):
+ """Internal use for xml_handler. Reads stats_data and
+ stats_spec specified as first and second arguments, and
+ modify the xml object specified as third
+ argument. xml_elem must be modified and always returns
+ None."""
+ # assumed started with module_spec or started with
+ # item_spec in statistics
+ if type(stats_spec) is dict:
+ # assumed started with module_spec
+ if 'item_name' not in stats_spec \
+ and 'item_type' not in stats_spec:
+ for module_name in stats_spec.keys():
+ elem = xml.etree.ElementTree.Element(module_name)
+ stats_data2xml(stats_spec[module_name],
+ stats_data[module_name], elem)
+ xml_elem.append(elem)
+ # started with item_spec in statistics
+ else:
+ elem = xml.etree.ElementTree.Element(stats_spec['item_name'])
+ if stats_spec['item_type'] == 'map':
+ stats_data2xml(stats_spec['map_item_spec'],
+ stats_data,
+ elem)
+ elif stats_spec['item_type'] == 'list':
+ for item in stats_data:
+ stats_data2xml(stats_spec['list_item_spec'],
+ item, elem)
+ else:
+ elem.text = str(stats_data)
+ xml_elem.append(elem)
+ # assumed started with stats_spec
+ elif type(stats_spec) is list:
+ for item_spec in stats_spec:
+ stats_data2xml(item_spec,
+ stats_data[item_spec['item_name']],
+ xml_elem)
+
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ stats_data = self.get_stats_data(module_name, item_name)
+ # make the path xxx/module/item if specified respectively
+ path_info = ''
+ if module_name is not None and item_name is not None:
+ path_info = '/' + module_name + '/' + item_name
+ elif module_name is not None:
+ path_info = '/' + module_name
+ xml_elem = xml.etree.ElementTree.Element(
+ 'bind10:statistics',
+ attrib={ 'xsi:schemaLocation' : XSD_NAMESPACE + ' ' + XSD_URL_PATH + path_info,
+ 'xmlns:bind10' : XSD_NAMESPACE,
+ 'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance" })
+ stats_data2xml(stats_spec, stats_data, xml_elem)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
+ xml_string = str(xml.etree.ElementTree.tostring(xml_elem, encoding='utf-8'),
+ encoding='us-ascii')
self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
xml_string=xml_string,
- xsd_namespace=XSD_NAMESPACE,
- xsd_url_path=XSD_URL_PATH,
- xsl_url_path=XSL_URL_PATH)
+ xsl_url_path=XSL_URL_PATH + path_info)
assert self.xml_body is not None
return self.xml_body
- def xsd_handler(self):
- """Handler which just returns the body of XSD document"""
+ def xsd_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics specification by using
+ the function get_stats_spec respectively and loads the XSD
+ template file and returns the string of the XSD document. The
+ first argument is the module name which owns the statistics
+ data, the second argument is one name of the statistics items
+ which the module owns. The second argument cannot be
+ specified when the first argument is not specified."""
+
+ # TODO: Separate the following recursive function by type of
+ # the parameter. Because we should be sure what type there is
+ # when we call it recursively.
+ def stats_spec2xsd(stats_spec, xsd_elem):
+ """Internal use for xsd_handler. Reads stats_spec
+ specified as first arguments, and modify the xml object
+ specified as second argument. xsd_elem must be
+ modified. Always returns None with no exceptions."""
+ # assumed module_spec or one stats_spec
+ if type(stats_spec) is dict:
+ # assumed module_spec
+ if 'item_name' not in stats_spec:
+ for mod in stats_spec.keys():
+ elem = xml.etree.ElementTree.Element(
+ "element", { "name" : mod })
+ complextype = xml.etree.ElementTree.Element("complexType")
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec[mod], alltag)
+ complextype.append(alltag)
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ # assumed stats_spec
+ else:
+ if stats_spec['item_type'] == 'map':
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec['map_item_spec'], alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ "name" : stats_spec["item_name"],
+ "minOccurs": "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ "maxOccurs": "unbounded" })
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ elif stats_spec['item_type'] == 'list':
+ alltag = xml.etree.ElementTree.Element("sequence")
+ stats_spec2xsd(stats_spec['list_item_spec'], alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ "name" : stats_spec["item_name"],
+ "minOccurs": "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ "maxOccurs": "1" })
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ else:
+ # determine the datatype of XSD
+ # TODO: Should consider other item_format types
+ datatype = stats_spec["item_type"] \
+ if stats_spec["item_type"].lower() != 'real' \
+ else 'float'
+ if "item_format" in stats_spec:
+ item_format = stats_spec["item_format"]
+ if datatype.lower() == 'string' \
+ and item_format.lower() == 'date-time':
+ datatype = 'dateTime'
+ elif datatype.lower() == 'string' \
+ and (item_format.lower() == 'date' \
+ or item_format.lower() == 'time'):
+ datatype = item_format.lower()
+ elem = xml.etree.ElementTree.Element(
+ "element",
+ attrib={
+ 'name' : stats_spec["item_name"],
+ 'type' : datatype,
+ 'minOccurs' : "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ 'maxOccurs' : "1"
+ }
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ if "item_title" in stats_spec:
+ appinfo.text = stats_spec["item_title"]
+ if "item_description" in stats_spec:
+ documentation.text = stats_spec["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ elem.append(annotation)
+ xsd_elem.append(elem)
+ # multiple stats_specs
+ elif type(stats_spec) is list:
+ for item_spec in stats_spec:
+ stats_spec2xsd(item_spec, xsd_elem)
+
# for XSD
- xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for (mod, spec) in self.get_stats_spec().items():
- if not spec: continue
- alltag = xml.etree.ElementTree.Element("all")
- for item in spec:
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- alltag.append(element)
-
- complextype = xml.etree.ElementTree.Element("complexType")
- complextype.append(alltag)
- mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
- mod_element.append(complextype)
- xsd_root.append(mod_element)
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec, alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ documentation = xml.etree.ElementTree.Element("documentation")
+ documentation.text = "A set of statistics data"
+ annotation = xml.etree.ElementTree.Element("annotation")
+ annotation.append(documentation)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ 'name' : 'statistics' })
+ elem.append(annotation)
+ elem.append(complextype)
+ documentation = xml.etree.ElementTree.Element("documentation")
+ documentation.text = "XML schema of the statistics data in BIND 10"
+ annotation = xml.etree.ElementTree.Element("annotation")
+ annotation.append(documentation)
+ xsd_root = xml.etree.ElementTree.Element(
+ "schema",
+ attrib={ 'xmlns' : "http://www.w3.org/2001/XMLSchema",
+ 'targetNamespace' : XSD_NAMESPACE,
+ 'xmlns:bind10' : XSD_NAMESPACE })
+ xsd_root.append(annotation)
+ xsd_root.append(elem)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
# cases transparently, we first make sure tostring() returns
# bytes by specifying utf-8 and then convert the result to a
# plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
encoding='us-ascii')
self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=xsd_string,
- xsd_namespace=XSD_NAMESPACE
- )
+ xsd_string=xsd_string)
assert self.xsd_body is not None
return self.xsd_body
- def xsl_handler(self):
- """Handler which just returns the body of XSL document"""
+ def xsl_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics specification by using
+ the function get_stats_spec respectively and loads the XSL
+ template file and returns the string of the XSL document. The
+ first argument is the module name which owns the statistics
+ data, the second argument is one name of the statistics items
+ which the the module owns. The second argument cannot be
+ specified when the first argument is not specified."""
+
+ # TODO: Separate the following recursive function by type of
+ # the parameter. Because we should be sure what type there is
+ # when we call it recursively.
+ def stats_spec2xsl(stats_spec, xsl_elem, path=XML_URL_PATH):
+ """Internal use for xsl_handler. Reads the stats_spec
+ given as the first argument and modifies, in place, the
+ XML object (xsl_elem) given as the second argument. The
+ third argument is a base path used for making an anchor
+ tag in the XSL. Always returns None and raises no
+ exceptions."""
+ # assumed module_spec or one stats_spec
+ if type(stats_spec) is dict:
+ # assumed module_spec
+ if 'item_name' not in stats_spec:
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Module Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Module Item"
+ tr.append(th)
+ table.append(tr)
+ for mod in stats_spec.keys():
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : mod })
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element("td")
+ a = xml.etree.ElementTree.Element(
+ "a", attrib={ "href": urllib.parse.quote(path + "/" + mod) })
+ a.text = mod
+ td.append(a)
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(stats_spec[mod], td,
+ path + "/" + mod)
+ tr.append(td)
+ foreach.append(tr)
+ table.append(foreach)
+ xsl_elem.append(table)
+ # assumed stats_spec
+ else:
+ if stats_spec['item_type'] == 'map':
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Value"
+ tr.append(th)
+ table.append(tr)
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : stats_spec['item_name'] })
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element(
+ "td",
+ attrib={ "class" : "title",
+ "title" : stats_spec["item_description"] \
+ if "item_description" in stats_spec \
+ else "" })
+ # TODO: Consider whether we should always use
+ # the identical name "item_name" for the
+ # user-visible name in XSL.
+ td.text = stats_spec[ "item_title" if "item_title" in stats_spec else "item_name" ]
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(stats_spec['map_item_spec'], td,
+ path + "/" + stats_spec["item_name"])
+ tr.append(td)
+ foreach.append(tr)
+ table.append(foreach)
+ xsl_elem.append(table)
+ elif stats_spec['item_type'] == 'list':
+ stats_spec2xsl(stats_spec['list_item_spec'], xsl_elem,
+ path + "/" + stats_spec["item_name"])
+ else:
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ attrib={'select': stats_spec["item_name"]})
+ xsl_elem.append(xsl_valueof)
+
+ # multiple stats_specs
+ elif type(stats_spec) is list:
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Value"
+ tr.append(th)
+ table.append(tr)
+ for item_spec in stats_spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element(
+ "td",
+ attrib={ "class" : "title",
+ "title" : item_spec["item_description"] \
+ if "item_description" in item_spec \
+ else "" })
+ # if the path length is equal to or shorter than
+ # XML_URL_PATH + /Module/Item, add the anchor tag.
+ if len(path.split('/')) <= len((XML_URL_PATH + '/Module/Item').split('/')):
+ a = xml.etree.ElementTree.Element(
+ "a", attrib={ "href": urllib.parse.quote(path + "/" + item_spec["item_name"]) })
+ a.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+ td.append(a)
+ else:
+ td.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(item_spec, td, path)
+ tr.append(td)
+ if item_spec['item_type'] == 'list':
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : item_spec['item_name'] })
+ foreach.append(tr)
+ table.append(foreach)
+ else:
+ table.append(tr)
+ xsl_elem.append(table)
+
# for XSL
- xsd_root = xml.etree.ElementTree.Element(
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ xsd_root = xml.etree.ElementTree.Element( # started with xml:template tag
"xsl:template",
- dict(match="*")) # started with xml:template tag
- for (mod, spec) in self.get_stats_spec().items():
- if not spec: continue
- for item in spec:
- tr = xml.etree.ElementTree.Element("tr")
- td0 = xml.etree.ElementTree.Element("td")
- td0.text = str(mod)
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=mod+'/'+item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td0)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
+ attrib={'match': "bind10:statistics"})
+ stats_spec2xsl(stats_spec, xsd_root)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
# cases transparently, we first make sure tostring() returns
# bytes by specifying utf-8 and then convert the result to a
# plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
encoding='us-ascii')
self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index 0e984dc..dbd0650 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -55,6 +55,12 @@ response will be sent back, and the specific error is printed. This
is an error condition that likely points to a module that is not
responding correctly to statistic requests.
+% STATHTTPD_SERVER_DATAERROR HTTP server data error: %1
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+ is an error condition that likely indicates that the specified data
+ corresponding to the requested URI is incorrect.
+
% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
There was a problem initializing the HTTP server in the stats-httpd
module upon receiving its configuration data. The most likely cause
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index afd572f..01254d4 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
EXTRA_DIST = $(PYTESTS) test_utils.py
-CLEANFILES = test_utils.pyc msgq_socket_test
+CLEANFILES = test_utils.pyc
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index e867080..b6847bd 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -45,7 +45,12 @@ DUMMY_DATA = {
},
'Auth' : {
"queries.tcp": 2,
- "queries.udp": 3
+ "queries.udp": 3,
+ "queries.perzone": [{
+ "zonename": "test.example",
+ "queries.tcp": 2,
+ "queries.udp": 3
+ }]
},
'Stats' : {
"report_time": "2011-03-04T11:59:19Z",
@@ -129,68 +134,295 @@ class TestHttpHandler(unittest.TestCase):
self.assertEqual(len(self.stats_httpd.httpd), 1)
self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
- # URL is '/bind10/statistics/xml'
- self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- self.assertTrue(root.tag.find('stats_data') > 0)
- for (k,v) in root.attrib.items():
- if k.find('schemaLocation') > 0:
- self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
- for mod in DUMMY_DATA:
- for (item, value) in DUMMY_DATA[mod].items():
+ def check_XML_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XML_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ xml_doctype = response.readline().decode()
+ xsl_doctype = response.readline().decode()
+ self.assertTrue(len(xml_doctype) > 0)
+ self.assertTrue(len(xsl_doctype) > 0)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('statistics') > 0)
+ schema_loc = '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'
+ if item is None and mod is None:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="' +
+ stats_httpd.XSL_URL_PATH
+ + '"?>'))
+ for m in DUMMY_DATA:
+ for k in DUMMY_DATA[m].keys():
+ self.assertIsNotNone(root.find(m + '/' + k))
+ itm = root.find(m + '/' + k)
+ if type(DUMMY_DATA[m][k]) is list:
+ for v in DUMMY_DATA[m][k]:
+ for i in v:
+ self.assertIsNotNone(itm.find('zones/' + i))
+ elif item is None:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH + '/' + mod)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="'
+ + stats_httpd.XSL_URL_PATH + '/' + mod
+ + '"?>'))
+ for k in DUMMY_DATA[mod].keys():
+ self.assertIsNotNone(root.find(mod + '/' + k))
+ itm = root.find(mod + '/' + k)
+ self.assertIsNotNone(itm)
+ if type(DUMMY_DATA[mod][k]) is list:
+ for v in DUMMY_DATA[mod][k]:
+ for i in v:
+ self.assertIsNotNone(itm.find('zones/' + i))
+ else:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH + '/' + mod + '/' + item)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="'
+ + stats_httpd.XSL_URL_PATH + '/' + mod + '/' + item
+ + '"?>'))
self.assertIsNotNone(root.find(mod + '/' + item))
- # URL is '/bind10/statitics/xsd'
- self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
- tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
- xsdpath = '/'.join(tags)
- self.assertTrue(root.tag.find('schema') > 0)
- self.assertTrue(hasattr(root, 'attrib'))
- self.assertTrue('targetNamespace' in root.attrib)
- self.assertEqual(root.attrib['targetNamespace'],
- stats_httpd.XSD_NAMESPACE)
- for elm in root.findall(xsdpath):
- self.assertIsNotNone(elm.attrib['name'])
- self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
-
- # URL is '/bind10/statitics/xsl'
- self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- url_trans = '{http://www.w3.org/1999/XSL/Transform}'
- url_xhtml = '{http://www.w3.org/1999/xhtml}'
- xslpath = url_trans + 'template/' + url_xhtml + 'tr'
- self.assertEqual(root.tag, url_trans + 'stylesheet')
- for tr in root.findall(xslpath):
- tds = tr.findall(url_xhtml + 'td')
- self.assertIsNotNone(tds)
- self.assertEqual(type(tds), list)
- self.assertTrue(len(tds) > 2)
- self.assertTrue(hasattr(tds[0], 'text'))
- self.assertTrue(tds[0].text in DUMMY_DATA)
- valueof = tds[2].find(url_trans + 'value-of')
- self.assertIsNotNone(valueof)
- self.assertTrue(hasattr(valueof, 'attrib'))
- self.assertIsNotNone(valueof.attrib)
- self.assertTrue('select' in valueof.attrib)
- self.assertTrue(valueof.attrib['select'] in \
- [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
+ # URL is '/bind10/statistics/xml'
+ check_XML_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xml/Module'
+ check_XML_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xml/Module/Item'
+ check_XML_URL_PATH(mod=m, item=k)
+
+ def check_XSD_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XSD_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ if mod is None and item is None:
+ for (mod, itm) in DUMMY_DATA.items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ for (it, val) in itm.items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(it in itm_elm)
+ if type(val) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in val:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+ elif item is None:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ for (it, val) in DUMMY_DATA[mod].items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(it in itm_elm)
+ if type(val) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in val:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+ else:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(item in itm_elm)
+ if type(DUMMY_DATA[mod][item]) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[item].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in DUMMY_DATA[mod][item]:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+
+ # URL is '/bind10/statistics/xsd'
+ check_XSD_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xsd/Module'
+ check_XSD_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xsd/Module/Item'
+ check_XSD_URL_PATH(mod=m, item=k)
+
+ def check_XSL_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XSL_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ if item is None and mod is None:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ for (mod, itms) in DUMMY_DATA.items():
+ self.assertTrue(mod in mod_fe)
+ for (k, v) in itms.items():
+ if type(v) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(k in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ for itms in v:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ elif item is None:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ self.assertTrue(mod in mod_fe)
+ for (k, v) in DUMMY_DATA[mod].items():
+ if type(v) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(k in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ for itms in v:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ else:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ self.assertTrue(mod in mod_fe)
+ if type(DUMMY_DATA[mod][item]) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(item in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[item].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+ for itms in DUMMY_DATA[mod][item]:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[item].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(item in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+
+ # URL is '/bind10/statistics/xsl'
+ check_XSL_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xsl/Module'
+ check_XSL_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xsl/Module/Item'
+ check_XSL_URL_PATH(mod=m, item=k)
# 302 redirect
self.client._http_vsn_str = 'HTTP/1.1'
@@ -202,13 +434,102 @@ class TestHttpHandler(unittest.TestCase):
self.assertEqual(response.getheader('Location'),
"http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
- # 404 NotFound
+ # 404 NotFound (random path)
self.client._http_vsn_str = 'HTTP/1.0'
self.client.putrequest('GET', '/path/to/foo/bar')
self.client.endheaders()
response = self.client.getresponse()
self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/bind10/foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/bind10/statistics/foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + 'Auth') # with no slash
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 200 ok
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '#foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '?foo=bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+
+ # 404 NotFound (too long path)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 404 NotFound (nonexistent module name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 404 NotFound (nonexistent item name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ # 404 NotFound (existent module but nonexistent item name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
def test_do_GET_failed1(self):
# checks status
@@ -242,26 +563,26 @@ class TestHttpHandler(unittest.TestCase):
# failure case(Stats replies an error)
self.stats.mccs.set_command_handler(
lambda cmd, args: \
- isc.config.ccsession.create_answer(1, "I have an error.")
+ isc.config.ccsession.create_answer(1, "specified arguments are incorrect: I have an error.")
)
# request XML
self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
# request XSD
self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
# request XSL
self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
def test_do_HEAD(self):
self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
@@ -306,12 +627,18 @@ class TestHttpServer(unittest.TestCase):
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
- def test_raises(self):
+ def test_raises1(self):
try:
raise stats_httpd.StatsHttpdError('Nothing')
except stats_httpd.StatsHttpdError as err:
self.assertEqual(str(err), 'Nothing')
+ def test_raises2(self):
+ try:
+ raise stats_httpd.StatsHttpdDataError('Nothing')
+ except stats_httpd.StatsHttpdDataError as err:
+ self.assertEqual(str(err), 'Nothing')
+
class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
@@ -488,17 +815,13 @@ class TestStatsHttpd(unittest.TestCase):
self.assertTrue(isinstance(tmpl, string.Template))
opts = dict(
xml_string="<dummy></dummy>",
- xsd_namespace="http://host/path/to/",
- xsd_url_path="/path/to/",
xsl_url_path="/path/to/")
lines = tmpl.substitute(opts)
for n in opts:
self.assertTrue(lines.find(opts[n])>0)
tmpl = self.stats_httpd.open_template(stats_httpd.XSD_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
- opts = dict(
- xsd_string="<dummy></dummy>",
- xsd_namespace="http://host/path/to/")
+ opts = dict(xsd_string="<dummy></dummy>")
lines = tmpl.substitute(opts)
for n in opts:
self.assertTrue(lines.find(opts[n])>0)
@@ -580,26 +903,172 @@ class TestStatsHttpd(unittest.TestCase):
def test_xml_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_data = lambda: \
- { 'Dummy' : { 'foo':'bar' } }
+ self.stats_httpd.get_stats_spec = lambda x,y: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
+ }]
+ }
+ self.stats_httpd.get_stats_data = lambda x,y: \
+ { 'Dummy' : { 'foo':'bar',
+ 'foo2': [
+ {
+ "foo2-1-1" : "bar1",
+ "foo2-1-2" : 10,
+ "foo2-1-3" : 9
+ },
+ {
+ "foo2-1-1" : "bar2",
+ "foo2-1-2" : 8,
+ "foo2-1-3" : 7
+ }
+ ] } }
xml_body1 = self.stats_httpd.open_template(
stats_httpd.XML_TEMPLATE_LOCATION).substitute(
- xml_string='<Dummy><foo>bar</foo></Dummy>',
- xsd_namespace=stats_httpd.XSD_NAMESPACE,
- xsd_url_path=stats_httpd.XSD_URL_PATH,
+ xml_string='<bind10:statistics xmlns:bind10="http://bind10.isc.org/bind10" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://bind10.isc.org/bind10 ' + stats_httpd.XSD_URL_PATH + '"><Dummy><foo>bar</foo><foo2><foo2-1><foo2-1-1>bar1</foo2-1-1><foo2-1-2>10</foo2-1-2><foo2-1-3>9</foo2-1-3></foo2-1><foo2-1><foo2-1-1>bar2</foo2-1-1><foo2-1-2>8</foo2-1-2><foo2-1-3>7</foo2-1-3></foo2-1></foo2></Dummy></bind10:statistics>',
xsl_url_path=stats_httpd.XSL_URL_PATH)
xml_body2 = self.stats_httpd.xml_handler()
self.assertEqual(type(xml_body1), str)
self.assertEqual(type(xml_body2), str)
self.assertEqual(xml_body1, xml_body2)
- self.stats_httpd.get_stats_data = lambda: \
- { 'Dummy' : {'bar':'foo'} }
+ self.stats_httpd.get_stats_spec = lambda x,y: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar foo",
+ "item_title": "Bar"
+ },
+ {
+ "item_name": "bar2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Bar foo",
+ "item_description": "Bar foo",
+ "list_item_spec": {
+ "item_name": "bar2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "bar2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Bar2 1 1",
+ "item_description": "Bar foo"
+ },
+ {
+ "item_name": "bar2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Bar2 1 2",
+ "item_description": "Bar foo"
+ },
+ {
+ "item_name": "bar2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Bar2 1 3",
+ "item_description": "Bar foo"
+ }
+ ]
+ }
+ }]
+ }
+ self.stats_httpd.get_stats_data = lambda x,y: \
+ { 'Dummy' : { 'bar':'foo',
+ 'bar2': [
+ {
+ "bar2-1-1" : "foo1",
+ "bar2-1-2" : 10,
+ "bar2-1-3" : 9
+ },
+ {
+ "bar2-1-1" : "foo2",
+ "bar2-1-2" : 8,
+ "bar2-1-3" : 7
+ }
+ ] } }
xml_body2 = self.stats_httpd.xml_handler()
self.assertNotEqual(xml_body1, xml_body2)
def test_xsd_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "foo",
@@ -608,23 +1077,76 @@ class TestStatsHttpd(unittest.TestCase):
"item_default": "bar",
"item_description": "foo is bar",
"item_title": "Foo"
+ },
+ {
+ "item_name": "hoo_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "2011-01-01T01:01:01Z",
+ "item_description": "hoo time",
+ "item_title": "Hoo Time",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsd_body1 = self.stats_httpd.open_template(
stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=\
- '<all><element name="Dummy"><complexType><all>' \
- + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
- + '<annotation><appinfo>Foo</appinfo>' \
- + '<documentation>foo is bar</documentation>' \
- + '</annotation></element></all>' \
- + '</complexType></element></all>',
- xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsd_string='<schema targetNamespace="' + stats_httpd.XSD_NAMESPACE + '" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:bind10="' + stats_httpd.XSD_NAMESPACE + '"><annotation><documentation>XML schema of the statistics data in BIND 10</documentation></annotation><element name="statistics"><annotation><documentation>A set of statistics data</documentation></annotation><complexType><all><element name="Dummy"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo" type="string"><annotation><appinfo>Foo</appinfo><documentation>foo is bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="hoo_time" type="dateTime"><annotation><appinfo>Hoo Time</appinfo><documentation>hoo time</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2"><complexType><sequence><element maxOccurs="unbounded" minOccurs="1" name="foo2-1"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo2-1-1" type="string"><ann
otation><appinfo>Foo2 1 1</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-2" type="integer"><annotation><appinfo>Foo2 1 2</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-3" type="integer"><annotation><appinfo>Foo2 1 3</appinfo><documentation>Foo bar</documentation></annotation></element></all></complexType></element></sequence></complexType></element></all></complexType></element></all></complexType></element></schema>')
xsd_body2 = self.stats_httpd.xsd_handler()
self.assertEqual(type(xsd_body1), str)
self.assertEqual(type(xsd_body2), str)
self.assertEqual(xsd_body1, xsd_body2)
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "bar",
@@ -633,6 +1155,66 @@ class TestStatsHttpd(unittest.TestCase):
"item_default": "foo",
"item_description": "bar is foo",
"item_title": "bar"
+ },
+ {
+ "item_name": "boo_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "2012-02-02T02:02:02Z",
+ "item_description": "boo time",
+ "item_title": "Boo Time",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsd_body2 = self.stats_httpd.xsd_handler()
@@ -640,30 +1222,77 @@ class TestStatsHttpd(unittest.TestCase):
def test_xsl_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "foo",
"item_type": "string",
"item_optional": False,
"item_default": "bar",
- "item_description": "foo is bar",
+ "item_description": "foo bar",
"item_title": "Foo"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsl_body1 = self.stats_httpd.open_template(
stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
- xsl_string='<xsl:template match="*"><tr>' \
- + '<td>Dummy</td>' \
- + '<td class="title" title="foo is bar">Foo</td>' \
- + '<td><xsl:value-of select="Dummy/foo" /></td>' \
- + '</tr></xsl:template>',
+ xsl_string='<xsl:template match="bind10:statistics"><table><tr><th>Module Name</th><th>Module Item</th></tr><xsl:for-each select="Dummy"><tr><td><a href="' + stats_httpd.XML_URL_PATH + '/Dummy">Dummy</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo">Foo</a></td><td><xsl:value-of select="foo" /></td></tr><xsl:for-each select="foo2"><tr><td class="title" title="Foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo2">Foo bar</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><xsl:for-each select="foo2-1"><tr><td class="title" title="">foo2-1</td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="Foo bar">Foo2 1 1</td><td><xsl:value-of select="foo2-1-1" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 2</td><td><xsl:value-of select="foo2-1-2" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 3
</td><td><xsl:value-of select="foo2-1-3" /></td></tr></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></xsl:template>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
xsl_body2 = self.stats_httpd.xsl_handler()
self.assertEqual(type(xsl_body1), str)
self.assertEqual(type(xsl_body2), str)
self.assertEqual(xsl_body1, xsl_body2)
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "bar",
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 3813c7e..3c8599a 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -226,7 +226,7 @@ class TestStats(unittest.TestCase):
'show', 'Stats',
params={ 'owner' : 'Boss',
'name' : 'boot_time' }),
- (0, self.const_datetime))
+ (0, {'Boss': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command(
'set', 'Stats',
@@ -238,7 +238,7 @@ class TestStats(unittest.TestCase):
'show', 'Stats',
params={ 'owner' : 'Boss',
'name' : 'boot_time' }),
- (0, self.const_datetime))
+ (0, {'Boss': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
@@ -321,25 +321,25 @@ class TestStats(unittest.TestCase):
my_statistics_data = self.stats.get_statistics_data()
self.assertTrue('Stats' in my_statistics_data)
self.assertTrue('Boss' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data['Boss'])
my_statistics_data = self.stats.get_statistics_data(owner='Stats')
- self.assertTrue('report_time' in my_statistics_data)
- self.assertTrue('boot_time' in my_statistics_data)
- self.assertTrue('last_update_time' in my_statistics_data)
- self.assertTrue('timestamp' in my_statistics_data)
- self.assertTrue('lname' in my_statistics_data)
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('report_time' in my_statistics_data['Stats'])
+ self.assertTrue('boot_time' in my_statistics_data['Stats'])
+ self.assertTrue('last_update_time' in my_statistics_data['Stats'])
+ self.assertTrue('timestamp' in my_statistics_data['Stats'])
+ self.assertTrue('lname' in my_statistics_data['Stats'])
self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
- my_statistics_data = self.stats.get_statistics_data(owner='Stats')
- self.assertTrue('boot_time' in my_statistics_data)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['report_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['boot_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['last_update_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
- self.assertEqual(my_statistics_data, 0.0)
+ self.assertEqual(my_statistics_data['Stats']['timestamp'], 0.0)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
- self.assertEqual(my_statistics_data, '')
+ self.assertEqual(my_statistics_data, {'Stats': {'lname':''}})
self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
owner='Stats', name='Bar')
self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
@@ -385,10 +385,25 @@ class TestStats(unittest.TestCase):
1, "specified arguments are incorrect: owner: Foo, name: bar"))
self.assertEqual(self.stats.command_show(owner='Auth'),
isc.config.create_answer(
- 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ 0, {'Auth':{ 'queries.udp': 0,
+ 'queries.tcp': 0,
+ 'queries.perzone': [{ 'zonename': 'test1.example',
+ 'queries.udp': 1,
+ 'queries.tcp': 2 },
+ { 'zonename': 'test2.example',
+ 'queries.udp': 3,
+ 'queries.tcp': 4 }] }}))
self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
isc.config.create_answer(
- 0, 0))
+ 0, {'Auth': {'queries.udp':0}}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.perzone'),
+ isc.config.create_answer(
+ 0, {'Auth': {'queries.perzone': [{ 'zonename': 'test1.example',
+ 'queries.udp': 1,
+ 'queries.tcp': 2 },
+ { 'zonename': 'test2.example',
+ 'queries.udp': 3,
+ 'queries.tcp': 4 }]}}))
orig_get_timestamp = stats.get_timestamp
orig_get_datetime = stats.get_datetime
stats.get_timestamp = lambda : self.const_timestamp
@@ -396,7 +411,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(stats.get_timestamp(), self.const_timestamp)
self.assertEqual(stats.get_datetime(), self.const_datetime)
self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
- isc.config.create_answer(0, self.const_datetime))
+ isc.config.create_answer(0, {'Stats': {'report_time':self.const_datetime}}))
self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
stats.get_timestamp = orig_get_timestamp
@@ -442,9 +457,12 @@ class TestStats(unittest.TestCase):
self.assertTrue('item_format' in item)
schema = value['Auth']
- self.assertEqual(len(schema), 2)
+ self.assertEqual(len(schema), 3)
for item in schema:
- self.assertTrue(len(item) == 6)
+ if item['item_type'] == 'list':
+ self.assertEqual(len(item), 7)
+ else:
+ self.assertEqual(len(item), 6)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
self.assertTrue('item_optional' in item)
@@ -455,10 +473,10 @@ class TestStats(unittest.TestCase):
(rcode, value) = isc.config.ccsession.parse_answer(
self.stats.command_showschema(owner='Stats'))
self.assertEqual(rcode, 0)
- self.assertFalse('Stats' in value)
+ self.assertTrue('Stats' in value)
self.assertFalse('Boss' in value)
self.assertFalse('Auth' in value)
- for item in value:
+ for item in value['Stats']:
self.assertTrue(len(item) == 6 or len(item) == 7)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
@@ -472,19 +490,19 @@ class TestStats(unittest.TestCase):
(rcode, value) = isc.config.ccsession.parse_answer(
self.stats.command_showschema(owner='Stats', name='report_time'))
self.assertEqual(rcode, 0)
- self.assertFalse('Stats' in value)
+ self.assertTrue('Stats' in value)
self.assertFalse('Boss' in value)
self.assertFalse('Auth' in value)
- self.assertTrue(len(value) == 7)
- self.assertTrue('item_name' in value)
- self.assertTrue('item_type' in value)
- self.assertTrue('item_optional' in value)
- self.assertTrue('item_default' in value)
- self.assertTrue('item_title' in value)
- self.assertTrue('item_description' in value)
- self.assertTrue('item_format' in value)
- self.assertEqual(value['item_name'], 'report_time')
- self.assertEqual(value['item_format'], 'date-time')
+ self.assertEqual(len(value['Stats'][0]), 7)
+ self.assertTrue('item_name' in value['Stats'][0])
+ self.assertTrue('item_type' in value['Stats'][0])
+ self.assertTrue('item_optional' in value['Stats'][0])
+ self.assertTrue('item_default' in value['Stats'][0])
+ self.assertTrue('item_title' in value['Stats'][0])
+ self.assertTrue('item_description' in value['Stats'][0])
+ self.assertTrue('item_format' in value['Stats'][0])
+ self.assertEqual(value['Stats'][0]['item_name'], 'report_time')
+ self.assertEqual(value['Stats'][0]['item_format'], 'date-time')
self.assertEqual(self.stats.command_showschema(owner='Foo'),
isc.config.create_answer(
@@ -494,7 +512,7 @@ class TestStats(unittest.TestCase):
1, "specified arguments are incorrect: owner: Foo, name: bar"))
self.assertEqual(self.stats.command_showschema(owner='Auth'),
isc.config.create_answer(
- 0, [{
+ 0, {'Auth': [{
"item_default": 0,
"item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
"item_name": "queries.tcp",
@@ -509,17 +527,121 @@ class TestStats(unittest.TestCase):
"item_optional": False,
"item_title": "Queries UDP",
"item_type": "integer"
- }]))
+ },
+ {
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
+ }]}))
self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
isc.config.create_answer(
- 0, {
+ 0, {'Auth': [{
"item_default": 0,
"item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
"item_name": "queries.tcp",
"item_optional": False,
"item_title": "Queries TCP",
"item_type": "integer"
- }))
+ }]}))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.perzone'),
+ isc.config.create_answer(
+ 0, {'Auth':[{
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
+ }]}))
self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
isc.config.create_answer(
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
index 5eb8f92..3f6ff33 100644
--- a/src/bin/stats/tests/test_utils.py
+++ b/src/bin/stats/tests/test_utils.py
@@ -232,6 +232,57 @@ class MockAuth:
"item_default": 0,
"item_title": "Queries UDP",
"item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ },
+ {
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
}
]
}
@@ -251,6 +302,11 @@ class MockAuth:
self.got_command_name = ''
self.queries_tcp = 3
self.queries_udp = 2
+ self.queries_per_zone = [{
+ 'zonename': 'test1.example',
+ 'queries.tcp': 5,
+ 'queries.udp': 4
+ }]
def run(self):
self.mccs.start()
@@ -273,7 +329,8 @@ class MockAuth:
if command == 'sendstats':
params = { "owner": "Auth",
"data": { 'queries.tcp': self.queries_tcp,
- 'queries.udp': self.queries_udp } }
+ 'queries.udp': self.queries_udp,
+ 'queries.per-zone' : self.queries_per_zone } }
return send_command("set", "Stats", params=params, session=self.cc_session)
return isc.config.create_answer(1, "Unknown Command")
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 8f4fa91..cba98ae 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -10,7 +10,7 @@ LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
else
-# sunstudio needs the ds path even if not all paths are necessary
+# Some systems need the ds path even if not all paths are necessary
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
@@ -27,5 +27,6 @@ endif
PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
index ed241c3..3538e3d 100644
Binary files a/src/bin/xfrin/tests/testdata/example.com.sqlite3 and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 1e4d942..eb2c747 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,15 +14,24 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
+import re
import shutil
import socket
import sys
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
+from isc.testutils.rrset_utils import *
from xfrin import *
import xfrin
from isc.xfrin.diff import Diff
import isc.log
+# If we use any python library that is basically a wrapper for
+# a library we use as well (like sqlite3 in our datasources),
+# we must make sure we import ours first; If we have special
+# rpath or libtool rules to pick the correct version, python might
+# choose the wrong one first, if those rules aren't hit first.
+# This would result in missing symbols later.
+import sqlite3
#
# Commonly used (mostly constant) test parameters
@@ -34,11 +43,9 @@ TEST_RRCLASS_STR = 'IN'
TEST_DB_FILE = 'db_file'
TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
- socket.IPPROTO_TCP, '',
(TEST_MASTER_IPV4_ADDRESS, 53))
TEST_MASTER_IPV6_ADDRESS = '::1'
TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
- socket.IPPROTO_TCP, '',
(TEST_MASTER_IPV6_ADDRESS, 53))
TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
@@ -151,7 +158,7 @@ class MockDataSourceClient():
return (DataSourceClient.PARTIALMATCH, self)
raise ValueError('Unexpected input to mock client: bug in test case?')
- def find(self, name, rrtype, target, options):
+ def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
'''Mock ZoneFinder.find().
It returns the predefined SOA RRset to queries for SOA of the common
@@ -170,7 +177,8 @@ class MockDataSourceClient():
return (ZoneFinder.SUCCESS, dup_soa_rrset)
raise ValueError('Unexpected input to mock finder: bug in test case?')
- def get_updater(self, zone_name, replace):
+ def get_updater(self, zone_name, replace, journaling=False):
+ self._journaling_enabled = journaling
return self
def add_rrset(self, rrset):
@@ -221,7 +229,7 @@ class MockXfrinConnection(XfrinConnection):
def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
shutdown_event, master_addr, tsig_key=None):
super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
- shutdown_event, master_addr)
+ shutdown_event, master_addr, TEST_DB_FILE)
self.query_data = b''
self.reply_data = b''
self.force_time_out = False
@@ -271,10 +279,11 @@ class MockXfrinConnection(XfrinConnection):
self.response_generator()
return len(data)
- def create_response_data(self, response=True, bad_qid=False,
+ def create_response_data(self, response=True, auth=True, bad_qid=False,
rcode=Rcode.NOERROR(),
questions=default_questions,
answers=default_answers,
+ authorities=[],
tsig_ctx=None):
resp = Message(Message.RENDER)
qid = self.qid
@@ -285,8 +294,11 @@ class MockXfrinConnection(XfrinConnection):
resp.set_rcode(rcode)
if response:
resp.set_header_flag(Message.HEADERFLAG_QR)
+ if auth:
+ resp.set_header_flag(Message.HEADERFLAG_AA)
[resp.add_question(q) for q in questions]
[resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
+ [resp.add_rrset(Message.SECTION_AUTHORITY, a) for a in authorities]
renderer = MessageRenderer()
if tsig_ctx is not None:
@@ -339,13 +351,44 @@ class TestXfrinInitialSOA(TestXfrinState):
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinFirstData()),
type(self.conn.get_xfrstate()))
- self.assertEqual(1234, self.conn._end_serial)
+ self.assertEqual(1234, self.conn._end_serial.get_value())
def test_handle_not_soa(self):
# The given RR is not of SOA
self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
self.ns_rrset)
+ def test_handle_ixfr_uptodate(self):
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFRUptodate()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_ixfr_uptodate2(self):
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFRUptodate()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_ixfr_uptodate3(self):
+ # Similar to the previous case, but checking serial number arithmetic
+ # comparison
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = isc.dns.Serial(0xffffffff)
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinFirstData()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_axfr_uptodate(self):
+ # "request serial" should matter only for IXFR
+ self.conn._request_type = RRType.AXFR()
+ self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinFirstData()),
+ type(self.conn.get_xfrstate()))
+
def test_finish_message(self):
self.assertTrue(self.state.finish_message(self.conn))
@@ -354,7 +397,8 @@ class TestXfrinFirstData(TestXfrinState):
super().setUp()
self.state = XfrinFirstData()
self.conn._request_type = RRType.IXFR()
- self.conn._request_serial = 1230 # arbitrary chosen serial < 1234
+ # arbitrary chosen serial < 1234:
+ self.conn._request_serial = isc.dns.Serial(1230)
self.conn._diff = None # should be replaced in the AXFR case
def test_handle_ixfr_begin_soa(self):
@@ -434,7 +478,7 @@ class TestXfrinIXFRDelete(TestXfrinState):
# false.
self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual([], self.conn._diff.get_buffer())
- self.assertEqual(1234, self.conn._current_serial)
+ self.assertEqual(1234, self.conn._current_serial.get_value())
self.assertEqual(type(XfrinIXFRAddSOA()),
type(self.conn.get_xfrstate()))
@@ -465,7 +509,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
# We need record the state in 'conn' to check the case where the
# state doesn't change.
XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
- self.conn._current_serial = 1230
+ self.conn._current_serial = isc.dns.Serial(1230)
self.state = self.conn.get_xfrstate()
def test_handle_add_rr(self):
@@ -477,7 +521,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
def test_handle_end_soa(self):
- self.conn._end_serial = 1234
+ self.conn._end_serial = isc.dns.Serial(1234)
self.conn._diff.add_data(self.ns_rrset) # put some dummy change
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -486,7 +530,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
self.assertEqual([], self.conn._diff.get_buffer())
def test_handle_new_delete(self):
- self.conn._end_serial = 1234
+ self.conn._end_serial = isc.dns.Serial(1234)
# SOA RR whose serial is the current one means we are going to a new
# difference, starting with removing that SOA.
self.conn._diff.add_data(self.ns_rrset) # put some dummy change
@@ -497,7 +541,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
def test_handle_out_of_sync(self):
# getting SOA with an inconsistent serial. This is an error.
- self.conn._end_serial = 1235
+ self.conn._end_serial = isc.dns.Serial(1235)
self.assertRaises(XfrinProtocolError, self.state.handle_rr,
self.conn, soa_rrset)
@@ -516,11 +560,24 @@ class TestXfrinIXFREnd(TestXfrinState):
def test_finish_message(self):
self.assertFalse(self.state.finish_message(self.conn))
+class TestXfrinIXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRUptodate()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertRaises(XfrinZoneUptodate, self.state.finish_message,
+ self.conn)
+
class TestXfrinAXFR(TestXfrinState):
def setUp(self):
super().setUp()
self.state = XfrinAXFR()
- self.conn._end_serial = 1234
+ self.conn._end_serial = isc.dns.Serial(1234)
def test_handle_rr(self):
"""
@@ -595,7 +652,10 @@ class TestXfrinConnection(unittest.TestCase):
'questions': [example_soa_question],
'bad_qid': False,
'response': True,
+ 'auth': True,
'rcode': Rcode.NOERROR(),
+ 'answers': default_answers,
+ 'authorities': [],
'tsig': False,
'axfr_after_soa': self._create_normal_response_data
}
@@ -652,8 +712,11 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.reply_data = self.conn.create_response_data(
bad_qid=self.soa_response_params['bad_qid'],
response=self.soa_response_params['response'],
+ auth=self.soa_response_params['auth'],
rcode=self.soa_response_params['rcode'],
questions=self.soa_response_params['questions'],
+ answers=self.soa_response_params['answers'],
+ authorities=self.soa_response_params['authorities'],
tsig_ctx=verify_ctx)
if self.soa_response_params['axfr_after_soa'] != None:
self.conn.response_generator = \
@@ -684,6 +747,15 @@ class TestXfrinConnection(unittest.TestCase):
rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
return rrset
+ def _set_test_zone(self, zone_name):
+ '''Set the zone name for transfer to the specified one.
+
+ It also makes sure that the SOA RR (if it exists) is correctly (re)set.
+
+ '''
+ self.conn._zone_name = zone_name
+ self.conn._zone_soa = self.conn._get_zone_soa()
+
class TestAXFR(TestXfrinConnection):
def setUp(self):
super().setUp()
@@ -778,25 +850,26 @@ class TestAXFR(TestXfrinConnection):
# IXFR query
msg = self.conn._create_query(RRType.IXFR())
check_query(RRType.IXFR(), begin_soa_rrset)
- self.assertEqual(1230, self.conn._request_serial)
+ self.assertEqual(1230, self.conn._request_serial.get_value())
def test_create_ixfr_query_fail(self):
# In these cases _create_query() will fail to find a valid SOA RR to
# insert in the IXFR query, and should raise an exception.
- self.conn._zone_name = Name('no-such-zone.example')
+ self._set_test_zone(Name('no-such-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
RRType.IXFR())
- self.conn._zone_name = Name('partial-match-zone.example')
+ self._set_test_zone(Name('partial-match-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
RRType.IXFR())
- self.conn._zone_name = Name('no-soa.example')
+ self._set_test_zone(Name('no-soa.example'))
self.assertRaises(XfrinException, self.conn._create_query,
RRType.IXFR())
- self.conn._zone_name = Name('dup-soa.example')
+ self._set_test_zone(Name('dup-soa.example'))
+ self.conn._zone_soa = self.conn._get_zone_soa()
self.assertRaises(XfrinException, self.conn._create_query,
RRType.IXFR())
@@ -827,8 +900,10 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
# server tsig check fail, return with RCODE 9 (NOTAUTH)
self.conn._send_query(RRType.SOA())
- self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
- self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+ self.conn.reply_data = \
+ self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
def test_response_without_end_soa(self):
self.conn._send_query(RRType.AXFR())
@@ -841,7 +916,8 @@ class TestAXFR(TestXfrinConnection):
def test_response_bad_qid(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
- self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
def test_response_error_code_bad_sig(self):
self.conn._tsig_key = TSIG_KEY
@@ -852,7 +928,7 @@ class TestAXFR(TestXfrinConnection):
rcode=Rcode.SERVFAIL())
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
- self.__match_exception(XfrinException,
+ self.__match_exception(XfrinProtocolError,
"TSIG verify fail: BADSIG",
self.conn._handle_xfrin_responses)
@@ -864,7 +940,7 @@ class TestAXFR(TestXfrinConnection):
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
- self.__match_exception(XfrinException,
+ self.__match_exception(XfrinProtocolError,
"TSIG verify fail: BADKEY",
self.conn._handle_xfrin_responses)
@@ -877,18 +953,21 @@ class TestAXFR(TestXfrinConnection):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
rcode=Rcode.SERVFAIL())
- self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
def test_response_multi_question(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
questions=[example_axfr_question, example_axfr_question])
- self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(response = False)
- self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
def test_soacheck(self):
# we need to defer the creation until we know the QID, which is
@@ -903,7 +982,7 @@ class TestAXFR(TestXfrinConnection):
def test_soacheck_badqid(self):
self.soa_response_params['bad_qid'] = True
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_bad_qid_bad_sig(self):
self.conn._tsig_key = TSIG_KEY
@@ -913,19 +992,123 @@ class TestAXFR(TestXfrinConnection):
self.conn.response_generator = self._create_soa_response_data
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
- self.__match_exception(XfrinException,
+ self.__match_exception(XfrinProtocolError,
"TSIG verify fail: BADSIG",
self.conn._check_soa_serial)
def test_soacheck_non_response(self):
self.soa_response_params['response'] = False
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_error_code(self):
self.soa_response_params['rcode'] = Rcode.SERVFAIL()
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_notauth(self):
+ self.soa_response_params['auth'] = False
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_uptodate(self):
+ # Primary's SOA serial is identical to the local serial
+ self.soa_response_params['answers'] = [begin_soa_rrset]
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+ def test_soacheck_uptodate2(self):
+ # Primary's SOA serial is "smaller" than the local serial
+ self.soa_response_params['answers'] = [create_soa(1229)]
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+ def test_soacheck_uptodate3(self):
+ # Similar to the previous case, but checking the comparison is based
+ # on the serial number arithmetic.
+ self.soa_response_params['answers'] = [create_soa(0xffffffff)]
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+ def test_soacheck_newzone(self):
+ # Primary's SOA is 'old', but this secondary doesn't know anything
+ # about the zone yet, so it should accept it.
+ def response_generator():
+ # _request_serial is set in _check_soa_serial(). Reset it here.
+ self.conn._request_serial = None
+ self._create_soa_response_data()
+ self.soa_response_params['answers'] = [begin_soa_rrset]
+ self.conn.response_generator = response_generator
+ self.assertEqual(XFRIN_OK, self.conn._check_soa_serial())
+
+ def test_soacheck_question_empty(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['questions'] = []
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_question_name_mismatch(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['questions'] = [Question(Name('example.org'),
+ TEST_RRCLASS,
+ RRType.SOA())]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_question_class_mismatch(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+ RRClass.CH(),
+ RRType.SOA())]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_question_type_mismatch(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+ TEST_RRCLASS,
+ RRType.AAAA())]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_no_soa(self):
+ # The response just doesn't contain SOA without any other indication
+ # of errors.
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['answers'] = []
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_soa_name_mismatch(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['answers'] = [create_soa(1234,
+ Name('example.org'))]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_soa_class_mismatch(self):
+ self.conn.response_generator = self._create_soa_response_data
+ soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
+ soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+ self.soa_response_params['answers'] = [soa]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_multiple_soa(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['answers'] = [soa_rrset, soa_rrset]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_cname_response(self):
+ self.conn.response_generator = self._create_soa_response_data
+ # Add an SOA to the answer, too, so the error is due to the extra CNAME, not a missing SOA
+ self.soa_response_params['answers'] = [soa_rrset, create_cname()]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_referral_response(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['answers'] = []
+ self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+ def test_soacheck_nodata_response(self):
+ self.conn.response_generator = self._create_soa_response_data
+ self.soa_response_params['answers'] = []
+ self.soa_response_params['authorities'] = [soa_rrset]
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_with_tsig(self):
# Use a mock tsig context emulating a validly signed response
@@ -944,7 +1127,7 @@ class TestAXFR(TestXfrinConnection):
self.soa_response_params['rcode'] = Rcode.NOTAUTH()
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_with_tsig_noerror_badsig(self):
self.conn._tsig_key = TSIG_KEY
@@ -957,7 +1140,7 @@ class TestAXFR(TestXfrinConnection):
# treat this as a final failure (just as BIND 9 does).
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_with_tsig_unsigned_response(self):
# we can use a real TSIGContext for this. the response doesn't
@@ -966,14 +1149,14 @@ class TestAXFR(TestXfrinConnection):
# it as a fatal transaction failure, too.
self.conn._tsig_key = TSIG_KEY
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_with_unexpected_tsig_response(self):
# we reject unexpected TSIG in responses (following BIND 9's
# behavior)
self.soa_response_params['tsig'] = True
self.conn.response_generator = self._create_soa_response_data
- self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_response_shutdown(self):
self.conn.response_generator = self._create_normal_response_data
@@ -1132,6 +1315,7 @@ class TestAXFR(TestXfrinConnection):
def test_do_xfrin(self):
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ self.assertFalse(self.conn._datasrc_client._journaling_enabled)
def test_do_xfrin_with_tsig(self):
# use TSIG with a mock context. we fake all verify results to
@@ -1234,6 +1418,18 @@ class TestAXFR(TestXfrinConnection):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+ def test_do_soacheck_uptodate(self):
+ self.soa_response_params['answers'] = [begin_soa_rrset]
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+
+ def test_do_soacheck_protocol_error(self):
+ # There are several cases, but at this level it's sufficient to check
+ # only one. We use the case where there's no SOA in the response.
+ self.soa_response_params['answers'] = []
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
+
def test_do_soacheck_and_xfrin_with_tsig(self):
# We are going to have a SOA query/response transaction, followed by
# AXFR, all TSIG signed. xfrin should use a new TSIG context for
@@ -1266,9 +1462,8 @@ class TestIXFRResponse(TestXfrinConnection):
def setUp(self):
super().setUp()
self.conn._query_id = self.conn.qid = 1035
- self.conn._request_serial = 1230
+ self.conn._request_serial = isc.dns.Serial(1230)
self.conn._request_type = RRType.IXFR()
- self._zone_name = TEST_ZONE_NAME
self.conn._datasrc_client = MockDataSourceClient()
XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
@@ -1283,6 +1478,7 @@ class TestIXFRResponse(TestXfrinConnection):
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertTrue(self.conn._datasrc_client._journaling_enabled)
self.assertEqual([], self.conn._datasrc_client.diffs)
check_diffs(self.assertEqual,
[[('delete', begin_soa_rrset), ('add', soa_rrset)]],
@@ -1342,6 +1538,16 @@ class TestIXFRResponse(TestXfrinConnection):
[[('delete', begin_soa_rrset), ('add', soa_rrset)]],
self.conn._datasrc_client.committed_diffs)
+ def test_ixfr_response_uptodate(self):
+ '''IXFR response indicates the zone is new enough'''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[begin_soa_rrset])
+ self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
+ # no diffs should have been committed
+ check_diffs(self.assertEqual,
+ [], self.conn._datasrc_client.committed_diffs)
+
def test_ixfr_response_broken(self):
'''Test with a broken response.
@@ -1374,6 +1580,22 @@ class TestIXFRResponse(TestXfrinConnection):
[[('delete', begin_soa_rrset), ('add', soa_rrset)]],
self.conn._datasrc_client.committed_diffs)
+ def test_ixfr_response_uptodate_extra(self):
+ '''Similar to 'uptodate' test, but with extra bogus data.
+
+ In either case an exception will be raised, but in this case it's
+ considered an error.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[begin_soa_rrset, soa_rrset])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ # no diffs should have been committed
+ check_diffs(self.assertEqual,
+ [], self.conn._datasrc_client.committed_diffs)
+
def test_ixfr_to_axfr_response(self):
'''AXFR-style IXFR response.
@@ -1387,6 +1609,8 @@ class TestIXFRResponse(TestXfrinConnection):
answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ # In the case of AXFR-style IXFR, journaling must have been disabled.
+ self.assertFalse(self.conn._datasrc_client._journaling_enabled)
self.assertEqual([], self.conn._datasrc_client.diffs)
# The SOA should be added exactly once, and in our implementation
# it should be added at the end of the sequence.
@@ -1475,13 +1699,25 @@ class TestIXFRSession(TestXfrinConnection):
self.conn.response_generator = create_ixfr_response
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
- def test_do_xfrin_fail(self):
+ def test_do_xfrin_fail2(self):
'''IXFR fails due to a bogus DNS message.
'''
self._create_broken_response_data()
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ def test_do_xfrin_uptodate(self):
+ '''IXFR is (gracefully) aborted because serial is not new
+
+ '''
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[begin_soa_rrset])
+ self.conn.response_generator = create_response
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
class TestXFRSessionWithSQLite3(TestXfrinConnection):
'''Tests for XFR sessions using an SQLite3 DB.
@@ -1515,8 +1751,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def get_zone_serial(self):
result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
self.assertEqual(DataSourceClient.SUCCESS, result)
- result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
- None, ZoneFinder.FIND_DEFAULT)
+ result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA())
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual(1, soa.get_rdata_count())
return get_soa_serial(soa.get_rdata()[0])
@@ -1524,7 +1759,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def record_exist(self, name, type):
result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
self.assertEqual(DataSourceClient.SUCCESS, result)
- result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+ result, soa = finder.find(name, type)
return result == ZoneFinder.SUCCESS
def test_do_ixfrin_sqlite3(self):
@@ -1536,9 +1771,22 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self.conn.response_generator = create_ixfr_response
# Confirm xfrin succeeds and SOA is updated
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
- self.assertEqual(1234, self.get_zone_serial())
+ self.assertEqual(1234, self.get_zone_serial().get_value())
+
+ # Also confirm the corresponding diffs are stored in the diffs table
+ conn = sqlite3.connect(self.sqlite3db_obj)
+ cur = conn.cursor()
+ cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+ soa_rdata_base = 'master.example.com. admin.example.com. ' + \
+ 'SERIAL 3600 1800 2419200 7200'
+ self.assertEqual(cur.fetchall(),
+ [(TEST_ZONE_NAME_STR, 'SOA', 3600,
+ re.sub('SERIAL', str(1230), soa_rdata_base)),
+ (TEST_ZONE_NAME_STR, 'SOA', 3600,
+ re.sub('SERIAL', str(1234), soa_rdata_base))])
+ conn.close()
def test_do_ixfrin_sqlite3_fail(self):
'''Similar to the previous test, but xfrin fails due to error.
@@ -1554,12 +1802,12 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self._create_soa('1235')])
self.conn.response_generator = create_ixfr_response
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
def test_do_ixfrin_nozone_sqlite3(self):
- self.conn._zone_name = Name('nosuchzone.example')
+ self._set_test_zone(Name('nosuchzone.example'))
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
# This should fail even before starting state transition
self.assertEqual(None, self.conn.get_xfrstate())
@@ -1575,11 +1823,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self.conn.response_generator = create_response
# Confirm xfrin succeeds and SOA is updated, A RR is deleted.
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
RRType.A()))
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
- self.assertEqual(1234, self.get_zone_serial())
+ self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
RRType.A()))
@@ -1607,11 +1855,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
self.conn.response_generator = create_response
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
RRType.A()))
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
- self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
RRType.A()))
@@ -1645,11 +1893,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
RRType.AXFR())],
answers=[soa_rrset, self._create_ns(), soa_rrset])
self.conn.response_generator = create_response
- self.conn._zone_name = Name('example.com')
+ self._set_test_zone(Name('example.com'))
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
self.assertEqual(type(XfrinAXFREnd()),
type(self.conn.get_xfrstate()))
- self.assertEqual(1234, self.get_zone_serial())
+ self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
RRType.A()))
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 911b3b3..1167bef 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -24,6 +24,7 @@ import struct
import threading
import socket
import random
+from functools import reduce
from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
from isc.notify import notify_out
@@ -75,9 +76,10 @@ DEFAULT_MASTER_PORT = 53
DEFAULT_ZONE_CLASS = RRClass.IN()
__version__ = 'BIND10'
-# define xfrin rcode
-XFRIN_OK = 0
-XFRIN_FAIL = 1
+
+# Internal result codes of an xfr session
+XFRIN_OK = 0 # normal success
+XFRIN_FAIL = 1 # general failure (internal/external)
class XfrinException(Exception):
pass
@@ -87,6 +89,11 @@ class XfrinProtocolError(Exception):
'''
pass
+class XfrinZoneUptodate(Exception):
+ '''Raised when a zone transfer turns out to be unnecessary because the
+ local copy of the zone is already up to date.
+ '''
+ pass
+
class XfrinZoneInfoException(Exception):
"""This exception is raised if there is an error in the given
configuration (part), or when a command does not have a required
@@ -153,7 +160,7 @@ def format_addrinfo(addrinfo):
"appear to be consisting of (family, socktype, (addr, port))")
def get_soa_serial(soa_rdata):
- '''Extract the serial field of an SOA RDATA and returns it as an intger.
+ '''Extract the serial field of SOA RDATA and return it as a Serial object.
We don't have to be very efficient here, so we first dump the entire RDATA
as a string and convert the first corresponding field. This should be
@@ -162,7 +169,7 @@ def get_soa_serial(soa_rdata):
should be a more direct and convenient way to get access to the SOA
fields.
'''
- return int(soa_rdata.to_text().split()[2])
+ return Serial(int(soa_rdata.to_text().split()[2]))
class XfrinState:
'''
@@ -181,12 +188,12 @@ class XfrinState:
(AXFR or
(recv SOA) AXFR-style IXFR) (SOA, add)
InitialSOA------->FirstData------------->AXFR--------->AXFREnd
- | | ^ (post xfr
- | | | checks, then
- | +--+ commit)
- | (non SOA, add)
- |
- | (non SOA, delete)
+ | | | ^ (post xfr
+ |(IXFR && | | | checks, then
+ | recv SOA | +--+ commit)
+ | not new) | (non SOA, add)
+ V |
+ IXFRUptodate | (non SOA, delete)
(pure IXFR,| +-------+
keep handling)| (Delete SOA) V |
+ ->IXFRDeleteSOA------>IXFRDelete--+
@@ -300,13 +307,14 @@ class XfrinInitialSOA(XfrinState):
+ rr.get_type().to_text() + ' received)')
conn._end_serial = get_soa_serial(rr.get_rdata()[0])
- # FIXME: we need to check the serial is actually greater than ours.
- # To do so, however, we need to implement serial number arithmetic.
- # Although it wouldn't be a big task, we'll leave it for a separate
- # task for now. (Always performing xfr could be inefficient, but
- # shouldn't do any harm otherwise)
+ if conn._request_type == RRType.IXFR() and \
+ conn._end_serial <= conn._request_serial:
+ logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
+ conn._request_serial, conn._end_serial)
+ self.set_xfrstate(conn, XfrinIXFRUptodate())
+ else:
+ self.set_xfrstate(conn, XfrinFirstData())
- self.set_xfrstate(conn, XfrinFirstData())
return True
class XfrinFirstData(XfrinState):
@@ -367,7 +375,10 @@ class XfrinIXFRDeleteSOA(XfrinState):
' RR is given in IXFRDeleteSOA state')
# This is the beginning state of one difference sequence (changes
# for one SOA update). We need to create a new Diff object now.
- conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+ # Note also that we (unconditionally) enable journaling here. The
+ # Diff constructor may internally disable it, however, if the
+ # underlying data source doesn't support journaling.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name, False, True)
conn._diff.delete_data(rr)
self.set_xfrstate(conn, XfrinIXFRDelete())
return True
@@ -427,6 +438,14 @@ class XfrinIXFREnd(XfrinState):
'''
return False
+class XfrinIXFRUptodate(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after single IXFR response ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ raise XfrinZoneUptodate
+
class XfrinAXFR(XfrinState):
def handle_rr(self, conn, rr):
"""
@@ -470,10 +489,13 @@ class XfrinConnection(asyncore.dispatcher):
def __init__(self,
sock_map, zone_name, rrclass, datasrc_client,
- shutdown_event, master_addrinfo, tsig_key=None,
+ shutdown_event, master_addrinfo, db_file, tsig_key=None,
idle_timeout=60):
'''Constructor of the XfrinConnection class.
+ db_file: SQLite3 DB file. Unfortunately we still need this for
+ temporary workaround in _get_zone_soa(). This should be
+ removed when we eliminate the need for the workaround.
idle_timeout: max idle time for read data from socket.
datasrc_client: the data source client object used for the XFR session.
This will eventually replace db_file completely.
@@ -497,7 +519,9 @@ class XfrinConnection(asyncore.dispatcher):
self._rrclass = rrclass
# Data source handler
+ self._db_file = db_file
self._datasrc_client = datasrc_client
+ self._zone_soa = self._get_zone_soa()
self._sock_map = sock_map
self._soa_rr_count = 0
@@ -521,6 +545,55 @@ class XfrinConnection(asyncore.dispatcher):
self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
self.setblocking(1)
+ def _get_zone_soa(self):
+ '''Retrieve the current SOA RR of the zone to be transferred.
+
+ It will be used for various purposes in subsequent xfr protocol
+ processing. It is validly possible that the zone is currently
+ empty and therefore doesn't have an SOA, so this method doesn't
+ consider it an error and returns None in such a case. It may or
+ may not result in failure in the actual processing depending on
+ how the SOA is used.
+
+ When the zone has an SOA RR, this method makes sure that it's
+ valid, i.e., it has exactly one RDATA; if it is not the case
+ this method returns None.
+
+ If the underlying data source doesn't even know the zone, this method
+ tries to provide backward compatible behavior where xfrin is
+ responsible for creating the zone in the corresponding DB table.
+ For a longer term we should deprecate this behavior by introducing
+ more generic zone management framework, but at the moment we try
+ to not surprise existing users. (Note also that the part of
+ providing the compatible behavior uses the old data source API.
+ We'll deprecate this API in a near future, too).
+
+ '''
+ # get the zone finder. this must be SUCCESS (not even
+ # PARTIALMATCH) because we are specifying the zone origin name.
+ result, finder = self._datasrc_client.find_zone(self._zone_name)
+ if result != DataSourceClient.SUCCESS:
+ # The data source doesn't know the zone. For now, we provide
+ # backward compatibility and create a new one ourselves.
+ isc.datasrc.sqlite3_ds.load(self._db_file,
+ self._zone_name.to_text(),
+ lambda : [])
+ logger.warn(XFRIN_ZONE_CREATED, self.zone_str())
+ # try again
+ result, finder = self._datasrc_client.find_zone(self._zone_name)
+ if result != DataSourceClient.SUCCESS:
+ return None
+ result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
+ return None
+ if soa_rrset.get_rdata_count() != 1:
+ logger.warn(XFRIN_ZONE_MULTIPLE_SOA, self.zone_str(),
+ soa_rrset.get_rdata_count())
+ return None
+ return soa_rrset
+
def __set_xfrstate(self, new_state):
self.__state = new_state
@@ -542,37 +615,16 @@ class XfrinConnection(asyncore.dispatcher):
str(e))
return False
- def _get_zone_soa(self):
- result, finder = self._datasrc_client.find_zone(self._zone_name)
- if result != DataSourceClient.SUCCESS:
- raise XfrinException('Zone not found in the given data ' +
- 'source: ' + self.zone_str())
- result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
- None, ZoneFinder.FIND_DEFAULT)
- if result != ZoneFinder.SUCCESS:
- raise XfrinException('SOA RR not found in zone: ' +
- self.zone_str())
- # Especially for database-based zones, a working zone may be in
- # a broken state where it has more than one SOA RR. We proactively
- # check the condition and abort the xfr attempt if we identify it.
- if soa_rrset.get_rdata_count() != 1:
- raise XfrinException('Invalid number of SOA RRs for ' +
- self.zone_str() + ': ' +
- str(soa_rrset.get_rdata_count()))
- return soa_rrset
-
def _create_query(self, query_type):
'''Create an XFR-related query message.
- query_type is either SOA, AXFR or IXFR. For type IXFR, it searches
- the associated data source for the current SOA record to include
- it in the query. If the corresponding zone or the SOA record
- cannot be found, it raises an XfrinException exception. Note that
- this may not necessarily a broken configuration; for the first attempt
- of transfer the secondary may not have any boot-strap zone
- information, in which case IXFR simply won't work. The xfrin
- should then fall back to AXFR. _request_serial is recorded for
- later use.
+ query_type is either SOA, AXFR or IXFR. An IXFR query needs the
+ zone's current SOA record. If it's not known, it raises an
+ XfrinException exception. Note that this may not necessarily be a
+ broken configuration; for the first attempt of transfer the secondary
+ may not have any boot-strap zone information, in which case IXFR
+ simply won't work. The xfrin should then fall back to AXFR.
+ _request_serial is recorded for later use.
'''
msg = Message(Message.RENDER)
@@ -582,27 +634,19 @@ class XfrinConnection(asyncore.dispatcher):
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+
+ # Remember our serial, if known
+ self._request_serial = get_soa_serial(self._zone_soa.get_rdata()[0]) \
+ if self._zone_soa is not None else None
+
+ # Set the authority section with our SOA for IXFR
if query_type == RRType.IXFR():
- # get the zone finder. this must be SUCCESS (not even
- # PARTIALMATCH) because we are specifying the zone origin name.
- zone_soa_rr = self._get_zone_soa()
- msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
- self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
- else:
- # For AXFR, we temporarily provide backward compatible behavior
- # where xfrin is responsible for creating zone in the corresponding
- # DB table. Note that the code below uses the old data source
- # API and assumes SQLite3 in an ugly manner. We'll have to
- # develop a better way of managing zones in a generic way and
- # eliminate the code like the one here.
- try:
- self._get_zone_soa()
- except XfrinException:
- def empty_rr_generator():
- return []
- isc.datasrc.sqlite3_ds.load(self._db_file,
- self._zone_name.to_text(),
- empty_rr_generator)
+ if self._zone_soa is None:
+ # (incremental) IXFR doesn't work without known SOA
+ raise XfrinException('Failed to create IXFR query due to no ' +
+ 'SOA for ' + self.zone_str())
+ msg.add_rrset(Message.SECTION_AUTHORITY, self._zone_soa)
+
return msg
def _send_data(self, data):
@@ -656,7 +700,8 @@ class XfrinConnection(asyncore.dispatcher):
if self._tsig_ctx is not None:
tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
if tsig_error != TSIGError.NOERROR:
- raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+ raise XfrinProtocolError('TSIG verify fail: %s' %
+ str(tsig_error))
elif tsig_record is not None:
# If the response includes a TSIG while we didn't sign the query,
# we treat it as an error. RFC doesn't say anything about this
@@ -665,13 +710,78 @@ class XfrinConnection(asyncore.dispatcher):
# implementation would return such a response, and since this is
# part of security mechanism, it's probably better to be more
# strict.
- raise XfrinException('Unexpected TSIG in response')
+ raise XfrinProtocolError('Unexpected TSIG in response')
+
+ def __parse_soa_response(self, msg, response_data):
+ '''Parse a response to SOA query and extract the SOA from answer.
+
+ This is a subroutine of _check_soa_serial(). This method also
+ validates message, and rejects bogus responses with XfrinProtocolError.
+
+ If everything is okay, it returns the SOA RR from the answer section
+ of the response.
+
+ '''
+ # Check TSIG integrity and validate the header. Unlike AXFR/IXFR,
+ # we should be more strict for SOA queries and check the AA flag, too.
+ self._check_response_tsig(msg, response_data)
+ self._check_response_header(msg)
+ if not msg.get_header_flag(Message.HEADERFLAG_AA):
+ raise XfrinProtocolError('non-authoritative answer to SOA query')
+
+ # Validate the question section
+ n_question = msg.get_rr_count(Message.SECTION_QUESTION)
+ if n_question != 1:
+ raise XfrinProtocolError('Invalid response to SOA query: ' +
+ '(' + str(n_question) + ' questions, 1 ' +
+ 'expected)')
+ resp_question = msg.get_question()[0]
+ if resp_question.get_name() != self._zone_name or \
+ resp_question.get_class() != self._rrclass or \
+ resp_question.get_type() != RRType.SOA():
+ raise XfrinProtocolError('Invalid response to SOA query: '
+ 'question mismatch: ' +
+ str(resp_question))
+
+ # Look into the answer section for SOA
+ soa = None
+ for rr in msg.get_section(Message.SECTION_ANSWER):
+ if rr.get_type() == RRType.SOA():
+ if soa is not None:
+ raise XfrinProtocolError('SOA response had multiple SOAs')
+ soa = rr
+ # There should not be a CNAME record at top of zone.
+ if rr.get_type() == RRType.CNAME():
+ raise XfrinProtocolError('SOA query resulted in CNAME')
+
+ # If SOA is not found, try to figure out the reason then report it.
+ if soa is None:
+ # See if we have any SOA records in the authority section.
+ for rr in msg.get_section(Message.SECTION_AUTHORITY):
+ if rr.get_type() == RRType.NS():
+ raise XfrinProtocolError('SOA query resulted in referral')
+ if rr.get_type() == RRType.SOA():
+ raise XfrinProtocolError('SOA query resulted in NODATA')
+ raise XfrinProtocolError('No SOA record found in response to ' +
+ 'SOA query')
+
+ # Check if the SOA is really what we asked for
+ if soa.get_name() != self._zone_name or \
+ soa.get_class() != self._rrclass:
+ raise XfrinProtocolError("SOA response doesn't match query: " +
+ str(soa))
+
+ # All okay, return it
+ return soa
+
def _check_soa_serial(self):
- ''' Compare the soa serial, if soa serial in master is less than
- the soa serial in local, Finish xfrin.
- False: soa serial in master is less or equal to the local one.
- True: soa serial in master is bigger
+ '''Send SOA query and compare the local and remote serials.
+
+ If we know our local serial and the remote serial isn't newer
+ than ours, we abort the session with XfrinZoneUptodate.
+ On success it returns XFRIN_OK for testing. The caller won't use it.
+
'''
self._send_query(RRType.SOA())
@@ -679,18 +789,23 @@ class XfrinConnection(asyncore.dispatcher):
msg_len = socket.htons(struct.unpack('H', data_len)[0])
soa_response = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
- msg.from_wire(soa_response)
+ msg.from_wire(soa_response, Message.PRESERVE_ORDER)
+
+ # Validate/parse the rest of the response, and extract the SOA
+ # from the answer section
+ soa = self.__parse_soa_response(msg, soa_response)
+
+ # Compare the two serials. If ours is 'new', abort with ZoneUptodate.
+ primary_serial = get_soa_serial(soa.get_rdata()[0])
+ if self._request_serial is not None and \
+ self._request_serial >= primary_serial:
+ if self._request_serial != primary_serial:
+ logger.info(XFRIN_ZONE_SERIAL_AHEAD, primary_serial,
+ self.zone_str(),
+ format_addrinfo(self._master_addrinfo),
+ self._request_serial)
+ raise XfrinZoneUptodate
- # TSIG related checks, including an unexpected signed response
- self._check_response_tsig(msg, soa_response)
-
- # perform some minimal level validation. It's an open issue how
- # strict we should be (see the comment in _check_response_header())
- self._check_response_header(msg)
-
- # TODO, need select soa record from data source then compare the two
- # serial, current just return OK, since this function hasn't been used
- # now.
return XFRIN_OK
def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
@@ -701,22 +816,30 @@ class XfrinConnection(asyncore.dispatcher):
self._request_type = request_type
# Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
# to hardcode here.
- request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+ req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
if check_soa:
- ret = self._check_soa_serial()
-
- if ret == XFRIN_OK:
- logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
- self.zone_str())
- self._send_query(self._request_type)
- self.__state = XfrinInitialSOA()
- self._handle_xfrin_responses()
- logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
- self.zone_str())
-
- except (XfrinException, XfrinProtocolError) as e:
- logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
- self.zone_str(), str(e))
+ self._check_soa_serial()
+
+ logger.info(XFRIN_XFR_TRANSFER_STARTED, req_str, self.zone_str())
+ self._send_query(self._request_type)
+ self.__state = XfrinInitialSOA()
+ self._handle_xfrin_responses()
+ logger.info(XFRIN_XFR_TRANSFER_SUCCESS, req_str, self.zone_str())
+
+ except XfrinZoneUptodate:
+ # Eventually we'll probably have to treat this case as a trigger
+ # of trying another primary server, etc, but for now we treat it
+ # as "success".
+ pass
+ except XfrinProtocolError as e:
+ logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+ self.zone_str(),
+ format_addrinfo(self._master_addrinfo), str(e))
+ ret = XFRIN_FAIL
+ except XfrinException as e:
+ logger.error(XFRIN_XFR_TRANSFER_FAILURE, req_str,
+ self.zone_str(),
+ format_addrinfo(self._master_addrinfo), str(e))
ret = XFRIN_FAIL
except Exception as e:
# Catching all possible exceptions like this is generally not a
@@ -727,7 +850,7 @@ class XfrinConnection(asyncore.dispatcher):
# catch it here, but until then we need broadest coverage so that
# we won't miss anything.
- logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+ logger.error(XFRIN_XFR_OTHER_FAILURE, req_str,
self.zone_str(), str(e))
ret = XFRIN_FAIL
finally:
@@ -751,13 +874,14 @@ class XfrinConnection(asyncore.dispatcher):
msg_rcode = msg.get_rcode()
if msg_rcode != Rcode.NOERROR():
- raise XfrinException('error response: %s' % msg_rcode.to_text())
+ raise XfrinProtocolError('error response: %s' %
+ msg_rcode.to_text())
if not msg.get_header_flag(Message.HEADERFLAG_QR):
- raise XfrinException('response is not a response')
+ raise XfrinProtocolError('response is not a response')
if msg.get_qid() != self._query_id:
- raise XfrinException('bad query id')
+ raise XfrinProtocolError('bad query id')
def _check_response_status(self, msg):
'''Check validation of xfr response. '''
@@ -765,7 +889,7 @@ class XfrinConnection(asyncore.dispatcher):
self._check_response_header(msg)
if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
- raise XfrinException('query section count greater than 1')
+ raise XfrinProtocolError('query section count greater than 1')
def _handle_xfrin_responses(self):
read_next_msg = True
@@ -805,8 +929,8 @@ class XfrinConnection(asyncore.dispatcher):
return False
def __process_xfrin(server, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, check_soa, tsig_key,
- request_type, conn_class):
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type, conn_class):
conn = None
exception = None
ret = XFRIN_FAIL
@@ -837,11 +961,9 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
while retry:
retry = False
conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
- shutdown_event, master_addrinfo, tsig_key)
+ shutdown_event, master_addrinfo, db_file,
+ tsig_key)
conn.init_socket()
- # XXX: We still need _db_file for temporary workaround in _create_query().
- # This should be removed when we eliminate the need for the workaround.
- conn._db_file = db_file
ret = XFRIN_FAIL
if conn.connect_to_master():
ret = conn.do_xfrin(check_soa, request_type)
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 86cdec3..5e182d8 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,18 +15,63 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrin messages python module.
+% XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created
+On starting an xfrin session, it is identified that the zone to be
+transferred is not found in the data source. This can happen if a
+secondary DNS server first tries to perform AXFR from a primary server
+without creating the zone image beforehand (e.g. by b10-loadzone). As
+of this writing the xfrin process provides backward compatible
+behavior to previous versions: creating a new one in the data source
+not to surprise existing users too much. This is probably not a good
+idea, however, in terms of who should be responsible for managing
+zones at a higher level. In future it is more likely that a separate
+zone management framework is provided, and the situation where the
+given zone isn't found in xfrin will be treated as an error.
+
+% XFRIN_ZONE_NO_SOA Zone %1 does not have SOA
+On starting an xfrin session, it is identified that the zone to be
+transferred does not have an SOA RR in the data source. This is not
+necessarily an error; if a secondary DNS server first tries to perform
+transfer from a primary server, the zone can be empty, and therefore
+doesn't have an SOA. Subsequent AXFR will fill in the zone; if the
+attempt is IXFR it will fail in query creation.
+
+% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
+On starting an xfrin session, it is identified that the zone to be
+transferred has multiple SOA RRs. Such a zone is broken, but could be
+accidentally configured especially in a data source using "non
+captive" backend database. The implementation ignores all the SOA RRs
+and tries to continue processing as if the zone were empty. This
+means subsequent AXFR can succeed and possibly replace the zone with
+valid content, but an IXFR attempt will fail.
+
+% XFRIN_ZONE_SERIAL_AHEAD Serial number (%1) for %2 received from master %3 < ours (%4)
+The response to an SOA query prior to xfr indicated that the zone's
+SOA serial at the primary server is smaller than that of the xfrin
+client. This is not necessarily an error especially if that
+particular primary server is another secondary server which hasn't got
+the latest version of the zone. But if the primary server is known to
+be the real source of the zone, some unexpected inconsistency may have
+happened, and you may want to take a closer look. In this case xfrin
+doesn't perform subsequent zone transfer.
+
% XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
The XFR transfer for the given zone has failed due to a problem outside
of the xfrin module. Possible reasons are a broken DNS message or failure
in database connection. The error is shown in the log message.
-% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message. Note: due to the code structure
-this can only happen for AXFR.
-
-% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
-The XFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to a protocol
+error, such as an unexpected response from the primary server. The
+error is shown in the log message. It may be because the primary
+server implementation is broken or (although less likely) there was
+some attack attempt, but it can also happen due to configuration
+mismatch such as the remote server does not have authority for the
+zone any more but the local configuration hasn't been updated. So it
+is recommended to check the primary server configuration.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to an internal error.
The error is shown in the log message.
% XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
@@ -118,6 +163,16 @@ daemon will now shut down.
An uncaught exception was raised while running the xfrin daemon. The
exception message is printed in the log message.
+% XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating
+The first SOA record in an IXFR response indicates the zone's serial
+at the primary server is not newer than the client's. This is
+basically unexpected event because normally the client first checks
+the SOA serial by an SOA query, but can still happen if the transfer
+is manually invoked or (although unlikely) there is a rapid change at
+the primary server between the SOA and IXFR queries. The client
+implementation confirms the whole response is this single SOA, and
+aborts the transfer just like a successful case.
+
% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
In an attempt of IXFR processing, the beginning SOA of the first difference
(following the initial SOA that specified the final SOA for all the
diff --git a/src/bin/xfrout/b10-xfrout.8 b/src/bin/xfrout/b10-xfrout.8
index c8b4b07..c810c2f 100644
--- a/src/bin/xfrout/b10-xfrout.8
+++ b/src/bin/xfrout/b10-xfrout.8
@@ -71,6 +71,19 @@ The configurable settings are:
defines the maximum number of outgoing zone transfers that can run concurrently\&. The default is 10\&.
.PP
+\fItsig_key_ring\fR
+A list of TSIG keys (each of which is in the form of name:base64\-key[:algorithm]) used for access control on transfer requests\&. The default is an empty list\&.
+.PP
+
+\fItransfer_acl\fR
+A list of ACL elements that apply to all transfer requests by default (unless overridden in zone_config)\&. See the BIND 10 guide for configuration examples\&. The default is an element that allows any transfer requests\&.
+.PP
+
+\fIzone_config\fR
+A list of JSON objects (i\&.e\&. maps) that define per zone configuration concerning
+\fBb10\-xfrout\fR\&. The supported names of each object are "origin" (the origin name of the zone), "class" (the RR class of the zone, optional, default to "IN"), and "acl_element" (ACL only applicable to transfer requests for that zone)\&. See the BIND 10 guide for configuration examples\&. The default is an empty list, that is, no zone specific configuration\&.
+.PP
+
\fIlog_name\fR
.PP
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 9889b80..4f6a7fa 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -98,6 +98,31 @@
that can run concurrently. The default is 10.
</para>
<para>
+ <varname>tsig_key_ring</varname>
+ A list of TSIG keys (each of which is in the form of
+ name:base64-key[:algorithm]) used for access control on transfer
+ requests.
+ The default is an empty list.
+ </para>
+ <para>
+ <varname>transfer_acl</varname>
+ A list of ACL elements that apply to all transfer requests by
+ default (unless overridden in zone_config). See the BIND 10
+ guide for configuration examples.
+ The default is an element that allows any transfer requests.
+ </para>
+ <para>
+ <varname>zone_config</varname>
+ A list of JSON objects (i.e. maps) that define per zone
+ configuration concerning <command>b10-xfrout</command>.
+ The supported names of each object are "origin" (the origin
+ name of the zone), "class" (the RR class of the zone, optional,
+ default to "IN"), and "acl_element" (ACL only applicable to
+ transfer requests for that zone).
+ See the BIND 10 guide for configuration examples.
+ The default is an empty list, that is, no zone specific configuration.
+ </para>
+ <para>
<varname>log_name</varname>
<!-- TODO -->
</para>
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index ace8fc9..ad6d7e6 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -2,11 +2,18 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/test.sqlite3
+# These are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/creatediff.py
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -24,5 +31,6 @@ endif
B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/testdata/creatediff.py b/src/bin/xfrout/tests/testdata/creatediff.py
new file mode 100755
index 0000000..dab6622
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/creatediff.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3.1
+
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This script was used to create zone differences for IXFR tests.
+
+The result was stored in the test SQLite3 database file, so this script
+itself isn't necessary for testing. It's provided here for reference
+purposes.
+
+'''
+
+import isc.datasrc
+import isc.log
+from isc.dns import *
+from isc.testutils.rrset_utils import *
+
+isc.log.init("dummy") # XXX
+
+ZONE_NAME = Name('example.com')
+NS_NAME_STR = 'a.dns.example.com'
+NS_NAME = Name(NS_NAME_STR)
+
+client = isc.datasrc.DataSourceClient('sqlite3',
+ '{ "database_file": "test.sqlite3" }')
+
+# Install the initial data
+updater = client.get_updater(ZONE_NAME, True)
+updater.add_rrset(create_soa(2011111802))
+updater.add_rrset(create_ns(NS_NAME_STR))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.add_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.commit()
+
+# Incremental update to generate diffs
+updater = client.get_updater(ZONE_NAME, False, True)
+updater.delete_rrset(create_soa(2011111802))
+updater.add_rrset(create_soa(2011111900))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.2', 7200))
+updater.delete_rrset(create_soa(2011111900))
+updater.delete_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.delete_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.add_rrset(create_soa(2011112001))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.1'))
+updater.commit()
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
new file mode 100644
index 0000000..8458d09
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -0,0 +1,6 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 3600 IN SOA master.example.com. admin.example.com. 2011112001 3600 1800 2419200 7200
+example.com. 3600 IN NS a.dns.example.com.
+a.dns.example.com. 3600 IN A 192.0.2.1
+a.dns.example.com. 7200 IN A 192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..9eb14f1
Binary files /dev/null and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 0a9fd3c..ea4de27 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -21,14 +21,26 @@ import os
from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
import isc.config
-from pydnspp import *
+from isc.dns import *
+from isc.testutils.rrset_utils import *
from xfrout import *
import xfrout
import isc.log
import isc.acl.dns
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+#
+# Commonly used (mostly constant) test parameters
+#
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
+TEST_RRCLASS = RRClass.IN()
+IXFR_OK_VERSION = 2011111802
+IXFR_NG_VERSION = 2011112800
+SOA_CURRENT_VERSION = 2011112001
+
# our fake socket, where we can read and insert messages
class MySocket():
def __init__(self, family, type):
@@ -55,19 +67,99 @@ class MySocket():
self.sendqueue = self.sendqueue[size:]
return result
- def read_msg(self):
+ def read_msg(self, parse_options=Message.PARSE_DEFAULT, need_len=False):
sent_data = self.readsent()
get_msg = Message(Message.PARSE)
- get_msg.from_wire(bytes(sent_data[2:]))
+ get_msg.from_wire(bytes(sent_data[2:]), parse_options)
+ if need_len:
+ return (get_msg, len(sent_data) - 2)
return get_msg
def clear_send(self):
del self.sendqueue[:]
-# We subclass the Session class we're testing here, only
-# to override the handle() and _send_data() method
+class MockDataSrcClient:
+ def __init__(self, type, config):
+ pass
+
+ def find_zone(self, zone_name):
+ '''Mock version of find_zone().
+
+ It returns itself (subsequently acting as a mock ZoneFinder) for
+ some test zone names. For a special name it returns NOTFOUND to
+ emulate the condition where the specified zone doesn't exist.
+
+ '''
+ self._zone_name = zone_name
+ if zone_name == Name('notauth.example.com'):
+ return (isc.datasrc.DataSourceClient.NOTFOUND, None)
+ return (isc.datasrc.DataSourceClient.SUCCESS, self)
+
+ def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
+ '''Mock ZoneFinder.find().
+
+ (At the moment) this method only handles query for type SOA.
+ By default it returns a normal SOA RR(set) whose owner name is
+ the query name. It also emulates some unusual cases for special
+ zone names.
+
+ '''
+ if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+ return (ZoneFinder.NXDOMAIN, None)
+ elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+ soa_rrset = create_soa(SOA_CURRENT_VERSION)
+ soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+ return (ZoneFinder.SUCCESS, soa_rrset)
+ elif rrtype == RRType.SOA():
+ return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION))
+ raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+ def get_iterator(self, zone_name, adjust_ttl=False):
+ if zone_name == Name('notauth.example.com'):
+ raise isc.datasrc.Error('no such zone')
+ self._zone_name = zone_name
+ return self
+
+ def get_soa(self): # emulate ZoneIterator.get_soa()
+ if self._zone_name == Name('nosoa.example.com'):
+ return None
+ soa_rrset = create_soa(SOA_CURRENT_VERSION)
+ if self._zone_name == Name('multisoa.example.com'):
+ soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+ return soa_rrset
+
+ def get_journal_reader(self, zone_name, begin_serial, end_serial):
+ if zone_name == Name('notauth2.example.com'):
+ return isc.datasrc.ZoneJournalReader.NO_SUCH_ZONE, None
+ if zone_name == Name('nojournal.example.com'):
+ raise isc.datasrc.NotImplemented('journaling not supported')
+ if begin_serial == IXFR_NG_VERSION:
+ return isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION, None
+ return isc.datasrc.ZoneJournalReader.SUCCESS, self
+
+class MyCCSession(isc.config.ConfigData):
+ def __init__(self):
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
+
+ def get_remote_config_value(self, module_name, identifier):
+ if module_name == "Auth" and identifier == "database_file":
+ return "initdb.file", False
+ else:
+ return "unknown", False
+
+# This constant dictionary stores all default configuration parameters
+# defined in the xfrout spec file.
+DEFAULT_CONFIG = MyCCSession().get_full_config()
+
+# We subclass the Session class we're testing here, only overriding a few
+# methods
class MyXfroutSession(XfroutSession):
- def handle(self):
+ def _handle(self):
+ pass
+
+ def _close_socket(self):
pass
def _send_data(self, sock, data):
@@ -80,12 +172,23 @@ class MyXfroutSession(XfroutSession):
class Dbserver:
def __init__(self):
self._shutdown_event = threading.Event()
+ self.transfer_counter = 0
+ self._max_transfers_out = DEFAULT_CONFIG['transfers_out']
def get_db_file(self):
- return None
+ return 'test.sqlite3'
+ def increase_transfers_counter(self):
+ self.transfer_counter += 1
+ return True
def decrease_transfers_counter(self):
- pass
+ self.transfer_counter -= 1
+
+class TestXfroutSessionBase(unittest.TestCase):
+ '''Base class for tests related to xfrout sessions
-class TestXfroutSession(unittest.TestCase):
+ This class defines common setup/teardown and utility methods. Actual
+ tests are delegated to subclasses.
+
+ '''
def getmsg(self):
msg = Message(Message.PARSE)
msg.from_wire(self.mdata)
@@ -102,15 +205,44 @@ class TestXfroutSession(unittest.TestCase):
def message_has_tsig(self, msg):
return msg.get_tsig_record() is not None
- def create_request_data(self, with_tsig=False):
+ def create_request_data(self, with_question=True, with_tsig=False,
+ ixfr=None, qtype=None, zone_name=TEST_ZONE_NAME,
+ soa_class=TEST_RRCLASS, num_soa=1):
+ '''Create a commonly used XFR request data.
+
+ By default the request type is AXFR; if 'ixfr' is an integer,
+ the request type will be IXFR and an SOA with the serial being
+ the value of the parameter will be included in the authority
+ section.
+
+ This method has various minor parameters only for creating bad
+ format requests for testing purposes:
+ qtype: the RR type of the question section. By default automatically
+ determined by the value of ixfr, but could be an invalid type
+ for testing.
+ zone_name: the query (zone) name. for IXFR, it's also used as
+ the owner name of the SOA in the authority section.
+ soa_class: IXFR only. The RR class of the SOA RR in the authority
+ section.
+ num_soa: IXFR only. The number of SOA RDATAs in the authority
+ section.
+ '''
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com"), RRClass.IN(),
- RRType.AXFR())
- msg.add_question(query_question)
+ req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
+ if with_question:
+ msg.add_question(Question(zone_name, RRClass.IN(),
+ req_type if qtype is None else qtype))
+ if req_type == RRType.IXFR():
+ soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+ # In the RDATA only the serial matters.
+ for i in range(0, num_soa):
+ soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+ 'm r ' + str(ixfr) + ' 1 1 1 1'))
+ msg.add_rrset(Message.SECTION_AUTHORITY, soa)
renderer = MessageRenderer()
if with_tsig:
@@ -121,23 +253,98 @@ class TestXfroutSession(unittest.TestCase):
request_data = renderer.get_data()
return request_data
+ def set_request_type(self, type):
+ self.xfrsess._request_type = type
+ if type == RRType.AXFR():
+ self.xfrsess._request_typestr = 'AXFR'
+ else:
+ self.xfrsess._request_typestr = 'IXFR'
+
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
- TSIGKeyRing(), ('127.0.0.1', 12345),
+ TSIGKeyRing(),
+ (socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
# When not testing ACLs, simply accept
isc.acl.dns.REQUEST_LOADER.load(
[{"action": "ACCEPT"}]),
{})
- self.mdata = self.create_request_data(False)
- self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
+ self.set_request_type(RRType.AXFR()) # test AXFR by default
+ self.mdata = self.create_request_data()
+ self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
+ # some tests replace a module-wide function. We should ensure the
+ # original is used elsewhere.
+ self.orig_get_rrset_len = xfrout.get_rrset_len
+
+ def tearDown(self):
+ xfrout.get_rrset_len = self.orig_get_rrset_len
+ # transfer_counter must always be reset no matter what happens within
+ # the XfroutSession object. We check the condition here.
+ self.assertEqual(0, self.xfrsess._server.transfer_counter)
+
+class TestXfroutSession(TestXfroutSessionBase):
+ def test_quota_error(self):
+ '''Emulating the server being too busy.
+
+ '''
+ self.xfrsess._request_data = self.mdata
+ self.xfrsess._server.increase_transfers_counter = lambda : False
+ XfroutSession._handle(self.xfrsess)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+
+ def test_quota_ok(self):
+ '''The default case in terms of the xfrout quota.
+
+ '''
+ # set up a bogus request, which should result in FORMERR. (it only
+ # has to be something that is different from the previous case)
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_OK_VERSION, num_soa=2)
+ # Replace the data source client to avoid datasrc related exceptions
+ self.xfrsess.ClientClass = MockDataSrcClient
+ XfroutSession._handle(self.xfrsess)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+
+ def test_exception_from_session(self):
+ '''Test the case where the main processing raises an exception.
+
+ We just check it doesn't cause any unexpected disruption and (in tearDown)
+ transfer_counter is correctly reset to 0.
+
+ '''
+ def dns_xfrout_start(fd, msg, quota):
+ raise ValueError('fake exception')
+ self.xfrsess.dns_xfrout_start = dns_xfrout_start
+ XfroutSession._handle(self.xfrsess)
def test_parse_query_message(self):
+ # Valid AXFR
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
self.assertEqual(get_rcode.to_text(), "NOERROR")
+ # Valid IXFR
+ request_data = self.create_request_data(ixfr=2011111801)
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
+ self.assertEqual(Rcode.NOERROR(), rcode)
+
+ # Broken request: no question
+ self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+ self.create_request_data(with_question=False))
+
+ # Broken request: invalid RR type (neither AXFR nor IXFR)
+ self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+ self.create_request_data(qtype=RRType.A()))
+
+ # NOERROR
+ request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
# tsig signed query message
- request_data = self.create_request_data(True)
+ request_data = self.create_request_data(with_tsig=True)
# BADKEY
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -165,20 +372,23 @@ class TestXfroutSession(unittest.TestCase):
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "NOERROR")
# This should be dropped completely, therefore returning None
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(None, rcode)
# This should be refused, therefore REFUSED
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
# TSIG signed request
- request_data = self.create_request_data(True)
+ request_data = self.create_request_data(with_tsig=True)
# If the TSIG check fails, it should not check ACL
# (If it checked ACL as well, it would just drop the request)
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
self.xfrsess._tsig_key_ring = TSIGKeyRing()
rcode, msg = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -216,19 +426,23 @@ class TestXfroutSession(unittest.TestCase):
{"action": "REJECT"}
]))
# both matches
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
# TSIG matches, but address doesn't
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "REFUSED")
# Address matches, but TSIG doesn't (not included)
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
# Neither address nor TSIG matches
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
@@ -289,10 +503,6 @@ class TestXfroutSession(unittest.TestCase):
self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
RRClass.IN()))
- def test_get_query_zone_name(self):
- msg = self.getmsg()
- self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
-
def test_send_data(self):
self.xfrsess._send_data(self.sock, self.mdata)
senddata = self.sock.readsent()
@@ -315,10 +525,13 @@ class TestXfroutSession(unittest.TestCase):
def test_send_message(self):
msg = self.getmsg()
msg.make_response()
- # soa record data with different cases
- soa_record = (4, 3, 'Example.com.', 'com.Example.', 3600, 'SOA', None, 'master.Example.com. admin.exAmple.com. 1234 3600 1800 2419200 7200')
- rrset_soa = self.xfrsess._create_rrset_from_db_record(soa_record)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ # SOA record data with different cases
+ soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+ RRTTL(3600))
+ soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ 'master.Example.com. admin.exAmple.com. ' +
+ '2011112001 3600 1800 2419200 7200'))
+ msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
self.xfrsess._send_message(self.sock, msg)
send_out_data = self.sock.readsent()[2:]
@@ -347,61 +560,44 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(msg.get_rcode(), rcode)
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
- def test_create_rrset_from_db_record(self):
- rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
- self.assertEqual(rrset.get_name().to_text(), "example.com.")
- self.assertEqual(rrset.get_class(), RRClass("IN"))
- self.assertEqual(rrset.get_type().to_text(), "SOA")
- rdata = rrset.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
-
def test_send_message_with_last_soa(self):
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, packet_neet_not_sign)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
- # tsig context is not exist
+ # tsig context does not exist
self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- #answer_rrset_iter = section_iter(get_msg, section.ANSWER())
- answer = get_msg.get_section(Message.SECTION_ANSWER)[0]#answer_rrset_iter.get_rrset()
+ answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
self.assertEqual(answer.get_name().to_text(), "example.com.")
self.assertEqual(answer.get_class(), RRClass("IN"))
self.assertEqual(answer.get_type().to_text(), "SOA")
rdata = answer.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
- # msg is the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, TSIG_SIGN_EVERY_NTH)
+ # Sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
- # tsig context is not exist
+ # tsig context does not exist
self.assertFalse(self.message_has_tsig(get_msg))
def test_send_message_with_last_soa_with_tsig(self):
# create tsig context
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
- # msg is not the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, packet_neet_not_sign)
+ # Sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
@@ -409,33 +605,25 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- # msg is the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, TSIG_SIGN_EVERY_NTH)
- get_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(get_msg))
-
def test_trigger_send_message_with_last_soa(self):
rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
# length larger than MAX-len(rrset)
- length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+ get_rrset_len(self.soa_rrset) + 1
# give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- packet_neet_not_sign)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset,
+ length_need_split)
get_msg = self.sock.read_msg()
self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
@@ -455,100 +643,139 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- #answer_rrset_iter = section_iter(get_msg, Message.SECTION_ANSWER)
answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
self.assertEqual(answer.get_name().to_text(), "example.com.")
self.assertEqual(answer.get_class(), RRClass("IN"))
self.assertEqual(answer.get_type().to_text(), "SOA")
rdata = answer.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
def test_trigger_send_message_with_last_soa_with_tsig(self):
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ msg.add_rrset(Message.SECTION_ANSWER, self.soa_rrset)
# length larger than MAX-len(rrset)
- length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+ get_rrset_len(self.soa_rrset) + 1
# give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- packet_neet_not_sign)
- get_msg = self.sock.read_msg()
- # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
- self.assertFalse(self.message_has_tsig(get_msg))
- # the last packet should be tsig signed
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset,
+ length_need_split)
+ # Both messages should have TSIG RRs
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
- # and it should not have sent anything else
- self.assertEqual(0, len(self.sock.sendqueue))
-
-
- # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- xfrout.TSIG_SIGN_EVERY_NTH)
- get_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(get_msg))
- # the last packet should be tsig signed
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
def test_get_rrset_len(self):
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
- self.assertEqual(82, get_rrset_len(rrset_soa))
-
- def test_zone_has_soa(self):
- global sqlite3_ds
- def mydb1(zone, file):
- return True
- sqlite3_ds.get_zone_soa = mydb1
- self.assertTrue(self.xfrsess._zone_has_soa(""))
- def mydb2(zone, file):
- return False
- sqlite3_ds.get_zone_soa = mydb2
- self.assertFalse(self.xfrsess._zone_has_soa(""))
-
- def test_zone_exist(self):
- global sqlite3_ds
- def zone_exist(zone, file):
- return zone
- sqlite3_ds.zone_exist = zone_exist
- self.assertTrue(self.xfrsess._zone_exist(True))
- self.assertFalse(self.xfrsess._zone_exist(False))
-
- def test_check_xfrout_available(self):
- def zone_exist(zone):
- return zone
- def zone_has_soa(zone):
- return (not zone)
- self.xfrsess._zone_exist = zone_exist
- self.xfrsess._zone_has_soa = zone_has_soa
- self.assertEqual(self.xfrsess._check_xfrout_available(False).to_text(), "NOTAUTH")
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "SERVFAIL")
-
- def zone_empty(zone):
- return zone
- self.xfrsess._zone_has_soa = zone_empty
- def false_func():
- return False
- self.xfrsess._server.increase_transfers_counter = false_func
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "REFUSED")
- def true_func():
- return True
- self.xfrsess._server.increase_transfers_counter = true_func
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "NOERROR")
+ self.assertEqual(82, get_rrset_len(self.soa_rrset))
+
+ def test_xfrout_axfr_setup(self):
+ self.xfrsess.ClientClass = MockDataSrcClient
+ # Successful case. A zone iterator should be set up.
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+
+ # Failure cases
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
+ Rcode.NOTAUTH())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
+ Rcode.SERVFAIL())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
+ Rcode.SERVFAIL())
+
+ def test_xfrout_ixfr_setup(self):
+ self.xfrsess.ClientClass = MockDataSrcClient
+ self.set_request_type(RRType.IXFR())
+
+ # Successful case of pure IXFR. A zone journal reader should be set
+ # up.
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._jnl_reader)
+
+ # Successful case, but as a result of falling back to AXFR-style
+ # IXFR. A zone iterator should be set up instead of a journal reader.
+ self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+ self.assertEqual(None, self.xfrsess._jnl_reader)
+
+ # Successful case, but the requested SOA serial is equal to that of
+ # the local SOA. Both iterator and jnl_reader should be None,
+ # indicating that the response will contain just one SOA.
+ self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertEqual(None, self.xfrsess._iterator)
+ self.assertEqual(None, self.xfrsess._jnl_reader)
+
+ # The data source doesn't support journaling. Should fallback to AXFR.
+ zone_name = Name('nojournal.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+
+ # Failure cases
+ zone_name = Name('notauth.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ # this is a strange case: zone's SOA will be found but the journal
+ # reader won't be created due to 'no such zone'.
+ zone_name = Name('notauth2.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ zone_name = Name('nosoa.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ zone_name = Name('multisoa.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+
+ # query name doesn't match the SOA's owner
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+ # query's RR class doesn't match the SOA's class
+ zone_name = TEST_ZONE_NAME # make sure the name matches this time
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ soa_class=RRClass.CH())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+ # multiple SOA RRs
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ num_soa=2)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
def test_dns_xfrout_start_formerror(self):
# formerror
@@ -556,102 +783,336 @@ class TestXfroutSession(unittest.TestCase):
sent_data = self.sock.readsent()
self.assertEqual(len(sent_data), 0)
- def default(self, param):
- return "example.com"
-
def test_dns_xfrout_start_notauth(self):
- self.xfrsess._get_query_zone_name = self.default
- def notauth(formpara):
+ def notauth(msg, name, rrclass):
return Rcode.NOTAUTH()
- self.xfrsess._check_xfrout_available = notauth
+ self.xfrsess._xfrout_setup = notauth
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
get_msg = self.sock.read_msg()
self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
+ def test_dns_xfrout_start_datasrc_servfail(self):
+ def internal_raise(x, y):
+ raise isc.datasrc.Error('exception for the sake of test')
+ self.xfrsess.ClientClass = internal_raise
+ self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+
def test_dns_xfrout_start_noerror(self):
- self.xfrsess._get_query_zone_name = self.default
- def noerror(form):
+ def noerror(msg, name, rrclass):
return Rcode.NOERROR()
- self.xfrsess._check_xfrout_available = noerror
+ self.xfrsess._xfrout_setup = noerror
- def myreply(msg, sock, zonename):
+ def myreply(msg, sock):
self.sock.send(b"success")
self.xfrsess._reply_xfrout_query = myreply
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
self.assertEqual(self.sock.readsent(), b"success")
- def test_reply_xfrout_query_noerror(self):
- global sqlite3_ds
- def get_zone_soa(zonename, file):
- return self.soa_record
-
- def get_zone_datas(zone, file):
- return [self.soa_record]
-
- sqlite3_ds.get_zone_soa = get_zone_soa
- sqlite3_ds.get_zone_datas = get_zone_datas
- self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+ def test_reply_xfrout_query_axfr(self):
+ self.xfrsess._soa = self.soa_rrset
+ self.xfrsess._iterator = [self.soa_rrset]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
reply_msg = self.sock.read_msg()
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
- def test_reply_xfrout_query_noerror_with_tsig(self):
- rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
- global sqlite3_ds
+ def test_reply_xfrout_query_axfr_with_tsig(self):
+ rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+ RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
global xfrout
- def get_zone_soa(zonename, file):
- return self.soa_record
-
- def get_zone_datas(zone, file):
- zone_rrsets = []
- for i in range(0, 100):
- zone_rrsets.insert(i, rrset_data)
- return zone_rrsets
def get_rrset_len(rrset):
return 65520
- sqlite3_ds.get_zone_soa = get_zone_soa
- sqlite3_ds.get_zone_datas = get_zone_datas
+ self.xfrsess._soa = self.soa_rrset
+ self.xfrsess._iterator = [rrset for i in range(0, 100)]
xfrout.get_rrset_len = get_rrset_len
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
-
- # tsig signed first package
- reply_msg = self.sock.read_msg()
- self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
- self.assertTrue(self.message_has_tsig(reply_msg))
- # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
- for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
- reply_msg = self.sock.read_msg()
- self.assertFalse(self.message_has_tsig(reply_msg))
- # TSIG_SIGN_EVERY_NTH packet has tsig
- reply_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(reply_msg))
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
- for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
+ # All messages must have TSIG as we don't support the feature of
+ # skipping intermediate TSIG records (with bulk signing).
+ for i in range(0, 102): # 102 = all 100 RRs from iterator and 2 SOAs
reply_msg = self.sock.read_msg()
- self.assertFalse(self.message_has_tsig(reply_msg))
- # tsig signed last package
- reply_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(reply_msg))
+ # With the hack of get_rrset_len() above, every message must have
+ # exactly one RR in the answer section.
+ self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertTrue(self.message_has_tsig(reply_msg))
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
-class MyCCSession(isc.config.ConfigData):
- def __init__(self):
- module_spec = isc.config.module_spec_from_file(
- xfrout.SPECFILE_LOCATION)
- ConfigData.__init__(self, module_spec)
+ def test_reply_xfrout_query_ixfr(self):
+ # Creating a pure (incremental) IXFR response. Intermediate SOA
+ # RRs won't be skipped.
+ self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+ self.xfrsess._iterator = [create_soa(IXFR_OK_VERSION),
+ create_a(Name('a.example.com'), '192.0.2.2'),
+ create_soa(SOA_CURRENT_VERSION),
+ create_aaaa(Name('a.example.com'),
+ '2001:db8::1')]
+ self.xfrsess._jnl_reader = self.xfrsess._iterator
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+ actual_records = reply_msg.get_section(Message.SECTION_ANSWER)
+
+ expected_records = self.xfrsess._iterator[:]
+ expected_records.insert(0, create_soa(SOA_CURRENT_VERSION))
+ expected_records.append(create_soa(SOA_CURRENT_VERSION))
+
+ self.assertEqual(len(expected_records), len(actual_records))
+ for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+ self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+ def test_reply_xfrout_query_axfr_maxlen(self):
+ # The test RR(set) has the length of 65535 - 12 (size of hdr) bytes:
+ # owner name = 1 (root), fixed fields (type,class,TTL,RDLEN) = 10
+ # RDATA = 65512 (= 65535 - 12 - 1 - 10)
+ self.xfrsess._soa = self.soa_rrset
+ test_rr = create_generic(Name('.'), 65512)
+ self.xfrsess._iterator = [self.soa_rrset, test_rr]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ # The first message should contain the beginning SOA, and only that RR
+ r = self.sock.read_msg()
+ self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+ self.assertTrue(rrsets_equal(self.soa_rrset,
+ r.get_section(Message.SECTION_ANSWER)[0]))
+ # The second message should contain the test RR, and only that RR.
+ # The wire format data should have the possible maximum size.
+ r, rlen = self.sock.read_msg(need_len=True)
+ self.assertEqual(65535, rlen)
+ self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+ self.assertTrue(rrsets_equal(test_rr,
+ r.get_section(Message.SECTION_ANSWER)[0]))
+ # The third message should contain the ending SOA, and only that RR
+ r = self.sock.read_msg()
+ self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+ self.assertTrue(rrsets_equal(self.soa_rrset,
+ r.get_section(Message.SECTION_ANSWER)[0]))
+
+ # there should be no more message
+ self.assertEqual(0, len(self.sock.sendqueue))
- def get_remote_config_value(self, module_name, identifier):
- if module_name == "Auth" and identifier == "database_file":
- return "initdb.file", False
- else:
- return "unknown", False
+ def maxlen_test_common_setup(self, tsig=False):
+ '''Common initialization for some of the tests below
+
+ For those tests we use '.' for all owner names and names in RDATA
+ to avoid having unexpected results due to compression. It returns
+ the created SOA for convenience.
+
+ If tsig is True, also setup TSIG (mock) context. In our test cases
+ the size of the TSIG RR is 81 bytes (key name = example.com,
+ algorithm = hmac-md5)
+
+ '''
+ soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+ self.mdata = self.create_request_data(zone_name=Name('.'))
+ self.xfrsess._soa = soa
+ if tsig:
+ self.xfrsess._tsig_ctx = \
+ self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ self.xfrsess._tsig_len = 81
+ return soa
+
+ def maxlen_test_common_checks(self, soa_rr, test_rr, expected_n_rr):
+ '''A set of common assertion checks for some tests below.
+
+ In all cases two AXFR response messages should have been created.
+ expected_n_rr is a list of two elements, each specifies the expected
+ number of answer RRs for each message: expected_n_rr[0] is the expected
+ number of the first answer RRs; expected_n_rr[1] is the expected number
+ of the second answer RRs. The message that contains two RRs should
+ have the maximum possible wire length (65535 bytes). And, in all
+ cases, the resulting RRs should be in the order of SOA, another RR,
+ SOA.
+
+ '''
+ # Check the first message
+ r, rlen = self.sock.read_msg(need_len=True)
+ if expected_n_rr[0] == 2:
+ self.assertEqual(65535, rlen)
+ self.assertEqual(expected_n_rr[0],
+ r.get_rr_count(Message.SECTION_ANSWER))
+ actual_rrs = r.get_section(Message.SECTION_ANSWER)[:]
+
+ # Check the second message
+ r, rlen = self.sock.read_msg(need_len=True)
+ if expected_n_rr[1] == 2:
+ self.assertEqual(65535, rlen)
+ self.assertEqual(expected_n_rr[1],
+ r.get_rr_count(Message.SECTION_ANSWER))
+ actual_rrs.extend(r.get_section(Message.SECTION_ANSWER))
+ for (expected_rr, actual_rr) in zip([soa_rr, test_rr, soa_rr],
+ actual_rrs):
+ self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+ # there should be no more message
+ self.assertEqual(0, len(self.sock.sendqueue))
+
+ def test_reply_xfrout_query_axfr_maxlen_with_soa(self):
+ # Similar to the 'maxlen' test, but the first message should be
+ # able to contain both SOA and the large RR.
+ soa = self.maxlen_test_common_setup()
+
+ # The first message will contain the question (5 bytes), so the
+ # test RDATA should allow a room for that.
+ test_rr = create_generic(Name('.'), 65512 - 5 - get_rrset_len(soa))
+ self.xfrsess._iterator = [soa, test_rr]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+ def test_reply_xfrout_query_axfr_maxlen_with_soa_with_tsig(self):
+ # Similar to the previous case, but with TSIG (whose size is 81 bytes).
+ soa = self.maxlen_test_common_setup(True)
+ test_rr = create_generic(Name('.'), 65512 - 5 - 81 -
+ get_rrset_len(soa))
+ self.xfrsess._iterator = [soa, test_rr]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+ def test_reply_xfrout_query_axfr_maxlen_with_endsoa(self):
+ # Similar to the max w/ soa test, but the first message cannot contain
+ # both SOA and the long RR due to the question section. The second
+ # message should be able to contain both.
+ soa = self.maxlen_test_common_setup()
+ test_rr = create_generic(Name('.'), 65512 - get_rrset_len(soa))
+ self.xfrsess._iterator = [soa, test_rr]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+ def test_reply_xfrout_query_axfr_maxlen_with_endsoa_with_tsig(self):
+ # Similar to the previous case, but with TSIG.
+ soa = self.maxlen_test_common_setup(True)
+ test_rr = create_generic(Name('.'), 65512 - 81 - get_rrset_len(soa))
+ self.xfrsess._iterator = [soa, test_rr]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+ def test_reply_xfrout_query_axfr_toobigdata(self):
+ # Similar to the 'maxlen' test, but the RR doesn't even fit in a
+ # single message.
+ self.xfrsess._soa = self.soa_rrset
+ test_rr = create_generic(Name('.'), 65513) # 1 byte larger than 'max'
+ self.xfrsess._iterator = [self.soa_rrset, test_rr]
+ # the reply method should fail with exception
+ self.assertRaises(XfroutSessionError, self.xfrsess._reply_xfrout_query,
+ self.getmsg(), self.sock)
+ # The first message should still have been sent and contain the
+ # beginning SOA, and only that RR
+ r = self.sock.read_msg()
+ self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+ self.assertTrue(rrsets_equal(self.soa_rrset,
+ r.get_section(Message.SECTION_ANSWER)[0]))
+ # And there should have been no other messages sent
+ self.assertEqual(0, len(self.sock.sendqueue))
+ def test_reply_xfrout_query_ixfr_soa_only(self):
+ # Creating an IXFR response that contains only one RR, which is the
+ # SOA of the current version.
+ self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+ self.xfrsess._iterator = None
+ self.xfrsess._jnl_reader = None
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+ answer = reply_msg.get_section(Message.SECTION_ANSWER)
+ self.assertEqual(1, len(answer))
+ self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+ answer[0]))
+
+class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
+ '''Tests for XFR-out sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to actual operational environments. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ super().setUp()
+ self.xfrsess._request_data = self.mdata
+ self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
+ 'test.sqlite3'
+ self.ns_name = 'a.dns.example.com'
+
+ def check_axfr_stream(self, response):
+ '''Common checks for AXFR(-style) response for the test zone.
+ '''
+ # This zone contains two A RRs for the same name with different TTLs.
+ # These TTLs should be preserved in the AXFR stream.
+ actual_records = response.get_section(Message.SECTION_ANSWER)
+ self.assertEqual(5, len(actual_records))
+ # The first and last RR should be the expected SOA
+ expected_soa = create_soa(2011112001)
+ self.assertTrue(rrsets_equal(expected_soa, actual_records[0]))
+ self.assertTrue(rrsets_equal(expected_soa, actual_records[-1]))
+
+ # The ordering of the intermediate RRs can differ depending on the
+ # internal details of the SQLite3 library, so we sort them by a simple
+ # rule sufficient for the purpose here, and then compare them.
+ expected_others = [create_ns(self.ns_name),
+ create_a(Name(self.ns_name), '192.0.2.1', 3600),
+ create_a(Name(self.ns_name), '192.0.2.2', 7200)]
+ keyfn = lambda x: (x.get_type(), x.get_ttl())
+ for (expected_rr, actual_rr) in zip(sorted(expected_others, key=keyfn),
+ sorted(actual_records[1:4],
+ key=keyfn)):
+ self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+ def test_axfr_normal_session(self):
+ XfroutSession._handle(self.xfrsess)
+ response = self.sock.read_msg(Message.PRESERVE_ORDER);
+ self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.check_axfr_stream(response)
+
+ def test_ixfr_to_axfr(self):
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_NG_VERSION)
+ XfroutSession._handle(self.xfrsess)
+ response = self.sock.read_msg(Message.PRESERVE_ORDER);
+ self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ # This is an AXFR-style IXFR. So the question section should indicate
+ # that it's an IXFR response.
+ self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+ self.check_axfr_stream(response)
+
+ def test_ixfr_normal_session(self):
+ # See testdata/creatediff.py. There are 8 changes between two
+ # versions. So the answer section should contain all of these and
+ # two beginning and trailing SOAs.
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_OK_VERSION)
+ XfroutSession._handle(self.xfrsess)
+ response = self.sock.read_msg(Message.PRESERVE_ORDER);
+ actual_records = response.get_section(Message.SECTION_ANSWER)
+ expected_records = [create_soa(2011112001), create_soa(2011111802),
+ create_soa(2011111900),
+ create_a(Name(self.ns_name), '192.0.2.2', 7200),
+ create_soa(2011111900),
+ create_a(Name(self.ns_name), '192.0.2.53'),
+ create_aaaa(Name(self.ns_name), '2001:db8::1'),
+ create_soa(2011112001),
+ create_a(Name(self.ns_name), '192.0.2.1'),
+ create_soa(2011112001)]
+ self.assertEqual(len(expected_records), len(actual_records))
+ for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+ self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+ def test_ixfr_soa_only(self):
+ # The requested SOA serial is the latest one. The response should
+ # contain exactly one SOA of that serial.
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+ XfroutSession._handle(self.xfrsess)
+ response = self.sock.read_msg(Message.PRESERVE_ORDER);
+ answers = response.get_section(Message.SECTION_ANSWER)
+ self.assertEqual(1, len(answers))
+ self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+ answers[0]))
class MyUnixSockServer(UnixSockServer):
def __init__(self):
@@ -670,23 +1131,27 @@ class TestUnixSockServer(unittest.TestCase):
file descriptor. This is needed, because we get only that one
from auth."""
# We test with UDP, as it can be "connected" without other
- # endpoint
+ # endpoint. Note that in the current implementation _guess_remote()
+ # unconditionally returns SOCK_STREAM.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('127.0.0.1', 12345))
- self.assertEqual(('127.0.0.1', 12345),
+ self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
self.unix._guess_remote(sock.fileno()))
if socket.has_ipv6:
# Don't check IPv6 address on hosts not supporting them
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.connect(('::1', 12345))
- self.assertEqual(('::1', 12345, 0, 0),
+ self.assertEqual((socket.AF_INET6, socket.SOCK_STREAM,
+ ('::1', 12345, 0, 0)),
self.unix._guess_remote(sock.fileno()))
# Try when pretending there's no IPv6 support
# (No need to pretend when there's really no IPv6)
xfrout.socket.has_ipv6 = False
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('127.0.0.1', 12345))
- self.assertEqual(('127.0.0.1', 12345),
+ self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
self.unix._guess_remote(sock.fileno()))
# Return it back
xfrout.socket.has_ipv6 = True
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index cf3b04f..310a0aa 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
import threading
import struct
import signal
-from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient, ZoneFinder, ZoneJournalReader
from socketserver import *
import os
from isc.config.ccsession import *
@@ -39,6 +39,7 @@ from isc.log_messages.xfrout_messages import *
isc.log.init("b10-xfrout")
logger = isc.log.Logger("xfrout")
+DBG_XFROUT_TRACE = logger.DBGLVL_TRACE_BASIC
try:
from libutil_io_python import *
@@ -46,7 +47,7 @@ try:
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrout process
# must keep running, so we warn about it and move forward.
- log.error(XFROUT_IMPORT, str(e))
+ logger.error(XFROUT_IMPORT, str(e))
from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
from isc.acl.dns import REQUEST_LOADER
@@ -65,6 +66,11 @@ class XfroutConfigError(Exception):
"""
pass
+class XfroutSessionError(Exception):
+ '''An exception raised for some unexpected events during an xfrout session.
+ '''
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -92,10 +98,40 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
VERBOSE_MODE = False
-# tsig sign every N axfr packets.
-TSIG_SIGN_EVERY_NTH = 96
-
-XFROUT_MAX_MESSAGE_SIZE = 65535
+XFROUT_DNS_HEADER_SIZE = 12 # protocol constant
+XFROUT_MAX_MESSAGE_SIZE = 65535 # ditto
+
+# borrowed from xfrin.py @ #1298. We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text(True) + '/' + str(zone_class)
+
+# borrowed from xfrin.py @ #1298.
+def format_addrinfo(addrinfo):
+ """Helper function to format the addrinfo as a string of the form
+ <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+ sockets, and unknown address families, it returns a basic string
+ conversion of the third element of the passed tuple.
+ Parameters:
+ addrinfo: a 3-tuple consisting of address family, socket type, and,
+ depending on the family, either a 2-tuple with the address
+ and port, or a filename
+ """
+ try:
+ if addrinfo[0] == socket.AF_INET:
+ return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+ elif addrinfo[0] == socket.AF_INET6:
+ return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+ else:
+ return str(addrinfo[2])
+ except IndexError:
+ raise TypeError("addrinfo argument to format_addrinfo() does not "
+ "appear to be consisting of (family, socktype, (addr, port))")
def get_rrset_len(rrset):
"""Returns the wire length of the given RRset"""
@@ -103,10 +139,15 @@ def get_rrset_len(rrset):
rrset.to_wire(bytes)
return len(bytes)
+def get_soa_serial(soa_rdata):
+ '''Extract the serial field of an SOA RDATA and return it as an integer.
+ (borrowed from xfrin)
+ '''
+ return int(soa_rdata.to_text().split()[2])
class XfroutSession():
def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
- default_acl, zone_config):
+ default_acl, zone_config, client_class=DataSourceClient):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
@@ -114,23 +155,53 @@ class XfroutSession():
self._tsig_ctx = None
self._tsig_len = 0
self._remote = remote
+ self._request_type = None
+ self._request_typestr = None
self._acl = default_acl
self._zone_config = zone_config
- self.handle()
+ self.ClientClass = client_class # parameterize this for testing
+ self._soa = None # will be set in _xfrout_setup or in tests
+ self._jnl_reader = None # will be set to a reader for IXFR
+ self._handle()
def create_tsig_ctx(self, tsig_record, tsig_key_ring):
return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
tsig_key_ring)
- def handle(self):
- ''' Handle a xfrout query, send xfrout response '''
+ def _handle(self):
+ ''' Handle a xfrout query, send xfrout response(s).
+
+ This is separated from the constructor so that we can override
+ it from tests.
+
+ '''
+ # Check the xfrout quota. We do both increase/decrease in this
+ # method so it's clear we always release it once acquired.
+ quota_ok = self._server.increase_transfers_counter()
+ ex = None
try:
- self.dns_xfrout_start(self._sock_fd, self._request_data)
- #TODO, avoid catching all exceptions
+ self.dns_xfrout_start(self._sock_fd, self._request_data, quota_ok)
except Exception as e:
- logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
- pass
+ # To avoid a resource leak we need to catch all possible exceptions.
+ # We log it later to exclude the case where even logger raises
+ # an exception.
+ ex = e
+
+ # Release any critical resources
+ if quota_ok:
+ self._server.decrease_transfers_counter()
+ self._close_socket()
+
+ if ex is not None:
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, ex)
+
+ def _close_socket(self):
+ '''Simply close the socket via the given FD.
+ This is a dedicated subroutine of handle() and is separated from it
+ for the convenience of tests.
+
+ '''
os.close(self._sock_fd)
def _check_request_tsig(self, msg, request_data):
@@ -138,7 +209,8 @@ class XfroutSession():
tsig_record = msg.get_tsig_record()
if tsig_record is not None:
self._tsig_len = tsig_record.get_length()
- self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+ self._tsig_ctx = self.create_tsig_ctx(tsig_record,
+ self._tsig_key_ring)
tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
if tsig_error != TSIGError.NOERROR:
return Rcode.NOTAUTH()
@@ -157,23 +229,45 @@ class XfroutSession():
# TSIG related checks
rcode = self._check_request_tsig(msg, mdata)
+ if rcode != Rcode.NOERROR():
+ return rcode, msg
+
+ # Make sure the question is valid. This should be ensured by
+ # the auth server, but since it's far from xfrout itself, we check
+ # it by ourselves. A violation would be an internal bug, so we
+ # raise and stop here rather than returning a FORMERR or SERVFAIL.
+ if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
+ raise RuntimeError('Invalid number of question for XFR: ' +
+ str(msg.get_rr_count(Message.SECTION_QUESTION)))
+ question = msg.get_question()[0]
- if rcode == Rcode.NOERROR():
- # ACL checks
- zone_name = msg.get_question()[0].get_name()
- zone_class = msg.get_question()[0].get_class()
- acl = self._get_transfer_acl(zone_name, zone_class)
- acl_result = acl.execute(
- isc.acl.dns.RequestContext(self._remote,
- msg.get_tsig_record()))
- if acl_result == DROP:
- logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
- self._remote[0], self._remote[1])
- return None, None
- elif acl_result == REJECT:
- logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
- self._remote[0], self._remote[1])
- return Rcode.REFUSED(), msg
+ # Identify the request type
+ self._request_type = question.get_type()
+ if self._request_type == RRType.AXFR():
+ self._request_typestr = 'AXFR'
+ elif self._request_type == RRType.IXFR():
+ self._request_typestr = 'IXFR'
+ else:
+ # Likewise, this should be impossible.
+ raise RuntimeError('Unexpected XFR type: ' +
+ str(self._request_type))
+
+ # ACL checks
+ zone_name = question.get_name()
+ zone_class = question.get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_DROPPED,
+ self._request_type, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return None, None
+ elif acl_result == REJECT:
+ logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
+ self._request_type, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return Rcode.REFUSED(), msg
return rcode, msg
@@ -195,14 +289,6 @@ class XfroutSession():
return self._zone_config[config_key]['transfer_acl']
return self._acl
- def _get_query_zone_name(self, msg):
- question = msg.get_question()[0]
- return question.get_name().to_text()
-
- def _get_query_zone_class(self, msg):
- question = msg.get_question()[0]
- return question.get_class().to_text()
-
def _send_data(self, sock_fd, data):
size = len(data)
total_count = 0
@@ -238,51 +324,165 @@ class XfroutSession():
msg.set_rcode(rcode_)
self._send_message(sock_fd, msg, self._tsig_ctx)
- def _zone_has_soa(self, zone):
- '''Judge if the zone has an SOA record.'''
- # In some sense, the SOA defines a zone.
- # If the current name server has authority for the
- # specific zone, we need to judge if the zone has an SOA record;
- # if not, we consider the zone has incomplete data, so xfrout can't
- # serve for it.
- if sqlite3_ds.get_zone_soa(zone, self._server.get_db_file()):
- return True
+ def _get_zone_soa(self, zone_name):
+ '''Retrieve the SOA RR of the given zone.
+
+ It returns a pair of RCODE and the SOA (in the form of RRset).
+ On success RCODE is NOERROR and returned SOA is not None;
+ on failure RCODE indicates the appropriate code in the context of
+ xfr processing, and the returned SOA is None.
+
+ '''
+ result, finder = self._datasrc_client.find_zone(zone_name)
+ if result != DataSourceClient.SUCCESS:
+ return (Rcode.NOTAUTH(), None)
+ result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+ ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ return (Rcode.SERVFAIL(), None)
+ # Especially for database-based zones, a working zone may be in
+ # a broken state where it has more than one SOA RR. We proactively
+ # check the condition and abort the xfr attempt if we identify it.
+ if soa_rrset.get_rdata_count() != 1:
+ return (Rcode.SERVFAIL(), None)
+ return (Rcode.NOERROR(), soa_rrset)
+
+ def __axfr_setup(self, zone_name):
+ '''Setup a zone iterator for AXFR or AXFR-style IXFR.
- return False
-
- def _zone_exist(self, zonename):
- '''Judge if the zone is configured by config manager.'''
- # Currently, if we find the zone in datasource successfully, we
- # consider the zone is configured, and the current name server has
- # authority for the specific zone.
- # TODO: should get zone's configuration from cfgmgr or other place
- # in future.
- return sqlite3_ds.zone_exist(zonename, self._server.get_db_file())
-
- def _check_xfrout_available(self, zone_name):
- '''Check if xfr request can be responsed.
- TODO, Get zone's configuration from cfgmgr or some other place
- eg. check allow_transfer setting,
'''
- # If the current name server does not have authority for the
- # zone, xfrout can't serve for it, return rcode NOTAUTH.
- if not self._zone_exist(zone_name):
+ try:
+ # Note that we enable 'separate_rrs'. In xfr-out we need to
+ # preserve as many things as possible (even if it's half broken)
+ # stored in the zone.
+ self._iterator = self._datasrc_client.get_iterator(zone_name,
+ True)
+ except isc.datasrc.Error:
+ # If the current name server does not have authority for the
+ # zone, xfrout can't serve for it, return rcode NOTAUTH.
+ # Note: this exception can happen for other reasons. We should
+ # update get_iterator() API so that we can distinguish "no such
+ # zone" and other cases (#1373). For now we consider all these
+ # cases as NOTAUTH.
return Rcode.NOTAUTH()
# If we are an authoritative name server for the zone, but fail
# to find the zone's SOA record in datasource, xfrout can't
# provide zone transfer for it.
- if not self._zone_has_soa(zone_name):
+ self._soa = self._iterator.get_soa()
+ if self._soa is None or self._soa.get_rdata_count() != 1:
return Rcode.SERVFAIL()
- #TODO, check allow_transfer
- if not self._server.increase_transfers_counter():
- return Rcode.REFUSED()
+ return Rcode.NOERROR()
+
+ def __ixfr_setup(self, request_msg, zone_name, zone_class):
+ '''Setup a zone journal reader for IXFR.
+
+ If the underlying data source does not know the requested range
+ of zone differences it automatically falls back to AXFR-style
+ IXFR by setting up a zone iterator instead of a journal reader.
+
+ '''
+ # Check the authority section. Look for a SOA record with
+ # the same name and class as the question.
+ remote_soa = None
+ for auth_rrset in request_msg.get_section(Message.SECTION_AUTHORITY):
+ # Ignore data whose owner name is not the zone apex, and
+ # ignore non-SOA or different class of records.
+ if auth_rrset.get_name() != zone_name or \
+ auth_rrset.get_type() != RRType.SOA() or \
+ auth_rrset.get_class() != zone_class:
+ continue
+ if auth_rrset.get_rdata_count() != 1:
+ logger.info(XFROUT_IXFR_MULTIPLE_SOA,
+ format_addrinfo(self._remote))
+ return Rcode.FORMERR()
+ remote_soa = auth_rrset
+ if remote_soa is None:
+ logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
+ return Rcode.FORMERR()
+
+ # Retrieve the local SOA
+ rcode, self._soa = self._get_zone_soa(zone_name)
+ if rcode != Rcode.NOERROR():
+ return rcode
+
+ # RFC1995 says "If an IXFR query with the same or newer version
+ # number than that of the server is received, it is replied to with
+ # a single SOA record of the server's current version, just as
+ # in AXFR". The claim about AXFR is incorrect, but other than that,
+ # we do as the RFC says.
+ # Note: until we complete #1278 we can only check equality of the
+ # two serials. The "newer version" case would fall back to AXFR-style.
+ begin_serial = get_soa_serial(remote_soa.get_rdata()[0])
+ end_serial = get_soa_serial(self._soa.get_rdata()[0])
+ if begin_serial == end_serial:
+ # clear both iterator and jnl_reader to signal we won't do
+ # iteration in response generation
+ self._iterator = None
+ self._jnl_reader = None
+ logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class),
+ begin_serial, end_serial)
+ return Rcode.NOERROR()
+
+ # Set up the journal reader or fall back to AXFR-style IXFR
+ try:
+ code, self._jnl_reader = self._datasrc_client.get_journal_reader(
+ zone_name, begin_serial, end_serial)
+ except isc.datasrc.NotImplemented as ex:
+ # The underlying data source doesn't support journaling.
+ # Fall back to AXFR-style IXFR.
+ logger.info(XFROUT_IXFR_NO_JOURNAL_SUPPORT,
+ format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return self.__axfr_setup(zone_name)
+ if code == ZoneJournalReader.NO_SUCH_VERSION:
+ logger.info(XFROUT_IXFR_NO_VERSION, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class),
+ begin_serial, end_serial)
+ return self.__axfr_setup(zone_name)
+ if code == ZoneJournalReader.NO_SUCH_ZONE:
+ # this is quite unexpected as we know zone's SOA exists.
+ # It might be a bug or the data source is somehow broken,
+ # but it can still happen if someone has removed the zone
+ # between these two operations. We treat it as NOTAUTH.
+ logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return Rcode.NOTAUTH()
+
+ # Use the reader as the iterator to generate the response.
+ self._iterator = self._jnl_reader
return Rcode.NOERROR()
+ def _xfrout_setup(self, request_msg, zone_name, zone_class):
+ '''Setup a context for xfr responses according to the request type.
+
+ This method identifies the most appropriate data source for the
+ request and set up a zone iterator or journal reader depending on
+ whether the request is AXFR or IXFR. If it identifies any protocol
+ level error it returns an RCODE other than NOERROR.
- def dns_xfrout_start(self, sock_fd, msg_query):
+ '''
+
+ # Identify the data source for the requested zone and see if it has
+ # SOA while initializing objects used for request processing later.
+ # We should eventually generalize this so that we can choose the
+ # appropriate data source from (possible) multiple candidates.
+ # We should eventually take into account the RR class here.
+ # For now, we hardcode a particular type (SQLite3-based), and only
+ # consider that one.
+ datasrc_config = '{ "database_file": "' + \
+ self._server.get_db_file() + '"}'
+ self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+
+ if self._request_type == RRType.AXFR():
+ return self.__axfr_setup(zone_name)
+ else:
+ return self.__ixfr_setup(request_msg, zone_name, zone_class)
+
+ def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
rcode_, msg = self._parse_query_message(msg_query)
#TODO. create query message and parse header
if rcode_ is None: # Dropped by ACL
@@ -292,29 +492,38 @@ class XfroutSession():
elif rcode_ != Rcode.NOERROR():
return self._reply_query_with_error_rcode(msg, sock_fd,
Rcode.FORMERR())
+ elif not quota_ok:
+ logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
+ format_addrinfo(self._remote),
+ self._server._max_transfers_out)
+ return self._reply_query_with_error_rcode(msg, sock_fd,
+ Rcode.REFUSED())
- zone_name = self._get_query_zone_name(msg)
- zone_class_str = self._get_query_zone_class(msg)
- # TODO: should we not also include class in the check?
- rcode_ = self._check_xfrout_available(zone_name)
+ question = msg.get_question()[0]
+ zone_name = question.get_name()
+ zone_class = question.get_class()
+ zone_str = format_zone_str(zone_name, zone_class) # for logging
+ try:
+ rcode_ = self._xfrout_setup(msg, zone_name, zone_class)
+ except Exception as ex:
+ logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, ex)
+ rcode_ = Rcode.SERVFAIL()
if rcode_ != Rcode.NOERROR():
- logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
- zone_class_str, rcode_.to_text())
+ logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, rcode_)
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
- logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
- self._reply_xfrout_query(msg, sock_fd, zone_name)
+ logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_typestr,
+ format_addrinfo(self._remote), zone_str)
+ self._reply_xfrout_query(msg, sock_fd)
except Exception as err:
- logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
- zone_class_str, str(err))
- pass
- logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
-
- self._server.decrease_transfers_counter()
- return
-
+ logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, err)
+ logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_typestr,
+ format_addrinfo(self._remote), zone_str)
def _clear_message(self, msg):
qid = msg.get_qid()
@@ -329,87 +538,93 @@ class XfroutSession():
msg.set_header_flag(Message.HEADERFLAG_QR)
return msg
- def _create_rrset_from_db_record(self, record):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
- This function should be updated first.
- '''
- rrtype_ = RRType(record[5])
- rdata_ = Rdata(rrtype_, RRClass("IN"), " ".join(record[7:]))
- rrset_ = RRset(Name(record[2]), RRClass("IN"), rrtype_, RRTTL( int(record[4])))
- rrset_.add_rdata(rdata_)
- return rrset_
-
- def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
- count_since_last_tsig_sign):
- '''Add the SOA record to the end of message. If it can't be
- added, a new message should be created to send out the last soa .
- '''
- rrset_len = get_rrset_len(rrset_soa)
+ def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
+ message_upper_len):
+ '''Add the SOA record to the end of message.
- if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
- message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
- # If tsig context exist, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+ If it would exceed the maximum allowable size of a message, a new
+ message will be created to send out the last SOA.
+
+ We assume a message with a single SOA can always fit the buffer
+ with or without TSIG. In theory this could be wrong if TSIG is
+ stupidly large, but in practice this assumption should be reasonable.
+ '''
+ if message_upper_len + get_rrset_len(rrset_soa) > \
+ XFROUT_MAX_MESSAGE_SIZE:
self._send_message(sock_fd, msg, self._tsig_ctx)
msg = self._clear_message(msg)
- elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
- message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
- self._send_message(sock_fd, msg)
- msg = self._clear_message(msg)
- # If tsig context exist, sign the last packet
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
self._send_message(sock_fd, msg, self._tsig_ctx)
-
- def _reply_xfrout_query(self, msg, sock_fd, zone_name):
- #TODO, there should be a better way to insert rrset.
- count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
+ def _reply_xfrout_query(self, msg, sock_fd):
msg.make_response()
msg.set_header_flag(Message.HEADERFLAG_AA)
- soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
- rrset_soa = self._create_rrset_from_db_record(soa_record)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ # Reserved space for the fixed header size, the size of the question
+ # section, and TSIG size (when included). The size of the question
+ # section is the sum of the qname length and the size of the
+ # fixed-length fields (type and class, 2 bytes each).
+ message_upper_len = XFROUT_DNS_HEADER_SIZE + \
+ msg.get_question()[0].get_name().get_length() + 4 + \
+ self._tsig_len
+
+ # If the iterator is None, we are responding to IXFR with a single
+ # SOA RR.
+ if self._iterator is None:
+ self._send_message_with_last_soa(msg, sock_fd, self._soa,
+ message_upper_len)
+ return
- message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
+ # Add the beginning SOA
+ msg.add_rrset(Message.SECTION_ANSWER, self._soa)
+ message_upper_len += get_rrset_len(self._soa)
- for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
- if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
+ # Add the rest of the zone/diff contents
+ for rrset in self._iterator:
+ # Check if xfrout is shutdown
+ if self._server._shutdown_event.is_set():
logger.info(XFROUT_STOPPING)
return
- # TODO: RRType.SOA() ?
- if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
- continue
- rrset_ = self._create_rrset_from_db_record(rr_data)
+ # For AXFR (or AXFR-style IXFR), in which case _jnl_reader is None,
+ # we should skip SOAs from the iterator.
+ if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
+ continue
# We calculate the maximum size of the RRset (i.e. the
# size without compression) and use that to see if we
# may have reached the limit
- rrset_len = get_rrset_len(rrset_)
- if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
- msg.add_rrset(Message.SECTION_ANSWER, rrset_)
+ rrset_len = get_rrset_len(rrset)
+
+ if message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE:
+ msg.add_rrset(Message.SECTION_ANSWER, rrset)
message_upper_len += rrset_len
continue
- # If tsig context exist, sign every N packets
- if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
- count_since_last_tsig_sign = 0
- self._send_message(sock_fd, msg, self._tsig_ctx)
- else:
- self._send_message(sock_fd, msg)
+ # RR would not fit. If there are other RRs in the buffer, send
+ # them now and leave this RR to the next message.
+ self._send_message(sock_fd, msg, self._tsig_ctx)
- count_since_last_tsig_sign += 1
+ # Create a new message and reserve space for the carried-over
+ # RR (and TSIG space in case it's to be TSIG signed)
msg = self._clear_message(msg)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
+ message_upper_len = XFROUT_DNS_HEADER_SIZE + rrset_len + \
+ self._tsig_len
- # Reserve tsig space for signed packet
- if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
- message_upper_len = rrset_len + self._tsig_len
- else:
- message_upper_len = rrset_len
+ # If this RR overflows the buffer all by itself, fail. In theory
+ # some RRs might fit in a TCP message when compressed even if they
+ # do not fit when uncompressed, but surely we don't want to send
+ # such monstrosities to an unsuspecting slave.
+ if message_upper_len > XFROUT_MAX_MESSAGE_SIZE:
+ raise XfroutSessionError('RR too large for zone transfer (' +
+ str(rrset_len) + ' bytes)')
- self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
- count_since_last_tsig_sign)
+ # Add the RRset to the new message
+ msg.add_rrset(Message.SECTION_ANSWER, rrset)
+
+ # Add and send the trailing SOA
+ self._send_message_with_last_soa(msg, sock_fd, self._soa,
+ message_upper_len)
class UnixSockServer(socketserver_mixin.NoPollMixIn,
ThreadingUnixStreamServer):
@@ -483,7 +698,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
try:
self.process_request(request)
except Exception as pre:
- log.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
+ logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
break
def _handle_request_noblock(self):
@@ -517,9 +732,12 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
t.start()
def _guess_remote(self, sock_fd):
- """
- Guess remote address and port of the socket. The sock_fd must be a
- socket
+ """Guess remote address and port of the socket.
+
+ The sock_fd must be a file descriptor of a socket.
+ This method returns a 3-tuple consisting of address family,
+ socket type, and a 2-tuple with the address (string) and port (int).
+
"""
# This uses a trick. If the socket is IPv4 in reality and we pretend
# it to be IPv6, it returns IPv4 address anyway. This doesn't seem
@@ -531,11 +749,23 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
# To make it work even on hosts without IPv6 support
# (Any idea how to simulate this in test?)
sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
- return sock.getpeername()
+ peer = sock.getpeername()
+
+ # Identify the correct socket family. Due to the above "trick",
+ # we cannot simply use sock.family.
+ family = socket.AF_INET6
+ try:
+ socket.inet_pton(socket.AF_INET6, peer[0])
+ except socket.error:
+ family = socket.AF_INET
+ return (family, socket.SOCK_STREAM, peer)
def finish_request(self, sock_fd, request_data):
'''Finish one request by instantiating RequestHandlerClass.
+ This is an entry point of a separate thread spawned in
+ UnixSockServer.process_request().
+
This method creates a XfroutSession object.
'''
self._lock.acquire()
@@ -583,7 +813,6 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
os.unlink(self._sock_file)
except Exception as e:
logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
- pass
def update_config_data(self, new_config):
'''Apply the new config setting of xfrout module.
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index b2e432c..fcc2e59 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -15,30 +15,6 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrout messages python module.
-% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-
-% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-
-% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-# Still a TODO, but when implemented, REFUSED can also mean
-# the client is not allowed to transfer the zone
-
-% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
-A transfer out of the given zone has started.
-
% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
The TSIG key string as read from the configuration does not represent
a valid TSIG key.
@@ -106,16 +82,27 @@ in the log message, but at this point no specific information other
than that could be given. This points to incomplete exception handling
in the code.
-% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+% XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped
+The xfrout process silently dropped a request to transfer zone to
+given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
-% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected
The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+
+% XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
There was an error receiving the file descriptor for the transfer
@@ -160,3 +147,72 @@ on, but the file is in use. The most likely cause is that another
xfrout daemon process is still running. This xfrout daemon (the one
printing this message) will not start.
+% XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low-level error in the data
+source, but it may also be other general (more unlikely) errors such
+as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+
+% XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
+A transfer out of the given zone has started.
+
+% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs
+An IXFR request was received with more than one SOA RRs in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+
+% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+
+% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+
+% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic. This will soon
+be implemented.
+
+% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR
+An IXFR request was received, but the requested range of differences
+were not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+
+% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 466be3e..25ec955 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -175,12 +175,12 @@ struct IOFetchData {
/// IOFetch Constructor - just initialize the private data
IOFetch::IOFetch(Protocol protocol, IOService& service,
- const isc::dns::Question& question, const IOAddress& address, uint16_t port,
- OutputBufferPtr& buff, Callback* cb, int wait)
+ const isc::dns::Question& question, const IOAddress& address,
+ uint16_t port, OutputBufferPtr& buff, Callback* cb, int wait, bool edns)
{
MessagePtr query_msg(new Message(Message::RENDER));
initIOFetch(query_msg, protocol, service, question, address, port, buff,
- cb, wait);
+ cb, wait, edns);
}
IOFetch::IOFetch(Protocol protocol, IOService& service,
@@ -214,7 +214,7 @@ void
IOFetch::initIOFetch(MessagePtr& query_msg, Protocol protocol, IOService& service,
const isc::dns::Question& question,
const IOAddress& address, uint16_t port,
- OutputBufferPtr& buff, Callback* cb, int wait)
+ OutputBufferPtr& buff, Callback* cb, int wait, bool edns)
{
data_ = boost::shared_ptr<IOFetchData>(new IOFetchData(
protocol, service, address, port, buff, cb, wait));
@@ -224,9 +224,13 @@ IOFetch::initIOFetch(MessagePtr& query_msg, Protocol protocol, IOService& servic
query_msg->setRcode(Rcode::NOERROR());
query_msg->setHeaderFlag(Message::HEADERFLAG_RD);
query_msg->addQuestion(question);
- EDNSPtr edns_query(new EDNS());
- edns_query->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
- query_msg->setEDNS(edns_query);
+
+ if (edns) {
+ EDNSPtr edns_query(new EDNS());
+ edns_query->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
+ query_msg->setEDNS(edns_query);
+ }
+
MessageRenderer renderer(*data_->msgbuf);
query_msg->toWire(renderer);
}
@@ -355,10 +359,6 @@ IOFetch::stop(Result result) {
// variable should be done inside a mutex (and the stopped_ variable
// declared as "volatile").
//
- // The numeric arguments indicate the debug level, with the lower
- // numbers indicating the most important information. The relative
- // values are somewhat arbitrary.
- //
// TODO: Update testing of stopped_ if threads are used.
data_->stopped = true;
switch (result) {
diff --git a/src/lib/asiodns/io_fetch.h b/src/lib/asiodns/io_fetch.h
index 9626ffe..1161ed3 100644
--- a/src/lib/asiodns/io_fetch.h
+++ b/src/lib/asiodns/io_fetch.h
@@ -131,11 +131,14 @@ public:
/// and deleting it if necessary.
/// \param wait Timeout for the fetch (in ms). The default value of
/// -1 indicates no timeout.
+ /// \param edns true if the request should be EDNS. The default value is
+ /// true.
IOFetch(Protocol protocol, isc::asiolink::IOService& service,
const isc::dns::Question& question,
const isc::asiolink::IOAddress& address,
uint16_t port, isc::util::OutputBufferPtr& buff, Callback* cb,
- int wait = -1);
+ int wait = -1,
+ bool edns = true);
/// \brief Constructor
/// This constructor has one parameter "query_message", which
@@ -206,7 +209,8 @@ private:
void initIOFetch(isc::dns::MessagePtr& query_message, Protocol protocol,
isc::asiolink::IOService& service, const isc::dns::Question& question,
const isc::asiolink::IOAddress& address, uint16_t port,
- isc::util::OutputBufferPtr& buff, Callback* cb, int wait);
+ isc::util::OutputBufferPtr& buff, Callback* cb, int wait,
+ bool edns = true);
/// \brief Log I/O Failure
///
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 5444547..07c3e13 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -14,6 +14,9 @@ CLEANFILES = *.gcno *.gcda
# with -Werror (our default setting).
lib_LTLIBRARIES = libasiolink.la
+
+libasiolink_la_LDFLAGS = -no-undefined -version-info 1:0:1
+
libasiolink_la_SOURCES = asiolink.h
libasiolink_la_SOURCES += dummy_io_cb.h
libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
diff --git a/src/lib/cryptolink/Makefile.am b/src/lib/cryptolink/Makefile.am
index 93f3443..fc12fae 100644
--- a/src/lib/cryptolink/Makefile.am
+++ b/src/lib/cryptolink/Makefile.am
@@ -11,4 +11,5 @@ lib_LTLIBRARIES = libcryptolink.la
libcryptolink_la_SOURCES = cryptolink.h cryptolink.cc
libcryptolink_la_SOURCES += crypto_hmac.h crypto_hmac.cc
-libcryptolink_la_LIBADD = ${BOTAN_LDFLAGS} ${BOTAN_RPATH}
+libcryptolink_la_LDFLAGS = ${BOTAN_LDFLAGS}
+libcryptolink_la_LIBADD = ${BOTAN_LIBS} ${BOTAN_RPATH}
diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am
index fbdd13f..6ac6fdf 100644
--- a/src/lib/cryptolink/tests/Makefile.am
+++ b/src/lib/cryptolink/tests/Makefile.am
@@ -16,8 +16,8 @@ TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += crypto_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD) $(BOTAN_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index bf1171e..b6c314c 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,9 +7,15 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+pkglibexecdir = $(libexecdir)/@PACKAGE@/backends
+
+datasrc_config.h: datasrc_config.h.pre
+ $(SED) -e "s|@@PKGLIBEXECDIR@@|$(pkglibexecdir)|" datasrc_config.h.pre >$@
+
CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
+CLEANFILES += datasrc_config.h
-lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
+lib_LTLIBRARIES = libdatasrc.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -25,8 +31,11 @@ libdatasrc_la_SOURCES += database.h database.cc
libdatasrc_la_SOURCES += factory.h factory.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+pkglibexec_LTLIBRARIES = sqlite3_ds.la memory_ds.la
+
sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LDFLAGS += -no-undefined -version-info 1:0:0
sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
sqlite3_ds_la_LIBADD += libdatasrc.la
sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
@@ -42,7 +51,7 @@ libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
libdatasrc_la_LIBADD += $(SQLITE_LIBS)
-BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 2c3f709..24c8850 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
#ifndef __DATA_SOURCE_CLIENT_H
#define __DATA_SOURCE_CLIENT_H 1
+#include <utility>
+
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
@@ -215,18 +217,19 @@ public:
///
/// \param name The name of zone apex to be traversed. It doesn't do
/// nearest match as findZone.
- /// \param adjust_ttl If true, the iterator will treat RRs with the same
- /// name and type but different TTL values to be of the
- /// same RRset, and will adjust the TTL to the lowest
- /// value found. If false, it will consider the RR to
- /// belong to a different RRset.
+ /// \param separate_rrs If true, the iterator will return each RR as a
+ /// new RRset object. If false, the iterator will
+ /// combine consecutive RRs with the name and type
+ /// into 1 RRset. The capitalization of the RRset will
+ /// be that of the first RR read, and TTLs will be
+ /// adjusted to the lowest one found.
/// \return Pointer to the iterator.
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const {
+ bool separate_rrs = false) const {
// This is here to both document the parameter in doxygen (therefore it
// needs a name) and avoid unused parameter warning.
static_cast<void>(name);
- static_cast<void>(adjust_ttl);
+ static_cast<void>(separate_rrs);
isc_throw(isc::NotImplemented,
"Data source doesn't support iteration");
@@ -272,6 +275,22 @@ public:
/// In such cases this method will result in an \c isc::NotImplemented
/// exception unconditionally or when \c replace is false).
///
+ /// If \c journaling is true, the data source should store a journal
+ /// of changes. These can be used later on by, for example, IXFR-out.
+ /// However, the parameter is a hint only. It might be unable to store
+ /// them and they would be silently discarded. Or it might need to
+ /// store them no matter what (for example a git-based data source would
+ /// store journal implicitly). When the \c journaling is true, it
+ /// requires that the following update be formatted as IXFR transfer
+ /// (SOA to be removed, bunch of RRs to be removed, SOA to be added,
+ /// bunch of RRs to be added, and possibly repeated). However, it is not
+ /// required that the updater checks that. If it is false, it must not
+ /// require so and must accept any order of changes.
+ ///
+ /// We don't support erasing the whole zone (by replace being true) and
+ /// saving a journal at the same time. In such situation, BadValue is
+ /// thrown.
+ ///
/// \note To avoid throwing the exception accidentally with a lazy
/// implementation, we still keep this method pure virtual without
/// an implementation. All derived classes must explicitly define this
@@ -282,14 +301,67 @@ public:
/// \exception DataSourceError Internal error in the underlying data
/// source.
/// \exception std::bad_alloc Resource allocation failure.
+ /// \exception BadValue if both replace and journaling are true.
///
/// \param name The zone name to be updated
/// \param replace Whether to delete existing RRs before making updates
+ /// \param journaling The zone updater should store a journal of the
+ /// changes.
///
/// \return A pointer to the updater; it will be NULL if the specified
/// zone isn't found.
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const = 0;
+ bool replace, bool journaling = false)
+ const = 0;
+
+ /// Return a journal reader to retrieve differences of a zone.
+ ///
+ /// A derived version of this method creates a concrete
+ /// \c ZoneJournalReader object specific to the underlying data source
+ /// for the specified name of zone and differences between the versions
+ /// specified by the beginning and ending serials of the corresponding
+ /// SOA RRs.
+ /// The RR class of the zone is the one that the client is expected to
+ /// handle (see the detailed description of this class).
+ ///
+ /// Note that the SOA serials are compared by the semantics of the serial
+ /// number arithmetic. So, for example, \c begin_serial can be larger than
+ /// \c end_serial as bare unsigned integers. The underlying data source
+ /// implementation is assumed to keep track of sufficient history to
+ /// identify (if exist) the corresponding difference between the specified
+ /// versions.
+ ///
+ /// This method returns the result as a pair of a result code and
+ /// a pointer to a \c ZoneJournalReader object. On success, the result
+ /// code is \c SUCCESS and the pointer must be non NULL; otherwise
+/// the result code is something other than \c SUCCESS and the pointer
+ /// must be NULL.
+ ///
+ /// If the specified zone is not found in the data source, the result
+ /// code is \c NO_SUCH_ZONE.
+ /// Otherwise, if specified range of difference for the zone is not found
+ /// in the data source, the result code is \c NO_SUCH_VERSION.
+ ///
+ /// Handling differences is an optional feature of data source.
+ /// If the underlying data source does not support difference handling,
+ /// this method for that type of data source can throw an exception of
+ /// class \c NotImplemented.
+ ///
+ /// \exception NotImplemented The data source does not support differences.
+ /// \exception DataSourceError Other operational errors at the data source
+ /// level.
+ ///
+ /// \param zone The name of the zone for which the difference should be
+ /// retrieved.
+ /// \param begin_serial The SOA serial of the beginning version of the
+ /// differences.
+ /// \param end_serial The SOA serial of the ending version of the
+ /// differences.
+ ///
+ /// \return A pair of result code and a pointer to \c ZoneJournalReader.
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const = 0;
};
}
}
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index a7a15a9..c35f0d3 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -53,6 +53,18 @@ public:
isc::Exception(file, line, what) {}
};
+/// \brief No such serial number when obtaining difference iterator
+///
+/// Thrown if either the zone/start serial number or zone/end serial number
+/// combination does not exist in the differences table. (Note that this
+/// includes the case where the differences table contains no records related
+/// to that zone.)
+class NoSuchSerial : public DataSourceError {
+public:
+ NoSuchSerial(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
class AbstractDataSrc {
///
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index f06cdc0..6e0b02b 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <string>
+#include <utility>
#include <vector>
#include <datasrc/database.h>
@@ -351,7 +352,7 @@ FINAL_TYPES() {
}
-RRsetPtr
+ConstRRsetPtr
DatabaseClient::Finder::findNSECCover(const Name& name) {
try {
// Which one should contain the NSEC record?
@@ -386,69 +387,99 @@ DatabaseClient::Finder::findNSECCover(const Name& name) {
arg(accessor_->getDBName()).arg(name);
}
// We didn't find it, return nothing
- return (RRsetPtr());
+ return (ConstRRsetPtr());
}
-ZoneFinder::FindResult
-DatabaseClient::Finder::find(const isc::dns::Name& name,
- const isc::dns::RRType& type,
- isc::dns::RRsetList*,
- const FindOptions options)
+DatabaseClient::Finder::DelegationSearchResult
+DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name,
+ const FindOptions options)
{
- // This variable is used to determine the difference between
- // NXDOMAIN and NXRRSET
- bool records_found = false;
- bool glue_ok((options & FIND_GLUE_OK) != 0);
- const bool dnssec_data((options & FIND_DNSSEC) != 0);
- bool get_cover(false);
- isc::dns::RRsetPtr result_rrset;
+ // Result of search
+ isc::dns::ConstRRsetPtr result_rrset;
ZoneFinder::Result result_status = SUCCESS;
- FoundRRsets found;
- logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
- .arg(accessor_->getDBName()).arg(name).arg(type);
- // In case we are in GLUE_OK mode and start matching wildcards,
- // we can't do it under NS, so we store it here to check
- isc::dns::RRsetPtr first_ns;
-
- // First, do we have any kind of delegation (NS/DNAME) here?
- const Name origin(getOrigin());
- const size_t origin_label_count(origin.getLabelCount());
- // Number of labels in the last known non-empty domain
- size_t last_known(origin_label_count);
- const size_t current_label_count(name.getLabelCount());
- // This is how many labels we remove to get origin
- const size_t remove_labels(current_label_count - origin_label_count);
-
- // Now go trough all superdomains from origin down
- for (int i(remove_labels); i > 0; --i) {
- Name superdomain(name.split(i));
- // Look if there's NS or DNAME (but ignore the NS in origin)
- found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
- i != remove_labels);
- if (found.first) {
- // It contains some RRs, so it exists.
- last_known = superdomain.getLabelCount();
+ // Are we searching for glue?
+ const bool glue_ok = ((options & FIND_GLUE_OK) != 0);
+
+ // This next declaration is an optimisation. When we search the database
+ // for glue records, we generally ignore delegations. (This allows for
+ // the case where e.g. the delegation to zone example.com refers to
+ // nameservers within the zone, e.g. ns1.example.com. When conducting the
+ // search for ns1.example.com, we have to search past the NS records at
+ // example.com.)
+ //
+ // The one case where this is forbidden is when we search past the zone
+ // cut but the match we find for the glue is a wildcard match. In that
+ // case, we return the delegation instead (see RFC 1034, section 4.3.3).
+ // To save a new search, we record the location of the delegation cut when
+ // we encounter it here.
+ isc::dns::ConstRRsetPtr first_ns;
+
+ // We want to search from the apex down. We are given the full domain
+ // name so we have to do some manipulation to ensure that when we start
+ // checking superdomains, we start from the domain name of the zone
+ // (e.g. if the name is b.a.example.com. and we are in the example.com.
+ // zone, we check example.com., a.example.com. and b.a.example.com. We
+ // don't need to check com. or .).
+ //
+ // Set the number of labels in the origin (i.e. apex of the zone) and in
+ // the last known non-empty domain (which, at this point, is the origin).
+ const size_t origin_label_count = getOrigin().getLabelCount();
+ size_t last_known = origin_label_count;
+
+ // Set how many labels we remove to get origin: this is the number of
+ // labels we have to process in our search.
+ const size_t remove_labels = name.getLabelCount() - origin_label_count;
+
+ // Go through all superdomains from the origin down searching for nodes
+ // that indicate a delegation (i.e. NS or DNAME).
+ for (int i = remove_labels; i > 0; --i) {
+ const Name superdomain(name.split(i));
+
+ // Note if this is the origin. (We don't count NS records at the origin
+ // as a delegation so this controls whether NS RRs are included in
+ // the results of some searches.)
+ const bool not_origin = (i != remove_labels);
+
+ // Look if there's NS or DNAME at this point of the tree, but ignore
+ // the NS RRs at the apex of the zone.
+ const FoundRRsets found = getRRsets(superdomain.toText(),
+ DELEGATION_TYPES(), not_origin);
+ if (found.first) {
+ // This node contains either NS or DNAME RRs so it does exist.
const FoundIterator nsi(found.second.find(RRType::NS()));
const FoundIterator dni(found.second.find(RRType::DNAME()));
- // In case we are in GLUE_OK mode, we want to store the
- // highest encountered NS (but not apex)
- if (glue_ok && !first_ns && i != remove_labels &&
- nsi != found.second.end()) {
+
+ // An optimisation. We know that there is an exact match for
+ // something at this point in the tree so remember it. If we have
+ // to do a wildcard search, as we search upwards through the tree
+ // we don't need to pass this point, which is an exact match for
+ // the domain name.
+ last_known = superdomain.getLabelCount();
+
+ if (glue_ok && !first_ns && not_origin &&
+ nsi != found.second.end()) {
+ // If we are searching for glue ("glue OK" mode), store the
+ // highest NS record that we find that is not the apex. This
+ // is another optimisation for later, where we need the
+ // information if the domain we are looking for matches through
+ // a wildcard.
first_ns = nsi->second;
- } else if (!glue_ok && i != remove_labels &&
- nsi != found.second.end()) {
- // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
- // delegation in apex
+
+ } else if (!glue_ok && not_origin && nsi != found.second.end()) {
+ // Not searching for glue and we have found an NS RRset that is
+ // not at the apex. We have found a delegation - return that
+ // fact, there is no need to search further down the tree.
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DELEGATION).
arg(accessor_->getDBName()).arg(superdomain);
result_rrset = nsi->second;
result_status = DELEGATION;
- // No need to go lower, found
break;
+
} else if (dni != found.second.end()) {
- // Very similar with DNAME
+ // We have found a DNAME so again stop searching down the tree
+ // and return the information.
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DNAME).
arg(accessor_->getDBName()).arg(superdomain);
@@ -463,202 +494,344 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
}
}
}
+ return (DelegationSearchResult(result_status, result_rrset, first_ns,
+ last_known));
+}
- if (!result_rrset) { // Only if we didn't find a redirect already
- // Try getting the final result and extract it
- // It is special if there's a CNAME or NS, DNAME is ignored here
- // And we don't consider the NS in origin
-
- WantedTypes final_types(FINAL_TYPES());
- final_types.insert(type);
- found = getRRsets(name.toText(), final_types, name != origin);
- records_found = found.first;
+// This method is called when we have not found an exact match and when we
+// know that the name is not an empty non-terminal. So the only way that
+// the name can match something in the zone is through a wildcard match.
+//
+// During an earlier stage in the search for this name, we made a record of
+// the lowest superdomain for which we know an RR exists. (Note the "we
+// know" qualification - there may be lower superdomains (ones with more
+// labels) that hold an RR, but as we weren't searching for them, we don't
+// know about them.)
+//
+// In the search for a wildcard match (which starts at the given domain
+// name and goes up the tree to successive superdomains), this is the level
+// at which we can stop - there can't be a wildcard at or beyond that
+// point.
+//
+// At each level that can stop the search, we should consider several cases:
+//
+// - If we found a wildcard match for a glue record below a
+// delegation point, we don't return the match,
+// instead we return the delegation. (Note that if we didn't find
+// a wildcard match at all, we would return NXDOMAIN, not
+// the delegation.)
+//
+// - If we found a wildcard match and we are sure that the match
+// is not an empty non-terminal, return the result taking into account CNAME,
+// on a zone cut, and NXRRSET.
+// (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com revealed no subdomains, so we can use the
+// wildcard match we found.)
+//
+// - If we found a more specified match, the wildcard search
+// is canceled, resulting in NXDOMAIN. (E.g. searching for a match
+// for c.b.a.example.com, we found that b.a.example.com did
+// not exist but that *.a.example.com. did. Checking
+// b.a.example.com found subdomains. So b.a.example.com is
+// an empty non-terminal and so should not be returned in
+// the wildcard matching process. In other words,
+// b.a.example.com does exist in the DNS space, it just doesn't
+// have any RRs associated with it.)
+//
+// - If we found a match, but it is an empty non-terminal asterisk (e.g.
+// subdomain.*.example.com. is present, but there is nothing at
+// *.example.com.), return an NXRRSET indication;
+// the wildcard exists in the DNS space, there's just nothing
+// associated with it. If DNSSEC data is required, return the
+// covering NSEC record.
+//
+// If none of the above applies in any level, the search fails with NXDOMAIN.
+ZoneFinder::FindResult
+DatabaseClient::Finder::findWildcardMatch(
+ const isc::dns::Name& name, const isc::dns::RRType& type,
+ const FindOptions options, const DelegationSearchResult& dresult)
+{
+ // Note that during the search we are going to search not only for the
+ // requested type, but also for types that indicate a delegation -
+ // NS and DNAME.
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+
+ for (size_t i = 1; i <= (name.getLabelCount() - dresult.last_known); ++i) {
+
+ // Strip off the left-most label(s) in the name and replace with a "*".
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
+
+ // TODO Add a check for DNAME, as DNAME wildcards are discouraged (see
+ // RFC 4592 section 4.4).
+ // Search for a match. The types are the same as with original query.
+ FoundRRsets found = getRRsets(wildcard, final_types, true,
+ &construct_name);
+ if (found.first) {
+ // Found something - but what?
+
+ if (dresult.first_ns) {
+ // About to use first_ns. The only way this can be set is if
+ // we are searching for glue, so do a sanity check.
+ if ((options & FIND_GLUE_OK) == 0) {
+ isc_throw(Unexpected, "Inconsistent conditions during "
+ "cancel of wilcard search for " <<
+ name.toText() << ": find_ns non-null when not "
+ "processing glue request");
+ }
- // NS records, CNAME record and Wanted Type records
- const FoundIterator nsi(found.second.find(RRType::NS()));
- const FoundIterator cni(found.second.find(RRType::CNAME()));
- const FoundIterator wti(found.second.find(type));
- if (name != origin && !glue_ok && nsi != found.second.end()) {
- // There's a delegation at the exact node.
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
- arg(accessor_->getDBName()).arg(name);
- result_status = DELEGATION;
- result_rrset = nsi->second;
- } else if (type != isc::dns::RRType::CNAME() &&
- cni != found.second.end()) {
- // A CNAME here
- result_status = CNAME;
- result_rrset = cni->second;
- if (result_rrset->getRdataCount() != 1) {
- isc_throw(DataSourceError, "CNAME with " <<
- result_rrset->getRdataCount() <<
- " rdata at " << name << ", expected 1");
- }
- } else if (wti != found.second.end()) {
- // Just get the answer
- result_rrset = wti->second;
- } else if (!records_found) {
- // Nothing lives here.
- // But check if something lives below this
- // domain and if so, pretend something is here as well.
- if (hasSubdomains(name.toText())) {
+ // Wildcard match for a glue below a delegation point
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
- arg(accessor_->getDBName()).arg(name);
- records_found = true;
- get_cover = dnssec_data;
- } else if ((options & NO_WILDCARD) != 0) {
- // If wildcard check is disabled, the search will ultimately
- // terminate with NXDOMAIN. If DNSSEC is enabled, flag that
- // we need to get the NSEC records to prove this.
- if (dnssec_data) {
- get_cover = true;
- }
+ DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(dresult.first_ns->getName());
+ return (ZoneFinder::FindResult(DELEGATION, dresult.first_ns));
+
+ } else if (!hasSubdomains(name.split(i - 1).toText())) {
+ // The wildcard match is the best one, find the final result
+ // at it. Note that wildcard should never be the zone origin.
+ return (findOnNameResult(name, type, options, false,
+ found, &wildcard));
} else {
- // It's not empty non-terminal. So check for wildcards.
- // We remove labels one by one and look for the wildcard there.
- // Go up to first non-empty domain.
- for (size_t i(1); i <= current_label_count - last_known; ++i) {
- // Construct the name with *
- const Name superdomain(name.split(i));
- const string wildcard("*." + superdomain.toText());
- const string construct_name(name.toText());
- // TODO What do we do about DNAME here?
- // The types are the same as with original query
- found = getRRsets(wildcard, final_types, true,
- &construct_name);
- if (found.first) {
- if (first_ns) {
- // In case we are under NS, we don't
- // wildcard-match, but return delegation
- result_rrset = first_ns;
- result_status = DELEGATION;
- records_found = true;
- // We pretend to switch to non-glue_ok mode
- glue_ok = false;
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_CANCEL_NS).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(first_ns->getName());
- } else if (!hasSubdomains(name.split(i - 1).toText()))
- {
- // Nothing we added as part of the * can exist
- // directly, as we go up only to first existing
- // domain, but it could be empty non-terminal. In
- // that case, we need to cancel the match.
- records_found = true;
- const FoundIterator
- cni(found.second.find(RRType::CNAME()));
- const FoundIterator
- nsi(found.second.find(RRType::NS()));
- const FoundIterator
- nci(found.second.find(RRType::NSEC()));
- const FoundIterator wti(found.second.find(type));
- if (cni != found.second.end() &&
- type != RRType::CNAME()) {
- result_rrset = cni->second;
- result_status = WILDCARD_CNAME;
- } else if (nsi != found.second.end()) {
- result_rrset = nsi->second;
- result_status = DELEGATION;
- } else if (wti != found.second.end()) {
- result_rrset = wti->second;
- result_status = WILDCARD;
- } else {
- // NXRRSET case in the wildcard
- result_status = WILDCARD_NXRRSET;
- if (dnssec_data &&
- nci != found.second.end()) {
- // User wants a proof the wildcard doesn't
- // contain it
- //
- // However, we need to get the RRset in the
- // name of the wildcard, not the constructed
- // one, so we walk it again
- found = getRRsets(wildcard, NSEC_TYPES(),
- true);
- result_rrset =
- found.second.find(RRType::NSEC())->
- second;
- }
- }
-
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name);
- } else {
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name).arg(superdomain);
- }
- break;
- } else if (hasSubdomains(wildcard)) {
- // Empty non-terminal asterisk
- records_found = true;
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_WILDCARD_EMPTY).
- arg(accessor_->getDBName()).arg(wildcard).
- arg(name);
- if (dnssec_data) {
- result_rrset = findNSECCover(Name(wildcard));
- if (result_rrset) {
- result_status = WILDCARD_NXRRSET;
- }
- }
- break;
- }
- }
- // This is the NXDOMAIN case (nothing found anywhere). If
- // they want DNSSEC data, try getting the NSEC record
- if (dnssec_data && !records_found) {
- get_cover = true;
- }
+
+ // more specified match found, cancel wildcard match
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name).arg(superdomain);
+ return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
}
- } else if (dnssec_data) {
- // This is the "usual" NXRRSET case
- // So in case they want DNSSEC, provide the NSEC
- // (which should be available already here)
- result_status = NXRRSET;
- const FoundIterator nci(found.second.find(RRType::NSEC()));
- if (nci != found.second.end()) {
- result_rrset = nci->second;
+
+ } else if (hasSubdomains(wildcard)) {
+ // an empty non-terminal asterisk
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_EMPTY).
+ arg(accessor_->getDBName()).arg(wildcard).arg(name);
+ if ((options & FIND_DNSSEC) != 0) {
+ ConstRRsetPtr nsec = findNSECCover(Name(wildcard));
+ if (nsec) {
+ return (ZoneFinder::FindResult(WILDCARD_NXRRSET, nsec));
+ }
}
+ return (ZoneFinder::FindResult(NXRRSET, ConstRRsetPtr()));
}
}
- if (!result_rrset) {
- if (result_status == SUCCESS) {
- // Should we look for NSEC covering the name?
- if (get_cover) {
- result_rrset = findNSECCover(name);
- if (result_rrset) {
- result_status = NXDOMAIN;
- }
+ // Nothing found at any level.
+ return (ZoneFinder::FindResult(NXDOMAIN, ConstRRsetPtr()));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::logAndCreateResult(
+ const Name& name, const string* wildname, const RRType& type,
+ ZoneFinder::Result code, ConstRRsetPtr rrset,
+ const isc::log::MessageID& log_id) const
+{
+ if (rrset) {
+ if (wildname == NULL) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*rrset);
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*wildname).arg(*rrset);
+ }
+ } else {
+ if (wildname == NULL) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass());
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, log_id).
+ arg(accessor_->getDBName()).arg(name).arg(type).
+ arg(getClass()).arg(*wildname);
+ }
+ }
+ return (ZoneFinder::FindResult(code, rrset));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::findOnNameResult(const Name& name,
+ const RRType& type,
+ const FindOptions options,
+ const bool is_origin,
+ const FoundRRsets& found,
+ const string* wildname)
+{
+ const bool wild = (wildname != NULL);
+
+ // Get iterators for the different types of records we are interested in -
+ // CNAME, NS and Wanted types.
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+
+ if (!is_origin && ((options & FIND_GLUE_OK) == 0) &&
+ nsi != found.second.end()) {
+ // A NS RRset was found at the domain we were searching for. As it is
+ // not at the origin of the zone, it is a delegation and indicates that
+ // this zone is not authoritative for the data. Just return the
+ // delegation information.
+ return (logAndCreateResult(name, wildname, type, DELEGATION,
+ nsi->second,
+ wild ? DATASRC_DATABASE_WILDCARD_NS :
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT));
+
+ } else if (type != RRType::CNAME() && cni != found.second.end()) {
+ // We are not searching for a CNAME but nevertheless we have found one
+ // at the name we are searching so we return it. (The caller may
+ // want to continue the lookup by replacing the query name with the
+ // canonical name and the original RR type.) First though, do a sanity
+ // check to ensure that there is only one RR in the CNAME RRset.
+ if (cni->second->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ cni->second->getRdataCount() << " rdata at " << name <<
+ ", expected 1");
+ }
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD_CNAME : CNAME, cni->second,
+ wild ? DATASRC_DATABASE_WILDCARD_CNAME :
+ DATASRC_DATABASE_FOUND_CNAME));
+
+ } else if (wti != found.second.end()) {
+ // Found an RR matching the query, so return it. (Note that this
+ // includes the case where we were explicitly querying for a CNAME and
+ // found it. It also includes the case where we were querying for an
+ // NS RRset and found it at the apex of the zone.)
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD : SUCCESS, wti->second,
+ wild ? DATASRC_DATABASE_WILDCARD_MATCH :
+ DATASRC_DATABASE_FOUND_RRSET));
+ }
+
+ // If we get here, we have found something at the requested name but not
+ // one of the RR types we were interested in. This is the NXRRSET case so
+ // return the appropriate status. If DNSSEC information was requested,
+ // provide the NSEC records. If it's for wildcard, we need to get the
+ // NSEC records in the name of the wildcard, not the substituted one,
+ // so we need to search the tree again.
+ ConstRRsetPtr nsec_rrset; // possibly used with DNSSEC, otherwise NULL
+ if ((options & FIND_DNSSEC) != 0) {
+ if (wild) {
+ const FoundRRsets wfound = getRRsets(*wildname, NSEC_TYPES(),
+ true);
+ const FoundIterator nci = wfound.second.find(RRType::NSEC());
+ if (nci != wfound.second.end()) {
+ nsec_rrset = nci->second;
}
- // Something is not here and we didn't decide yet what
- if (records_found) {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXRRSET)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXRRSET;
- } else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXDOMAIN)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXDOMAIN;
+ } else {
+ const FoundIterator nci = found.second.find(RRType::NSEC());
+ if (nci != found.second.end()) {
+ nsec_rrset = nci->second;
}
}
+ }
+ if (nsec_rrset) {
+ // This log message covers both normal and wildcard cases, so we pass
+ // NULL for 'wildname'.
+ return (logAndCreateResult(name, NULL, type,
+ wild ? WILDCARD_NXRRSET : NXRRSET,
+ nsec_rrset,
+ DATASRC_DATABASE_FOUND_NXRRSET_NSEC));
+ }
+ return (logAndCreateResult(name, wildname, type,
+ wild ? WILDCARD_NXRRSET : NXRRSET, nsec_rrset,
+ wild ? DATASRC_DATABASE_WILDCARD_NXRRSET :
+ DATASRC_DATABASE_FOUND_NXRRSET));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
+ FindOptions options,
+ const DelegationSearchResult& dresult)
+{
+ const bool dnssec_data = ((options & FIND_DNSSEC) != 0);
+
+ // On entry to this method, we know that the database doesn't have any
+ // entry for this name. Before returning NXDOMAIN, we need to check
+ // for special cases.
+
+ if (hasSubdomains(name.toText())) {
+ // Does the domain have a subdomain (i.e. it is an empty non-terminal)?
+ // If so, return NXRRSET instead of NXDOMAIN (as although the name does
+ // not exist in the database, it does exist in the DNS tree).
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+ arg(accessor_->getDBName()).arg(name);
+ return (FindResult(NXRRSET, dnssec_data ? findNSECCover(name) :
+ ConstRRsetPtr()));
+
+ } else if ((options & NO_WILDCARD) == 0) {
+ // It's not an empty non-terminal and wildcard matching is not
+ // disabled, so check for wildcards. If there is a wildcard match
+ // (i.e. all results except NXDOMAIN) return it; otherwise fall
+ // through to the NXDOMAIN case below.
+ const ZoneFinder::FindResult wresult =
+ findWildcardMatch(name, type, options, dresult);
+ if (wresult.code != NXDOMAIN) {
+ return (FindResult(wresult.code, wresult.rrset));
+ }
+ }
+
+ // All avenues to find a match are now exhausted, return NXDOMAIN (plus
+ // NSEC records if requested).
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_NO_MATCH).
+ arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+ return (FindResult(NXDOMAIN, dnssec_data ? findNSECCover(name) :
+ ConstRRsetPtr()));
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
+
+ // First, go through all superdomains from the origin down, searching for
+ // nodes that indicate a delegation (i.e. NS or DNAME, ignoring NS records
+ // at the apex). If one is found, the search stops there.
+ //
+ // (In fact there could be RRs in the database corresponding to subdomains
+ // of the delegation. The reason we do the search for the delegations
+ // first is because the delegation means that another zone is authoritative
+ // for the data and so should be consulted to retrieve it. RRs below
+ // this delegation point can be found in a search for glue but not
+ // otherwise; in the latter case they are said to be occluded by the
+ // presence of the delegation.)
+ const DelegationSearchResult dresult = findDelegationPoint(name, options);
+ if (dresult.rrset) {
+ return (FindResult(dresult.code, dresult.rrset));
+ }
+
+ // If there is no delegation, look for the exact match to the request
+ // name/type/class. However, there are special cases:
+ // - Requested name has a singleton CNAME record associated with it
+ // - Requested name is a delegation point (NS only but not at the zone
+ // apex - DNAME is ignored here as it redirects DNS names subordinate to
+ // the owner name - the owner name itself is not redirected.)
+ const bool is_origin = (name == getOrigin());
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ const FoundRRsets found = getRRsets(name.toText(), final_types,
+ !is_origin);
+
+ if (found.first) {
+ // Something found at the domain name. Look into it further to get
+ // the final result.
+ return (findOnNameResult(name, type, options, is_origin, found, NULL));
} else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_RRSET)
- .arg(accessor_->getDBName()).arg(*result_rrset);
+ // Did not find anything at all at the domain name, so check for
+ // subdomains or wildcards.
+ return (findNoNameResult(name, type, options, dresult));
}
- return (FindResult(result_status, result_rrset));
}
Name
@@ -668,10 +841,9 @@ DatabaseClient::Finder::findPreviousName(const Name& name) const {
try {
return (Name(str));
}
- /*
- * To avoid having the same code many times, we just catch all the
- * exceptions and handle them in a common code below
- */
+
+ // To avoid having the same code many times, we just catch all the
+ // exceptions and handle them in a common code below
catch (const isc::dns::EmptyLabel&) {}
catch (const isc::dns::TooLongLabel&) {}
catch (const isc::dns::BadLabelType&) {}
@@ -694,24 +866,22 @@ DatabaseClient::Finder::getClass() const {
namespace {
-/*
- * This needs, beside of converting all data from textual representation, group
- * together rdata of the same RRsets. To do this, we hold one row of data ahead
- * of iteration. When we get a request to provide data, we create it from this
- * data and load a new one. If it is to be put to the same rrset, we add it.
- * Otherwise we just return what we have and keep the row as the one ahead
- * for next time.
- */
+/// This needs, besides converting all data from textual representation, to group
+/// together rdata of the same RRsets. To do this, we hold one row of data ahead
+/// of iteration. When we get a request to provide data, we create it from this
+/// data and load a new one. If it is to be put to the same rrset, we add it.
+/// Otherwise we just return what we have and keep the row as the one ahead
+/// for next time.
class DatabaseIterator : public ZoneIterator {
public:
DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
const Name& zone_name,
const RRClass& rrclass,
- bool adjust_ttl) :
+ bool separate_rrs) :
accessor_(accessor),
class_(rrclass),
ready_(true),
- adjust_ttl_(adjust_ttl)
+ separate_rrs_(separate_rrs)
{
// Get the zone
const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
@@ -769,20 +939,19 @@ public:
const RRType rtype(rtype_str);
RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
- if (adjust_ttl_) {
- if (ttl_ != ttl) {
- if (ttl < ttl_) {
- ttl_ = ttl;
- rrset->setTTL(RRTTL(ttl));
- }
- LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
- arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
}
- } else if (ttl_ != ttl) {
- break;
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
}
rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
getData();
+ if (separate_rrs_) {
+ break;
+ }
}
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
arg(rrset->getName()).arg(rrset->getType());
@@ -814,18 +983,18 @@ private:
string name_, rtype_, rdata_, ttl_;
// Whether to modify differing TTL values, or treat a different TTL as
// a different RRset
- bool adjust_ttl_;
+ bool separate_rrs_;
};
}
ZoneIteratorPtr
DatabaseClient::getIterator(const isc::dns::Name& name,
- bool adjust_ttl) const
+ bool separate_rrs) const
{
ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
accessor_->clone(), name,
- rrclass_, adjust_ttl));
+ rrclass_, separate_rrs));
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
arg(name);
@@ -838,10 +1007,12 @@ DatabaseClient::getIterator(const isc::dns::Name& name,
class DatabaseUpdater : public ZoneUpdater {
public:
DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
- const Name& zone_name, const RRClass& zone_class) :
+ const Name& zone_name, const RRClass& zone_class,
+ bool journaling) :
committed_(false), accessor_(accessor), zone_id_(zone_id),
db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
- zone_class_(zone_class),
+ zone_class_(zone_class), journaling_(journaling),
+ diff_phase_(NOT_STARTED), serial_(0),
finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
{
logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -877,45 +1048,97 @@ public:
virtual void commit();
private:
+ // A short cut typedef only for making the code shorter.
+ typedef DatabaseAccessor Accessor;
+
bool committed_;
shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
const string db_name_;
const string zone_name_;
const RRClass zone_class_;
+ const bool journaling_;
+ // For the journals
+ enum DiffPhase {
+ NOT_STARTED,
+ DELETE,
+ ADD
+ };
+ DiffPhase diff_phase_;
+ Serial serial_;
boost::scoped_ptr<DatabaseClient::Finder> finder_;
+
+ // This is a set of validation checks commonly used for addRRset() and
+ // deleteRRset to minimize duplicate code logic and to make the main
+ // code concise.
+ void validateAddOrDelete(const char* const op_str, const RRset& rrset,
+ DiffPhase prev_phase,
+ DiffPhase current_phase) const;
};
void
-DatabaseUpdater::addRRset(const RRset& rrset) {
+DatabaseUpdater::validateAddOrDelete(const char* const op_str,
+ const RRset& rrset,
+ DiffPhase prev_phase,
+ DiffPhase current_phase) const
+{
if (committed_) {
- isc_throw(DataSourceError, "Add attempt after commit to zone: "
+ isc_throw(DataSourceError, op_str << " attempt after commit to zone: "
<< zone_name_ << "/" << zone_class_);
}
+ if (rrset.getRdataCount() == 0) {
+ isc_throw(DataSourceError, op_str << " attempt with an empty RRset: "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
if (rrset.getClass() != zone_class_) {
- isc_throw(DataSourceError, "An RRset of a different class is being "
- << "added to " << zone_name_ << "/" << zone_class_ << ": "
+ isc_throw(DataSourceError, op_str << " attempt for a different class "
+ << zone_name_ << "/" << zone_class_ << ": "
<< rrset.toText());
}
if (rrset.getRRsig()) {
- isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+ isc_throw(DataSourceError, op_str << " attempt for RRset with RRSIG "
<< zone_name_ << "/" << zone_class_ << ": "
<< rrset.toText());
}
+ if (journaling_) {
+ const RRType rrtype(rrset.getType());
+ if (rrtype == RRType::SOA() && diff_phase_ != prev_phase) {
+ isc_throw(isc::BadValue, op_str << " attempt in an invalid "
+ << "diff phase: " << diff_phase_ << ", rrset: " <<
+ rrset.toText());
+ }
+ if (rrtype != RRType::SOA() && diff_phase_ != current_phase) {
+ isc_throw(isc::BadValue, "diff state change by non SOA: "
+ << rrset.toText());
+ }
+ }
+}
+
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+ validateAddOrDelete("add", rrset, DELETE, ADD);
+ // It's guaranteed rrset has at least one RDATA at this point.
RdataIteratorPtr it = rrset.getRdataIterator();
- if (it->isLast()) {
- isc_throw(DataSourceError, "An empty RRset is being added for "
- << rrset.getName() << "/" << zone_class_ << "/"
- << rrset.getType());
- }
- string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
- columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
- columns[DatabaseAccessor::ADD_REV_NAME] =
- rrset.getName().reverse().toText();
- columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
- columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+ string columns[Accessor::ADD_COLUMN_COUNT]; // initialized with ""
+ columns[Accessor::ADD_NAME] = rrset.getName().toText();
+ columns[Accessor::ADD_REV_NAME] = rrset.getName().reverse().toText();
+ columns[Accessor::ADD_TTL] = rrset.getTTL().toText();
+ columns[Accessor::ADD_TYPE] = rrset.getType().toText();
+ string journal[Accessor::DIFF_PARAM_COUNT];
+ if (journaling_) {
+ journal[Accessor::DIFF_NAME] = columns[Accessor::ADD_NAME];
+ journal[Accessor::DIFF_TYPE] = columns[Accessor::ADD_TYPE];
+ journal[Accessor::DIFF_TTL] = columns[Accessor::ADD_TTL];
+ diff_phase_ = ADD;
+ if (rrset.getType() == RRType::SOA()) {
+ serial_ =
+ dynamic_cast<const generic::SOA&>(it->getCurrent()).
+ getSerial();
+ }
+ }
for (; !it->isLast(); it->next()) {
if (rrset.getType() == RRType::RRSIG()) {
// XXX: the current interface (based on the current sqlite3
@@ -925,43 +1148,53 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
// the interface, but until then we have to conform to the schema.
const generic::RRSIG& rrsig_rdata =
dynamic_cast<const generic::RRSIG&>(it->getCurrent());
- columns[DatabaseAccessor::ADD_SIGTYPE] =
+ columns[Accessor::ADD_SIGTYPE] =
rrsig_rdata.typeCovered().toText();
}
- columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+ columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
+ if (journaling_) {
+ journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
+ accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+ Accessor::DIFF_ADD, journal);
+ }
accessor_->addRecordToZone(columns);
}
}
void
DatabaseUpdater::deleteRRset(const RRset& rrset) {
- if (committed_) {
- isc_throw(DataSourceError, "Delete attempt after commit on zone: "
- << zone_name_ << "/" << zone_class_);
- }
- if (rrset.getClass() != zone_class_) {
- isc_throw(DataSourceError, "An RRset of a different class is being "
- << "deleted from " << zone_name_ << "/" << zone_class_
- << ": " << rrset.toText());
- }
- if (rrset.getRRsig()) {
- isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
- << zone_name_ << "/" << zone_class_ << ": "
- << rrset.toText());
+ // If this is the first operation, pretend we are starting a new delete
+ // sequence after adds. This will simplify the validation below.
+ if (diff_phase_ == NOT_STARTED) {
+ diff_phase_ = ADD;
}
+ validateAddOrDelete("delete", rrset, ADD, DELETE);
+
RdataIteratorPtr it = rrset.getRdataIterator();
- if (it->isLast()) {
- isc_throw(DataSourceError, "An empty RRset is being deleted for "
- << rrset.getName() << "/" << zone_class_ << "/"
- << rrset.getType());
- }
- string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
- params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
- params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+ string params[Accessor::DEL_PARAM_COUNT]; // initialized with ""
+ params[Accessor::DEL_NAME] = rrset.getName().toText();
+ params[Accessor::DEL_TYPE] = rrset.getType().toText();
+ string journal[Accessor::DIFF_PARAM_COUNT];
+ if (journaling_) {
+ journal[Accessor::DIFF_NAME] = params[Accessor::DEL_NAME];
+ journal[Accessor::DIFF_TYPE] = params[Accessor::DEL_TYPE];
+ journal[Accessor::DIFF_TTL] = rrset.getTTL().toText();
+ diff_phase_ = DELETE;
+ if (rrset.getType() == RRType::SOA()) {
+ serial_ =
+ dynamic_cast<const generic::SOA&>(it->getCurrent()).
+ getSerial();
+ }
+ }
for (; !it->isLast(); it->next()) {
- params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+ params[Accessor::DEL_RDATA] = it->getCurrent().toText();
+ if (journaling_) {
+ journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
+ accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+ Accessor::DIFF_DELETE, journal);
+ }
accessor_->deleteRecordInZone(params);
}
}
@@ -973,6 +1206,9 @@ DatabaseUpdater::commit() {
<< zone_name_ << "/" << zone_class_ << " on "
<< db_name_);
}
+ if (journaling_ && diff_phase_ == DELETE) {
+ isc_throw(isc::BadValue, "Update sequence not complete");
+ }
accessor_->commit();
committed_ = true; // make sure the destructor won't trigger rollback
@@ -986,7 +1222,13 @@ DatabaseUpdater::commit() {
// The updater factory
ZoneUpdaterPtr
-DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
+ bool journaling) const
+{
+ if (replace && journaling) {
+ isc_throw(isc::BadValue, "Can't store journal and replace the whole "
+ "zone at the same time");
+ }
shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
const std::pair<bool, int> zone(update_accessor->startUpdateZone(
name.toText(), replace));
@@ -995,7 +1237,107 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
}
return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
- name, rrclass_)));
+ name, rrclass_, journaling)));
+}
+
+//
+// Zone journal reader using some database system as the underlying data
+// source.
+//
+class DatabaseJournalReader : public ZoneJournalReader {
+private:
+ // A shortcut typedef to keep the code concise.
+ typedef DatabaseAccessor Accessor;
+public:
+ DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+ int zone_id, const RRClass& rrclass, uint32_t begin,
+ uint32_t end) :
+ accessor_(accessor), zone_(zone), rrclass_(rrclass),
+ begin_(begin), end_(end), finished_(false)
+ {
+ context_ = accessor_->getDiffs(zone_id, begin, end);
+ }
+ virtual ~DatabaseJournalReader() {}
+ virtual ConstRRsetPtr getNextDiff() {
+ if (finished_) {
+ isc_throw(InvalidOperation,
+ "Diff read attempt past the end of sequence on "
+ << accessor_->getDBName());
+ }
+
+ string data[Accessor::COLUMN_COUNT];
+ if (!context_->getNext(data)) {
+ finished_ = true;
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_JOURNALREADER_END).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+ arg(begin_).arg(end_);
+ return (ConstRRsetPtr());
+ }
+
+ try {
+ RRsetPtr rrset(new RRset(Name(data[Accessor::NAME_COLUMN]),
+ rrclass_,
+ RRType(data[Accessor::TYPE_COLUMN]),
+ RRTTL(data[Accessor::TTL_COLUMN])));
+ rrset->addRdata(rdata::createRdata(rrset->getType(), rrclass_,
+ data[Accessor::RDATA_COLUMN]));
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_JOURNALREADER_NEXT).
+ arg(rrset->getName()).arg(rrset->getType()).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName());
+ return (rrset);
+ } catch (const Exception& ex) {
+ LOG_ERROR(logger, DATASRC_DATABASE_JOURNALREADR_BADDATA).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+ arg(begin_).arg(end_).arg(ex.what());
+ isc_throw(DataSourceError, "Failed to create RRset from diff on "
+ << accessor_->getDBName());
+ }
+ }
+
+private:
+ shared_ptr<Accessor> accessor_;
+ const Name zone_;
+ const RRClass rrclass_;
+ Accessor::IteratorContextPtr context_;
+ const uint32_t begin_;
+ const uint32_t end_;
+ bool finished_;
+};
+
+// The JournalReader factory
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+DatabaseClient::getJournalReader(const isc::dns::Name& zone,
+ uint32_t begin_serial,
+ uint32_t end_serial) const
+{
+ shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+ const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
+ if (!zoneinfo.first) {
+ return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+ ZoneJournalReader::NO_SUCH_ZONE,
+ ZoneJournalReaderPtr()));
+ }
+
+ try {
+ const pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> ret(
+ ZoneJournalReader::SUCCESS,
+ ZoneJournalReaderPtr(new DatabaseJournalReader(jnl_accessor,
+ zone,
+ zoneinfo.second,
+ rrclass_,
+ begin_serial,
+ end_serial)));
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_JOURNALREADER_START).arg(zone).arg(rrclass_).
+ arg(jnl_accessor->getDBName()).arg(begin_serial).arg(end_serial);
+ return (ret);
+ } catch (const NoSuchSerial&) {
+ return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+ ZoneJournalReader::NO_SUCH_VERSION,
+ ZoneJournalReaderPtr()));
+ }
}
}
}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index b3fda6d..c1b71cd 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -18,12 +18,16 @@
#include <string>
#include <boost/scoped_ptr.hpp>
+#include <boost/tuple/tuple.hpp>
#include <dns/rrclass.h>
-#include <dns/rrclass.h>
#include <dns/rrset.h>
+#include <dns/rrtype.h>
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
#include <datasrc/client.h>
+#include <datasrc/logger.h>
#include <dns/name.h>
#include <exceptions/exceptions.h>
@@ -34,46 +38,41 @@
namespace isc {
namespace datasrc {
-/**
- * \brief Abstraction of lowlevel database with DNS data
- *
- * This class is defines interface to databases. Each supported database
- * will provide methods for accessing the data stored there in a generic
- * manner. The methods are meant to be low-level, without much or any knowledge
- * about DNS and should be possible to translate directly to queries.
- *
- * On the other hand, how the communication with database is done and in what
- * schema (in case of relational/SQL database) is up to the concrete classes.
- *
- * This class is non-copyable, as copying connections to database makes little
- * sense and will not be needed.
- *
- * \todo Is it true this does not need to be copied? For example the zone
- * iterator might need it's own copy. But a virtual clone() method might
- * be better for that than copy constructor.
- *
- * \note The same application may create multiple connections to the same
- * database, having multiple instances of this class. If the database
- * allows having multiple open queries at one connection, the connection
- * class may share it.
- */
+/// \brief Abstraction of lowlevel database with DNS data
+///
+/// This class defines the interface to databases. Each supported database
+/// will provide methods for accessing the data stored there in a generic
+/// manner. The methods are meant to be low-level, without much or any knowledge
+/// about DNS and should be possible to translate directly to queries.
+///
+/// On the other hand, how the communication with database is done and in what
+/// schema (in case of relational/SQL database) is up to the concrete classes.
+///
+/// This class is non-copyable, as copying connections to database makes little
+/// sense and will not be needed.
+///
+/// \todo Is it true this does not need to be copied? For example the zone
+/// iterator might need its own copy. But a virtual clone() method might
+/// be better for that than copy constructor.
+///
+/// \note The same application may create multiple connections to the same
+/// database, having multiple instances of this class. If the database
+/// allows having multiple open queries at one connection, the connection
+/// class may share it.
class DatabaseAccessor : boost::noncopyable {
public:
- /**
- * Definitions of the fields as they are required to be filled in
- * by IteratorContext::getNext()
- *
- * When implementing getNext(), the columns array should
- * be filled with the values as described in this enumeration,
- * in this order, i.e. TYPE_COLUMN should be the first element
- * (index 0) of the array, TTL_COLUMN should be the second element
- * (index 1), etc.
- */
+ /// \brief Data columns filled in by IteratorContext::getNext()
+ ///
+ /// When implementing getNext(), the columns array should be filled with
+ /// the values as described in this enumeration, in this order, i.e.
+ /// - TYPE_COLUMN should be the first element (index 0) of the array,
+ /// - TTL_COLUMN should be the second element (index 1),
+ /// - etc.
enum RecordColumns {
TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
TTL_COLUMN = 1, ///< The TTL of the record (a
- SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
- ///< the RRSIG covers. In the current implementation,
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPEs
+ ///< the RRSIG covers. In the current implementation,
///< this field is ignored.
RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
NAME_COLUMN = 4, ///< The domain name of this RR
@@ -81,31 +80,26 @@ public:
///< the largest other element in this enum plus 1.
};
- /**
- * Definitions of the fields to be passed to addRecordToZone().
- *
- * Each derived implementation of addRecordToZone() should expect
- * the "columns" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to addRecordToZone()
+ ///
+ /// Each derived implementation of addRecordToZone() should expect
+ /// the "columns" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum AddRecordColumns {
- ADD_NAME = 0, ///< The owner name of the record (a domain name)
- ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
- ADD_TTL = 2, ///< The TTL of the record (in numeric form)
- ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
- ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
- ///< the RRSIG covers.
- ADD_RDATA = 5, ///< Full text representation of the record's RDATA
+ ADD_NAME = 0, ///< The owner name of the record (a domain name)
+ ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+ ADD_TTL = 2, ///< The TTL of the record (in numeric form)
+ ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
+ ADD_SIGTYPE = 4, ///< RRSIGs only: RRTYPEs the RRSIG covers.
+ ADD_RDATA = 5, ///< Full text representation of the record's RDATA
ADD_COLUMN_COUNT = 6 ///< Number of columns
};
- /**
- * Definitions of the fields to be passed to deleteRecordInZone().
- *
- * Each derived implementation of deleteRecordInZone() should expect
- * the "params" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to deleteRecordInZone()
+ ///
+ /// Each derived implementation of deleteRecordInZone() should expect
+ /// the "params" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum DeleteRecordParams {
DEL_NAME = 0, ///< The owner name of the record (a domain name)
DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
@@ -113,168 +107,199 @@ public:
DEL_PARAM_COUNT = 3 ///< Number of parameters
};
- /**
- * Operation mode when adding a record diff.
- *
- * This is used as the "operation" parameter value of addRecordDiff().
- */
+ /// \brief Operation mode when adding a record diff.
+ ///
+ /// This is used as the "operation" parameter value of addRecordDiff().
enum DiffOperation {
DIFF_ADD = 0, ///< This diff is for adding an RR
DIFF_DELETE = 1 ///< This diff is for deleting an RR
};
- /**
- * Definitions of the fields to be passed to addRecordDiff().
- *
- * Each derived implementation of addRecordDiff() should expect
- * the "params" array to be filled with the values as described in this
- * enumeration, in this order.
- */
+ /// \brief Definitions of the fields to be passed to addRecordDiff().
+ ///
+ /// Each derived implementation of addRecordDiff() should expect
+ /// the "params" array to be filled with the values as described in this
+ /// enumeration, in this order.
enum DiffRecordParams {
- DIFF_NAME = 0, ///< The owner name of the record (a domain name)
- DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
- DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
- DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+ DIFF_NAME = 0, ///< Owner name of the record (a domain name)
+ DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
+ DIFF_RDATA = 3, ///< Full text representation of record's RDATA
DIFF_PARAM_COUNT = 4 ///< Number of parameters
};
- /**
- * \brief Destructor
- *
- * It is empty, but needs a virtual one, since we will use the derived
- * classes in polymorphic way.
- */
+ /// \brief Destructor
+ ///
+ /// It is empty, but needs a virtual one, since we will use the derived
+ /// classes in polymorphic way.
virtual ~DatabaseAccessor() { }
- /**
- * \brief Retrieve a zone identifier
- *
- * This method looks up a zone for the given name in the database. It
- * should match only exact zone name (eg. name is equal to the zone's
- * apex), as the DatabaseClient will loop trough the labels itself and
- * find the most suitable zone.
- *
- * It is not specified if and what implementation of this method may throw,
- * so code should expect anything.
- *
- * \param name The (fully qualified) domain name of the zone's apex to be
- * looked up.
- * \return The first part of the result indicates if a matching zone
- * was found. In case it was, the second part is internal zone ID.
- * This one will be passed to methods finding data in the zone.
- * It is not required to keep them, in which case whatever might
- * be returned - the ID is only passed back to the database as
- * an opaque handle.
- */
+ /// \brief Retrieve a zone identifier
+ ///
+ /// This method looks up a zone for the given name in the database. It
+ /// should match only exact zone name (eg. name is equal to the zone's
+ /// apex), as the DatabaseClient will loop through the labels itself and
+ /// find the most suitable zone.
+ ///
+ /// It is not specified if and what implementation of this method may throw,
+ /// so code should expect anything.
+ ///
+ /// \param name The (fully qualified) domain name of the zone's apex to be
+ /// looked up.
+ /// \return The first part of the result indicates if a matching zone
+ /// was found. In case it was, the second part is internal zone ID.
+ /// This one will be passed to methods finding data in the zone.
+ /// It is not required to keep them, in which case whatever might
+ /// be returned - the ID is only passed back to the database as
+ /// an opaque handle.
virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
- /**
- * \brief This holds the internal context of ZoneIterator for databases
- *
- * While the ZoneIterator implementation from DatabaseClient does all the
- * translation from strings to DNS classes and validation, this class
- * holds the pointer to where the database is at reading the data.
- *
- * It can either hold shared pointer to the connection which created it
- * and have some kind of statement inside (in case single database
- * connection can handle multiple concurrent SQL statements) or it can
- * create a new connection (or, if it is more convenient, the connection
- * itself can inherit both from DatabaseConnection and IteratorContext
- * and just clone itself).
- */
+ /// \brief This holds the internal context of ZoneIterator for databases
+ ///
+ /// While the ZoneIterator implementation from DatabaseClient does all the
+ /// translation from strings to DNS classes and validation, this class
+ /// holds the pointer to where the database is at reading the data.
+ ///
+ /// It can either hold shared pointer to the connection which created it
+ /// and have some kind of statement inside (in case single database
+ /// connection can handle multiple concurrent SQL statements) or it can
+ /// create a new connection (or, if it is more convenient, the connection
+ /// itself can inherit both from DatabaseConnection and IteratorContext
+ /// and just clone itself).
class IteratorContext : public boost::noncopyable {
public:
- /**
- * \brief Destructor
- *
- * Virtual destructor, so any descendand class is destroyed correctly.
- */
+ /// \brief Destructor
+ ///
+ /// Virtual destructor, so any descendant class is destroyed correctly.
virtual ~IteratorContext() { }
- /**
- * \brief Function to provide next resource record
- *
- * This function should provide data about the next resource record
- * from the data that is searched. The data is not converted yet.
- *
- * Depending on how the iterator was constructed, there is a difference
- * in behaviour; for a 'full zone iterator', created with
- * getAllRecords(), all COLUMN_COUNT elements of the array are
- * overwritten.
- * For a 'name iterator', created with getRecords(), the column
- * NAME_COLUMN is untouched, since what would be added here is by
- * definition already known to the caller (it already passes it as
- * an argument to getRecords()).
- *
- * Once this function returns false, any subsequent call to it should
- * result in false. The implementation of a derived class must ensure
- * it doesn't cause any disruption due to that such as a crash or
- * exception.
- *
- * \note The order of RRs is not strictly set, but the RRs for single
- * RRset must not be interleaved with any other RRs (eg. RRsets must be
- * "together").
- *
- * \param columns The data will be returned through here. The order
- * is specified by the RecordColumns enum, and the size must be
- * COLUMN_COUNT
- * \todo Do we consider databases where it is stored in binary blob
- * format?
- * \throw DataSourceError if there's database-related error. If the
- * exception (or any other in case of derived class) is thrown,
- * the iterator can't be safely used any more.
- * \return true if a record was found, and the columns array was
- * updated. false if there was no more data, in which case
- * the columns array is untouched.
- */
+ /// \brief Function to provide next resource record
+ ///
+ /// This function should provide data about the next resource record
+ /// from the data that is searched. The data is not converted yet.
+ ///
+ /// Depending on how the iterator was constructed, there is a difference
+ /// in behaviour; for a 'full zone iterator', created with
+ /// getAllRecords(), all COLUMN_COUNT elements of the array are
+ /// overwritten.
+ /// For a 'name iterator', created with getRecords(), the column
+ /// NAME_COLUMN is untouched, since what would be added here is by
+ /// definition already known to the caller (it already passes it as
+ /// an argument to getRecords()).
+ ///
+ /// Once this function returns false, any subsequent call to it should
+ /// result in false. The implementation of a derived class must ensure
+ /// it doesn't cause any disruption due to that such as a crash or
+ /// exception.
+ ///
+ /// \note The order of RRs is not strictly set, but the RRs for single
+ /// RRset must not be interleaved with any other RRs (eg. RRsets must be
+ /// "together").
+ ///
+ /// \param columns The data will be returned through here. The order
+ /// is specified by the RecordColumns enum, and the size must be
+ /// COLUMN_COUNT
+ /// \todo Do we consider databases where it is stored in binary blob
+ /// format?
+ /// \throw DataSourceError if there's database-related error. If the
+ /// exception (or any other in case of derived class) is thrown,
+ /// the iterator can't be safely used any more.
+ /// \return true if a record was found, and the columns array was
+ /// updated. false if there was no more data, in which case
+ /// the columns array is untouched.
virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
};
typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
- /**
- * \brief Creates an iterator context for a specific name.
- *
- * Returns an IteratorContextPtr that contains all records of the
- * given name from the given zone.
- *
- * The implementation of the iterator that is returned may leave the
- * NAME_COLUMN column of the array passed to getNext() untouched, as that
- * data is already known (it is the same as the name argument here)
- *
- * \exception any Since any implementation can be used, the caller should
- * expect any exception to be thrown.
- *
- * \param name The name to search for. This should be a FQDN.
- * \param id The ID of the zone, returned from getZone().
- * \param subdomains If set to true, match subdomains of name instead
- * of name itself. It is used to find empty domains and match
- * wildcards.
- * \return Newly created iterator context. Must not be NULL.
- */
+ /// \brief Creates an iterator context for a specific name.
+ ///
+ /// Returns an IteratorContextPtr that contains all records of the
+ /// given name from the given zone.
+ ///
+ /// The implementation of the iterator that is returned may leave the
+ /// NAME_COLUMN column of the array passed to getNext() untouched, as that
+ /// data is already known (it is the same as the name argument here)
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param name The name to search for. This should be a FQDN.
+ /// \param id The ID of the zone, returned from getZone().
+ /// \param subdomains If set to true, match subdomains of name instead
+ /// of name itself. It is used to find empty domains and match
+ /// wildcards.
+ /// \return Newly created iterator context. Must not be NULL.
virtual IteratorContextPtr getRecords(const std::string& name,
int id,
bool subdomains = false) const = 0;
- /**
- * \brief Creates an iterator context for the whole zone.
- *
- * Returns an IteratorContextPtr that contains all records of the
- * zone with the given zone id.
- *
- * Each call to getNext() on the returned iterator should copy all
- * column fields of the array that is passed, as defined in the
- * RecordColumns enum.
- *
- * \exception any Since any implementation can be used, the caller should
- * expect any exception to be thrown.
- *
- * \param id The ID of the zone, returned from getZone().
- * \return Newly created iterator context. Must not be NULL.
- */
+ /// \brief Creates an iterator context for the whole zone.
+ ///
+ /// Returns an IteratorContextPtr that contains all records of the
+ /// zone with the given zone id.
+ ///
+ /// Each call to getNext() on the returned iterator should copy all
+ /// column fields of the array that is passed, as defined in the
+ /// RecordColumns enum.
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param id The ID of the zone, returned from getZone().
+ /// \return Newly created iterator context. Must not be NULL.
virtual IteratorContextPtr getAllRecords(int id) const = 0;
- /// Start a transaction for updating a zone.
+ /// \brief Creates an iterator context for a set of differences.
+ ///
+ /// Returns an IteratorContextPtr that contains all difference records for
+ /// the given zone between two versions of a zone.
+ ///
+ /// The difference records are the set of records that would appear in an
+ /// IXFR serving a request for the difference between two versions of a
+ /// zone. The records are returned in the same order as they would be in
+ /// the IXFR. This means that if the difference between versions of a
+ /// zone with SOA serial numbers of "start" and "end" is required, and the
+ /// zone contains the differences between serial number "start" to serial
+ /// number "intermediate" and from serial number "intermediate" to serial
+ /// number "end", the returned records will be (in order):
+ ///
+ /// \li SOA for serial "start"
+ /// \li Records removed from the zone between versions "start" and
+ /// "intermediate" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "intermediate"
+ /// \li Records added to the zone between versions "start" and
+ /// "intermediate" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "intermediate"
+ /// \li Records removed from the zone between versions "intermediate" and
+ /// "end" of the zone. The order of these is not guaranteed.
+ /// \li SOA for serial "end"
+ /// \li Records added to the zone between versions "intermediate" and "end"
+ /// of the zone. The order of these is not guaranteed.
+ ///
+ /// Note that there is no requirement that "start" be less than "end".
+ /// Owing to serial number arithmetic, it is entirely possible that a later
+ /// version of a zone will have a smaller SOA serial number than an earlier
+ /// version.
+ ///
+ /// Each call to getNext() on the returned iterator should copy all column
+ /// fields of the array that is passed, as defined in the RecordColumns
+ /// enum.
+ ///
+ /// \exception any Since any implementation can be used, the caller should
+ /// expect any exception to be thrown.
+ ///
+ /// \param id The ID of the zone, returned from getZone().
+ /// \param start The SOA serial number of the version of the zone from
+ /// which the difference sequence should start.
+ /// \param end The SOA serial number of the version of the zone at which
+ /// the difference sequence should end.
+ ///
+ /// \return Newly created iterator context. Must not be NULL.
+ virtual IteratorContextPtr
+ getDiffs(int id, uint32_t start, uint32_t end) const = 0;
+
+ /// \brief Start a transaction for updating a zone.
///
/// Each derived class version of this method starts a database
/// transaction to make updates to the given name of zone (whose class was
@@ -333,7 +358,7 @@ public:
virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
bool replace) = 0;
- /// Add a single record to the zone to be updated.
+ /// \brief Add a single record to the zone to be updated.
///
/// This method provides a simple interface to insert a new record
/// (a database "row") to the zone in the update context started by
@@ -372,7 +397,7 @@ public:
virtual void addRecordToZone(
const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
- /// Delete a single record from the zone to be updated.
+ /// \brief Delete a single record from the zone to be updated.
///
/// This method provides a simple interface to delete a record
/// (a database "row") from the zone in the update context started by
@@ -409,7 +434,7 @@ public:
virtual void deleteRecordInZone(
const std::string (¶ms)[DEL_PARAM_COUNT]) = 0;
- /// Start a general transaction.
+ /// \brief Start a general transaction.
///
/// Each derived class version of this method starts a database
/// transaction in a way specific to the database details. Any subsequent
@@ -429,7 +454,7 @@ public:
/// internal database related error.
virtual void startTransaction() = 0;
- /// Commit a transaction.
+ /// \brief Commit a transaction.
///
/// This method completes a transaction started by \c startTransaction
/// or \c startUpdateZone.
@@ -452,7 +477,7 @@ public:
/// to the method or internal database error.
virtual void commit() = 0;
- /// Rollback any changes in a transaction made so far.
+ /// \brief Rollback any changes in a transaction made so far.
///
/// This method rollbacks a transaction started by \c startTransaction or
/// \c startUpdateZone. When it succeeds (it normally should, but see
@@ -478,7 +503,7 @@ public:
/// to the method or internal database error.
virtual void rollback() = 0;
- /// Install a single RR diff in difference sequences for zone update.
+ /// \brief Install a single RR diff in difference sequences for zone update.
///
/// This method inserts parameters of an update operation for a single RR
/// (either adding or deleting one) in the underlying database.
@@ -494,12 +519,10 @@ public:
/// is not for the SOA RR; it passes TTL for a diff that deletes an RR
/// while in \c deleteRecordInZone() it's omitted. This is because
/// the stored diffs are expected to be retrieved in the form that
- /// \c getRecordDiffs() is expected to meet. This means if the caller
+ /// \c getDiffs() is expected to meet. This means if the caller
/// wants to use this method with other update operations, it must
/// ensure the additional information is ready when this method is called.
///
- /// \note \c getRecordDiffs() is not yet implemented.
- ///
/// The caller of this method must ensure that the added diffs via
/// this method in a single transaction form an IXFR-style difference
/// sequences: Each difference sequence is a sequence of RRs:
@@ -512,7 +535,7 @@ public:
/// an SOA RR, \c serial must be identical to the serial of that SOA).
/// The underlying derived class implementation may or may not check
/// this condition, but if the caller doesn't meet the condition
- /// a subsequent call to \c getRecordDiffs() will not work as expected.
+ /// a subsequent call to \c getDiffs() will not work as expected.
///
/// Any call to this method must be in a transaction, and, for now,
/// it must be a transaction triggered by \c startUpdateZone() (that is,
@@ -554,7 +577,7 @@ public:
int zone_id, uint32_t serial, DiffOperation operation,
const std::string (¶ms)[DIFF_PARAM_COUNT]) = 0;
- /// Clone the accessor with the same configuration.
+ /// \brief Clone the accessor with the same configuration.
///
/// Each derived class implementation of this method will create a new
/// accessor of the same derived class with the same configuration
@@ -583,187 +606,169 @@ public:
/// \return A shared pointer to the cloned accessor.
virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
- /**
- * \brief Returns a string identifying this dabase backend
- *
- * The returned string is mainly intended to be used for
- * debugging/logging purposes.
- *
- * Any implementation is free to choose the exact string content,
- * but it is advisable to make it a name that is distinguishable
- * from the others.
- *
- * \return the name of the database
- */
+ /// \brief Returns a string identifying this database backend
+ ///
+ /// The returned string is mainly intended to be used for
+ /// debugging/logging purposes.
+ ///
+ /// Any implementation is free to choose the exact string content,
+ /// but it is advisable to make it a name that is distinguishable
+ /// from the others.
+ ///
+ /// \return the name of the database
virtual const std::string& getDBName() const = 0;
- /**
- * \brief It returns the previous name in DNSSEC order.
- *
- * This is used in DatabaseClient::findPreviousName and does more
- * or less the real work, except for working on strings.
- *
- * \param rname The name to ask for previous of, in reversed form.
- * We use the reversed form (see isc::dns::Name::reverse),
- * because then the case insensitive order of string representation
- * and the DNSSEC order correspond (eg. org.example.a is followed
- * by org.example.a.b which is followed by org.example.b, etc).
- * \param zone_id The zone to look through.
- * \return The previous name.
- * \note This function must return previous name even in case
- * the queried rname does not exist in the zone.
- * \note This method must skip under-the-zone-cut data (glue data).
- * This might be implemented by looking for NSEC records (as glue
- * data don't have them) in the zone or in some other way.
- *
- * \throw DataSourceError if there's a problem with the database.
- * \throw NotImplemented if this database doesn't support DNSSEC
- * or there's no previous name for the queried one (the NSECs
- * might be missing or the queried name is less or equal the
- * apex of the zone).
- */
+ /// \brief It returns the previous name in DNSSEC order.
+ ///
+ /// This is used in DatabaseClient::findPreviousName and does more
+ /// or less the real work, except for working on strings.
+ ///
+ /// \param rname The name to ask for previous of, in reversed form.
+ /// We use the reversed form (see isc::dns::Name::reverse),
+ /// because then the case insensitive order of string representation
+ /// and the DNSSEC order correspond (eg. org.example.a is followed
+ /// by org.example.a.b which is followed by org.example.b, etc).
+ /// \param zone_id The zone to look through.
+ /// \return The previous name.
+ /// \note This function must return previous name even in case
+ /// the queried rname does not exist in the zone.
+ /// \note This method must skip under-the-zone-cut data (glue data).
+ /// This might be implemented by looking for NSEC records (as glue
+ /// data don't have them) in the zone or in some other way.
+ ///
+ /// \throw DataSourceError if there's a problem with the database.
+ /// \throw NotImplemented if this database doesn't support DNSSEC
+ /// or there's no previous name for the queried one (the NSECs
+ /// might be missing or the queried name is less or equal the
+ /// apex of the zone).
virtual std::string findPreviousName(int zone_id,
const std::string& rname) const = 0;
};
-/**
- * \brief Concrete data source client oriented at database backends.
- *
- * This class (together with corresponding versions of ZoneFinder,
- * ZoneIterator, etc.) translates high-level data source queries to
- * low-level calls on DatabaseAccessor. It calls multiple queries
- * if necessary and validates data from the database, allowing the
- * DatabaseAccessor to be just simple translation to SQL/other
- * queries to database.
- *
- * While it is possible to subclass it for specific database in case
- * of special needs, it is not expected to be needed. This should just
- * work as it is with whatever DatabaseAccessor.
- */
+/// \brief Concrete data source client oriented at database backends.
+///
+/// This class (together with corresponding versions of ZoneFinder,
+/// ZoneIterator, etc.) translates high-level data source queries to
+/// low-level calls on DatabaseAccessor. It calls multiple queries
+/// if necessary and validates data from the database, allowing the
+/// DatabaseAccessor to be just simple translation to SQL/other
+/// queries to database.
+///
+/// While it is possible to subclass it for specific database in case
+/// of special needs, it is not expected to be needed. This should just
+/// work as it is with whatever DatabaseAccessor.
class DatabaseClient : public DataSourceClient {
public:
- /**
- * \brief Constructor
- *
- * It initializes the client with a database via the given accessor.
- *
- * \exception isc::InvalidParameter if accessor is NULL. It might throw
- * standard allocation exception as well, but doesn't throw anything else.
- *
- * \param rrclass The RR class of the zones that this client will handle.
- * \param accessor The accessor to the database to use to get data.
- * As the parameter suggests, the client takes ownership of the accessor
- * and will delete it when itself deleted.
- */
+ /// \brief Constructor
+ ///
+ /// It initializes the client with a database via the given accessor.
+ ///
+ /// \exception isc::InvalidParameter if accessor is NULL. It might throw
+ /// standard allocation exception as well, but doesn't throw anything else.
+ ///
+ /// \param rrclass The RR class of the zones that this client will handle.
+ /// \param accessor The accessor to the database to use to get data.
+ /// As the parameter suggests, the client takes ownership of the accessor
+ /// and will delete it when itself deleted.
DatabaseClient(isc::dns::RRClass rrclass,
boost::shared_ptr<DatabaseAccessor> accessor);
- /**
- * \brief Corresponding ZoneFinder implementation
- *
- * The zone finder implementation for database data sources. Similarly
- * to the DatabaseClient, it translates the queries to methods of the
- * database.
- *
- * Application should not come directly in contact with this class
- * (it should handle it trough generic ZoneFinder pointer), therefore
- * it could be completely hidden in the .cc file. But it is provided
- * to allow testing and for rare cases when a database needs slightly
- * different handling, so it can be subclassed.
- *
- * Methods directly corresponds to the ones in ZoneFinder.
- */
+ /// \brief Corresponding ZoneFinder implementation
+ ///
+ /// The zone finder implementation for database data sources. Similarly
+ /// to the DatabaseClient, it translates the queries to methods of the
+ /// database.
+ ///
+ /// Application should not come directly in contact with this class
+ /// (it should handle it through generic ZoneFinder pointer), therefore
+ /// it could be completely hidden in the .cc file. But it is provided
+ /// to allow testing and for rare cases when a database needs slightly
+ /// different handling, so it can be subclassed.
+ ///
+ /// Methods directly correspond to the ones in ZoneFinder.
class Finder : public ZoneFinder {
public:
- /**
- * \brief Constructor
- *
- * \param database The database (shared with DatabaseClient) to
- * be used for queries (the one asked for ID before).
- * \param zone_id The zone ID which was returned from
- * DatabaseAccessor::getZone and which will be passed to further
- * calls to the database.
- * \param origin The name of the origin of this zone. It could query
- * it from database, but as the DatabaseClient just searched for
- * the zone using the name, it should have it.
- */
+ /// \brief Constructor
+ ///
+ /// \param database The database (shared with DatabaseClient) to
+ /// be used for queries (the one asked for ID before).
+ /// \param zone_id The zone ID which was returned from
+ /// DatabaseAccessor::getZone and which will be passed to further
+ /// calls to the database.
+ /// \param origin The name of the origin of this zone. It could query
+ /// it from database, but as the DatabaseClient just searched for
+ /// the zone using the name, it should have it.
Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
const isc::dns::Name& origin);
+
// The following three methods are just implementations of inherited
// ZoneFinder's pure virtual methods.
virtual isc::dns::Name getOrigin() const;
virtual isc::dns::RRClass getClass() const;
- /**
- * \brief Find an RRset in the datasource
- *
- * Searches the datasource for an RRset of the given name and
- * type. If there is a CNAME at the given name, the CNAME rrset
- * is returned.
- * (this implementation is not complete, and currently only
- * does full matches, CNAMES, and the signatures for matches and
- * CNAMEs)
- * \note target was used in the original design to handle ANY
- * queries. This is not implemented yet, and may use
- * target again for that, but it might also use something
- * different. It is left in for compatibility at the moment.
- * \note options are ignored at this moment
- *
- * \note Maybe counter intuitively, this method is not a const member
- * function. This is intentional; some of the underlying implementations
- * are expected to use a database backend, and would internally contain
- * some abstraction of "database connection". In the most strict sense
- * any (even read only) operation might change the internal state of
- * such a connection, and in that sense the operation cannot be considered
- * "const". In order to avoid giving a false sense of safety to the
- * caller, we indicate a call to this method may have a surprising
- * side effect. That said, this view may be too strict and it may
- * make sense to say the internal database connection doesn't affect
- * external behavior in terms of the interface of this method. As
- * we gain more experiences with various kinds of backends we may
- * revisit the constness.
- *
- * \exception DataSourceError when there is a problem reading
- * the data from the dabase backend.
- * This can be a connection, code, or
- * data (parse) error.
- *
- * \param name The name to find
- * \param type The RRType to find
- * \param target Unused at this moment
- * \param options Options about how to search.
- * See ZoneFinder::FindOptions.
- */
+ /// \brief Find an RRset in the datasource
+ ///
+ /// Searches the datasource for an RRset of the given name and
+ /// type. If there is a CNAME at the given name, the CNAME rrset
+ /// is returned.
+ /// (this implementation is not complete, and currently only
+ /// does full matches, CNAMES, and the signatures for matches and
+ /// CNAMEs)
+ /// \note target was used in the original design to handle ANY
+ /// queries. This is not implemented yet, and may use
+ /// target again for that, but it might also use something
+ /// different. It is left in for compatibility at the moment.
+ /// \note options are ignored at this moment
+ ///
+ /// \note Maybe counter intuitively, this method is not a const member
+ /// function. This is intentional; some of the underlying
+ /// implementations are expected to use a database backend, and would
+ /// internally contain some abstraction of "database connection". In
+ /// the most strict sense any (even read only) operation might change
+ /// the internal state of such a connection, and in that sense the
+ /// operation cannot be considered "const". In order to avoid giving a
+ /// false sense of safety to the caller, we indicate a call to this
+ /// method may have a surprising side effect. That said, this view may
+ /// be too strict and it may make sense to say the internal database
+ /// connection doesn't affect external behavior in terms of the
+ /// interface of this method. As we gain more experiences with various
+ /// kinds of backends we may revisit the constness.
+ ///
+ /// \exception DataSourceError when there is a problem reading
+ /// the data from the database backend.
+ /// This can be a connection, code, or
+ /// data (parse) error.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param target Unused at this moment
+ /// \param options Options about how to search.
+ /// See ZoneFinder::FindOptions.
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options = FIND_DEFAULT);
- /**
- * \brief Implementation of ZoneFinder::findPreviousName method.
- */
+ /// \brief Implementation of ZoneFinder::findPreviousName method.
virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
const;
- /**
- * \brief The zone ID
- *
- * This function provides the stored zone ID as passed to the
- * constructor. This is meant for testing purposes and normal
- * applications shouldn't need it.
- */
+ /// \brief The zone ID
+ ///
+ /// This function provides the stored zone ID as passed to the
+ /// constructor. This is meant for testing purposes and normal
+ /// applications shouldn't need it.
int zone_id() const { return (zone_id_); }
- /**
- * \brief The database accessor.
- *
- * This function provides the database accessor stored inside as
- * passed to the constructor. This is meant for testing purposes and
- * normal applications shouldn't need it.
- */
+ /// \brief The database accessor.
+ ///
+ /// This function provides the database accessor stored inside as
+ /// passed to the constructor. This is meant for testing purposes and
+ /// normal applications shouldn't need it.
const DatabaseAccessor& getAccessor() const {
return (*accessor_);
}
+
private:
boost::shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
@@ -774,111 +779,327 @@ public:
FoundRRsets;
/// \brief Just shortcut for set of types
typedef std::set<dns::RRType> WantedTypes;
- /**
- * \brief Searches database for RRsets of one domain.
- *
- * This method scans RRs of single domain specified by name and
- * extracts any RRsets found and requested by parameters.
- *
- * It is used internally by find(), because it is called multiple
- * times (usually with different domains).
- *
- * \param name Which domain name should be scanned.
- * \param types List of types the caller is interested in.
- * \param check_ns If this is set to true, it checks nothing lives
- * together with NS record (with few little exceptions, like RRSIG
- * or NSEC). This check is meant for non-apex NS records.
- * \param construct_name If this is NULL, the resulting RRsets have
- * their name set to name. If it is not NULL, it overrides the name
- * and uses this one (this can be used for wildcard synthesized
- * records).
- * \return A pair, where the first element indicates if the domain
- * contains any RRs at all (not only the requested, it may happen
- * this is set to true, but the second part is empty). The second
- * part is map from RRtypes to RRsets of the corresponding types.
- * If the RRset is not present in DB, the RRtype is not there at
- * all (so you'll not find NULL pointer in the result).
- * \throw DataSourceError If there's a low-level error with the
- * database or the database contains bad data.
- */
+
+ /// \brief Search result of \c findDelegationPoint().
+ ///
+ /// This is a tuple combining the result of the search - a status code
+ /// and a pointer to the RRset found - together with additional
+ /// information needed for subsequent processing, an indication of
+ /// the first NS RRset found in the search and the number of labels
+ /// in the last non-empty domain encountered in the search. It is
+ /// used by \c findDelegationPoint().
+ ///
+ /// The last two items are located naturally in the search and although
+ /// not strictly part of the result, they are passed back to avoid
+ /// another (duplicate) search later in the processing.
+ ///
+ /// Note that the code and rrset elements are the same as that in
+ /// the \c ZoneFinder::FindResult struct: this structure could be
+ /// derived from that one, but as it is used just once in the code and
+ /// will never be treated as a \c FindResult, the obscurity involved in
+ /// deriving it from a parent class was deemed not worthwhile.
+ struct DelegationSearchResult {
+ DelegationSearchResult(const ZoneFinder::Result param_code,
+ const isc::dns::ConstRRsetPtr param_rrset,
+ const isc::dns::ConstRRsetPtr param_ns,
+ size_t param_last_known) :
+ code(param_code), rrset(param_rrset),
+ first_ns(param_ns),
+ last_known(param_last_known)
+ {}
+ const ZoneFinder::Result code; ///< Result code
+ const isc::dns::ConstRRsetPtr rrset; ///< RRset found
+ const isc::dns::ConstRRsetPtr first_ns; ///< First NS found
+ const size_t last_known; ///< No. labels in last non-empty domain
+ };
+
+ /// \brief Searches database for RRsets of one domain.
+ ///
+ /// This method scans RRs of single domain specified by name and
+ /// extracts any RRsets found and requested by parameters.
+ ///
+ /// It is used internally by find(), because it is called multiple
+ /// times (usually with different domains).
+ ///
+ /// \param name Which domain name should be scanned.
+ /// \param types List of types the caller is interested in.
+ /// \param check_ns If this is set to true, it checks nothing lives
+ /// together with NS record (with few little exceptions, like RRSIG
+ /// or NSEC). This check is meant for non-apex NS records.
+ /// \param construct_name If this is NULL, the resulting RRsets have
+ /// their name set to name. If it is not NULL, it overrides the name
+ /// and uses this one (this can be used for wildcard synthesized
+ /// records).
+ /// \return A pair, where the first element indicates if the domain
+ /// contains any RRs at all (not only the requested, it may happen
+ /// this is set to true, but the second part is empty). The second
+ /// part is map from RRtypes to RRsets of the corresponding types.
+ /// If the RRset is not present in DB, the RRtype is not there at
+ /// all (so you'll not find NULL pointer in the result).
+ /// \throw DataSourceError If there's a low-level error with the
+ /// database or the database contains bad data.
FoundRRsets getRRsets(const std::string& name,
const WantedTypes& types, bool check_ns,
const std::string* construct_name = NULL);
- /**
- * \brief Checks if something lives below this domain.
- *
- * This looks if there's any subdomain of the given name. It can be
- * used to test if domain is empty non-terminal.
- *
- * \param name The domain to check.
- */
+
+ /// \brief Find delegation point
+ ///
+ /// Given a name, searches through the superdomains from the origin
+ /// down, searching for a point that indicates a delegation (i.e. an
+ /// NS record or a DNAME).
+ ///
+ /// The method operates in two modes, non-glue-ok and glue-ok modes:
+ ///
+ /// In non-glue-ok mode, the search is made purely for the NS or DNAME
+ /// RR. The zone is searched from the origin down looking for one
+ /// of these RRTypes (and ignoring the NS records at the zone origin).
+ /// A status is returned indicating what is found: DNAME, DELEGATION
+ /// or SUCCESS, the last indicating that nothing was found, together
+ /// with a pointer to the relevant RR.
+ ///
+ /// In glue-ok mode, the first NS encountered in the search (apart from
+ /// the NS at the zone apex) is remembered but otherwise NS records are
+ /// ignored and the search attempts to find a DNAME. The result is
+ /// returned in the same format, along with a pointer to the first non-
+ /// apex NS (if found).
+ ///
+ /// \param name The name to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// delegation point and the type of the point (DELEGATION or
+ /// DNAME) - and associated information. This latter item
+ /// comprises two pieces of data: a pointer to the highest
+ /// encountered NS, and the number of labels in the last known
+ /// non-empty domain. The associated information is found as
+ /// a natural part of the search for the delegation point and
+ /// is used later in the find() processing; it is passed back
+ /// to avoid the need to perform a second search to obtain it.
+ DelegationSearchResult
+ findDelegationPoint(const isc::dns::Name& name,
+ const FindOptions options);
+
+ /// \brief Find wildcard match
+ ///
+ /// Having found that the name is not an empty non-terminal, this
+ /// searches the zone for wildcards that match the name.
+ ///
+ /// It searches superdomains of the name from the zone origin down
+ /// looking for a wildcard in the zone that matches the name. There
+ /// are several cases to consider:
+ ///
+ /// - If the previous search for a delegation point has found that
+ /// there is an NS at the superdomain of the point at which the
+ /// wildcard is found, the delegation is returned.
+ /// - If there is a match to the name, an appropriate status is
+ /// returned (match on requested type, delegation, cname, or just
+ /// the indication of a match but no RRs relevant to the query).
+ /// - If the match is to a non-empty non-terminal wildcard, a
+ /// wildcard NXRRSET is returned.
+ ///
+ /// Note that if DNSSEC is enabled for the search and the zone uses
+ /// NSEC for authenticated denial of existence, the search may
+ /// return NSEC records.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param dresult Result of the search through the zone for a
+ /// delegation.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// wildcard records matching the name, together with a status
+ /// indicating the match type (e.g. CNAME at the wildcard
+ /// match, no RRs of the requested type at the wildcard,
+ /// success due to an exact match). Also returned if there
+ /// is no match is an indication as to whether there was an
+ /// NXDOMAIN or an NXRRSET.
+ FindResult findWildcardMatch(
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type, const FindOptions options,
+ const DelegationSearchResult& dresult);
+
+ /// \brief Handle matching results for name
+ ///
+ /// This is called when something is found in the underlying database
+ /// whose domain name is an exact match of the name to be searched for.
+ /// It explores four possible cases to decide the final lookup result:
+ /// - The name is a zone cut due to an NS RR.
+ /// - CNAME is found (while the requested RR type is not CNAME).
+ /// In this case multiple CNAMEs are checked and rejected with
+ /// a \c DataSourceError exception.
+ /// - Requested type is not found at that name.
+ /// - A record of the requested type is found.
+ /// and returns a corresponding find result.
+ ///
+ /// This method is commonly used for normal (non wildcard) and wildcard
+ /// matches.
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param is_origin If name is the zone's origin name.
+ /// \param found A set of found RRsets in the search for the name
+ /// and type. It could contain one or more of the requested
+ /// type, CNAME, NS, and NSEC RRsets of the name.
+ /// \param wildname If non NULL, the method is called on a wildcard
+ /// match, and points to a string object representing
+ /// a textual form of the matched wildcard name;
+ /// it's NULL in the case of non wildcard match.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// wildcard records matching the name, together with a status
+ /// indicating the match type (corresponding to each of
+ /// the above 4 cases). The return value is intended to be
+ /// usable as a return value of the caller of this helper
+ /// method.
+ FindResult findOnNameResult(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const FindOptions options,
+ const bool is_origin,
+ const FoundRRsets& found,
+ const std::string* wildname);
+
+ /// \brief Handle no match for name
+ ///
+ /// This is called when it is known that there is no delegation and
+ /// there is no exact match for the name (regardless of RR types
+ /// requested). Before returning NXDOMAIN, we need to check two
+ /// cases:
+ /// - Empty non-terminal: if the name has subdomains in the database,
+ /// flag the fact. An NXRRSET will be returned (along with the
+ /// NSEC record covering the requested domain name if DNSSEC data
+ /// is being returned).
+ /// - Wildcard: is there a wildcard record in the zone that matches
+ /// requested name? If so, return it. If not, return the relevant
+ /// NSEC records (if requested).
+ ///
+ /// \param name The name to find
+ /// \param type The RRType to find
+ /// \param options Options about how to search. See the documentation
+ /// for ZoneFinder::FindOptions.
+ /// \param dresult Result of the search through the zone for a
+ /// delegation.
+ ///
+ /// \return Tuple holding the result of the search - the RRset of the
+ /// wildcard records matching the name, together with a status
+ /// indicating the match type (e.g. CNAME at the wildcard
+ /// match, no RRs of the requested type at the wildcard,
+ /// success due to an exact match).
+ FindResult findNoNameResult(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ FindOptions options,
+ const DelegationSearchResult& dresult);
+
+ /// Logs condition and creates result
+ ///
+ /// A convenience function used by findOnNameResult(), it both creates
+ /// the FindResult object that find() will return to its caller as well
+ /// as logging a debug message for the information being returned.
+ ///
+ /// \param name Domain name of the RR that was being sought.
+ /// \param wildname Domain name string of a matched wildcard name or
+ /// NULL for non wildcard match.
+ /// \param type Type of RR being sought.
+ /// \param code Result of the find operation
+ /// \param rrset RRset found as a result of the find (which may be
+ /// null).
+ /// \param log_id ID of the message being logged. Up to five
+ /// parameters are available to the message: data source name,
+ /// requested domain name, requested class, requested type
+ /// and (but only if the search was successful and returned
+ /// an RRset) details of the RRset found.
+ ///
+ /// \return FindResult object constructed from the code and rrset
+ /// arguments.
+ FindResult logAndCreateResult(const isc::dns::Name& name,
+ const std::string* wildname,
+ const isc::dns::RRType& type,
+ ZoneFinder::Result code,
+ isc::dns::ConstRRsetPtr rrset,
+ const isc::log::MessageID& log_id) const;
+
+ /// \brief Checks if something lives below this domain.
+ ///
+ /// This looks if there's any subdomain of the given name. It can be
+ /// used to test if domain is empty non-terminal.
+ ///
+ /// \param name The domain to check.
+ ///
+ /// \return true if the name has subdomains, false if not.
bool hasSubdomains(const std::string& name);
- /**
- * \brief Get the NSEC covering a name.
- *
- * This one calls findPreviousName on the given name and extracts an NSEC
- * record on the result. It handles various error cases. The method exists
- * to share code present at more than one location.
- */
- dns::RRsetPtr findNSECCover(const dns::Name& name);
-
- /**
- * \brief Convenience type shortcut.
- *
- * To find stuff in the result of getRRsets.
- */
+ /// \brief Get the NSEC covering a name.
+ ///
+ /// This one calls findPreviousName on the given name and extracts an
+ /// NSEC record on the result. It handles various error cases. The
+ /// method exists to share code present at more than one location.
+ dns::ConstRRsetPtr findNSECCover(const dns::Name& name);
+
+ /// \brief Convenience type shortcut.
+ ///
+ /// To find stuff in the result of getRRsets.
typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
FoundIterator;
};
- /**
- * \brief Find a zone in the database
- *
- * This queries database's getZone to find the best matching zone.
- * It will propagate whatever exceptions are thrown from that method
- * (which is not restricted in any way).
- *
- * \param name Name of the zone or data contained there.
- * \return FindResult containing the code and an instance of Finder, if
- * anything is found. However, application should not rely on the
- * ZoneFinder being instance of Finder (possible subclass of this class
- * may return something else and it may change in future versions), it
- * should use it as a ZoneFinder only.
- */
+ /// \brief Find a zone in the database
+ ///
+ /// This queries database's getZone to find the best matching zone.
+ /// It will propagate whatever exceptions are thrown from that method
+ /// (which is not restricted in any way).
+ ///
+ /// \param name Name of the zone or data contained there.
+ /// \return FindResult containing the code and an instance of Finder, if
+ /// anything is found. However, application should not rely on the
+ /// ZoneFinder being instance of Finder (possible subclass of this class
+ /// may return something else and it may change in future versions), it
+ /// should use it as a ZoneFinder only.
virtual FindResult findZone(const isc::dns::Name& name) const;
- /**
- * \brief Get the zone iterator
- *
- * The iterator allows going through the whole zone content. If the
- * underlying DatabaseConnection is implemented correctly, it should
- * be possible to have multiple ZoneIterators at once and query data
- * at the same time.
- *
- * \exception DataSourceError if the zone doesn't exist.
- * \exception isc::NotImplemented if the underlying DatabaseConnection
- * doesn't implement iteration. But in case it is not implemented
- * and the zone doesn't exist, DataSourceError is thrown.
- * \exception Anything else the underlying DatabaseConnection might
- * want to throw.
- * \param name The origin of the zone to iterate.
- * \param adjust_ttl If true, the iterator will treat RRs with the same
- * name and type but different TTL values to be of the
- * same RRset, and will adjust the TTL to the lowest
- * value found. If false, it will consider the RR to
- * belong to a different RRset.
- * \return Shared pointer to the iterator (it will never be NULL)
- */
+ /// \brief Get the zone iterator
+ ///
+ /// The iterator allows going through the whole zone content. If the
+ /// underlying DatabaseConnection is implemented correctly, it should
+ /// be possible to have multiple ZoneIterators at once and query data
+ /// at the same time.
+ ///
+ /// \exception DataSourceError if the zone doesn't exist.
+ /// \exception isc::NotImplemented if the underlying DatabaseConnection
+ /// doesn't implement iteration. But in case it is not implemented
+ /// and the zone doesn't exist, DataSourceError is thrown.
+ /// \exception Anything else the underlying DatabaseConnection might
+ /// want to throw.
+ /// \param name The origin of the zone to iterate.
+ /// \param separate_rrs If true, the iterator will return each RR as a
+ /// new RRset object. If false, the iterator will
+ /// combine consecutive RRs with the name and type
+ /// into 1 RRset. The capitalization of the RRset will
+ /// be that of the first RR read, and TTLs will be
+ /// adjusted to the lowest one found.
+ /// \return Shared pointer to the iterator (it will never be NULL)
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const;
+ bool separate_rrs = false) const;
/// This implementation internally clones the accessor from the one
/// used in the client and starts a separate transaction using the cloned
/// accessor. The returned updater will be able to work separately from
/// the original client.
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const;
+ bool replace,
+ bool journaling = false) const;
+
+
+ /// This implementation internally clones the accessor from the one
+ /// used in the client for retrieving diffs and iterating over them.
+ /// The returned reader object will be able to work separately from
+ /// the original client.
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const;
private:
/// \brief The RR class that this client handles.
@@ -892,7 +1113,3 @@ private:
}
#endif // __DATABASE_DATASRC_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/datasrc/datasrc_config.h.pre.in b/src/lib/datasrc/datasrc_config.h.pre.in
new file mode 100644
index 0000000..ff99601
--- /dev/null
+++ b/src/lib/datasrc/datasrc_config.h.pre.in
@@ -0,0 +1,31 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_CONFIG_H
+#define __DATASRC_CONFIG_H 1
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Default directory to find the loadable data source libraries
+///
+/// This is the directory where, once installed, loadable backend libraries
+/// such as memory_ds.so and sqlite3_ds.so are found. It is used by the
+/// DataSourceClient loader if no absolute path is used and
+/// B10_FROM_BUILD is not set in the environment.
+const char* const BACKEND_LIBRARY_PATH = "@@PKGLIBEXECDIR@@/";
+
+} // end namespace datasrc
+} // end namespace isc
+
+#endif // __DATASRC_CONFIG_H
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 04ad610..01fb082 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -68,7 +68,7 @@ The datasource tried to provide an NSEC proof that the named domain does not
exist, but the database backend doesn't support DNSSEC. No proof is included
in the answer as a result.
-% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4
Debug information. The database data source is looking up records with the given
name and type in the database.
@@ -78,11 +78,17 @@ different TTL values. This isn't allowed on the wire and is considered
an error, so we set it to the lowest value we found (but we don't modify the
database). The data in database should be checked and fixed.
+% DATASRC_DATABASE_FOUND_CNAME search in datasource %1 for %2/%3/%4 found CNAME, resulting in %5
+When searching the domain for a name a CNAME was found at that name.
+Even though it was not the RR type being sought, it is returned. (The
+caller may want to continue the lookup by replacing the query name with
+the canonical name and restarting the query with the original RR type.)
+
% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
When searching for a domain, the program met a delegation to a different zone
at the given domain name. It will return that one instead.
-% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT search in datasource %1 for %2/%3/%4 found delegation at %5
The program found the domain requested, but it is a delegation point to a
different zone, therefore it is not authoritative for this domain name.
It will return the NS record instead.
@@ -93,19 +99,25 @@ place in the domain space at the given domain name. It will return that one
instead.
% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
-The domain name doesn't have any RRs, so it doesn't exist in the database.
-However, it has a subdomain, so it exists in the DNS address space. So we
-return NXRRSET instead of NXDOMAIN.
+The domain name does not have any RRs associated with it, so it doesn't
+exist in the database. However, it has a subdomain, so it does exist
+in the DNS address space. This type of domain is known as an "empty
+non-terminal" and so we return NXRRSET instead of NXDOMAIN.
% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
The data returned by the database backend did not contain any data for the given
domain name, class and type.
-% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 for %2/%3/%4 resulted in NXRRSET
The data returned by the database backend contained data for the given domain
name and class, but not for the given type.
-% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+% DATASRC_DATABASE_FOUND_NXRRSET_NSEC search in datasource %1 for %2/%3/%4 resulted in RRset %5
+A search in the database for RRs for the specified name, type and class has
+located RRs that match the name and class but not the type. DNSSEC information
+has been requested and returned.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %5
The data returned by the database backend contained data for the given domain
name, and it either matches the type or has a relevant type. The RRset that is
returned is printed.
@@ -127,11 +139,46 @@ were found to be different. This isn't allowed on the wire and is considered
an error, so we set it to the lowest value we found (but we don't modify the
database). The data in database should be checked and fixed.
-% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
-The database doesn't contain directly matching domain, but it does contain a
-wildcard one which is being used to synthesize the answer.
+% DATASRC_DATABASE_NO_MATCH no match for %2/%3/%4 in %1
+No match (not even a wildcard) was found in the named data source for the
+given name/type/class.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates rolled back for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug of the application that forgets committing
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation. In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
-% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %3 because %2 contains NS (data source %1)
The database was queried to provide glue data and it didn't find direct match.
It could create it from given wildcard, but matching wildcards is forbidden
under a zone cut, which was found. Therefore the delegation will be returned
@@ -143,11 +190,31 @@ exists, therefore this name is something like empty non-terminal (actually,
from the protocol point of view, it is empty non-terminal, but the code
discovers it differently).
-% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
-The given wildcard exists implicitly in the domainspace, as empty nonterminal
-(eg. there's something like subdomain.*.example.org, so *.example.org exists
-implicitly, but is empty). This will produce NXRRSET, because the constructed
-domain is empty as well as the wildcard.
+% DATASRC_DATABASE_WILDCARD_CNAME search in datasource %1 for %2/%3/%4 found wildcard CNAME at %5, resulting in %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a CNAME RR was found at a wildcard record
+matching the name. This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_EMPTY found subdomains of %2 which is a wildcard match for %3 in %1
+The given wildcard matches the name being sought but it is an empty
+nonterminal (e.g. there is nothing at *.example.org itself, but something
+like subdomain.*.example.org does exist: so *.example.org exists in the
+namespace but has no RRs associated with it). This will produce NXRRSET.
+
+% DATASRC_DATABASE_WILDCARD_MATCH search in datasource %1 resulted in wildcard match at %5 with RRset %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a wildcard record matching the name and type of
+the query was found. The data at this point is returned.
+
+% DATASRC_DATABASE_WILDCARD_NS search in datasource %1 for %2/%3/%4 found wildcard delegation at %5, resulting in %6
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, an NS RR was found at a wildcard record matching
+the name. This is returned as the result of the search.
+
+% DATASRC_DATABASE_WILDCARD_NXRRSET search in datasource %1 for %2/%3/%4 resulted in wildcard NXRRSET at %5
+The database doesn't contain a directly matching name. When searching
+for a wildcard match, a matching wildcard entry was found but it did
+not contain RRs of the requested type. An NXRRSET indication is returned.
% DATASRC_DO_QUERY handling query for '%1/%2'
A debug message indicating that a query for the given name and RR type is being
@@ -259,7 +326,7 @@ Debug information. The requested record was found.
% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
Debug information. The search stopped at a superdomain of the requested
-domain. The domain is a empty nonterminal, therefore it is treated as NXRRSET
+domain. The domain is an empty nonterminal, therefore it is treated as NXRRSET
case (eg. the domain exists, but it doesn't have the requested record type).
% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
@@ -487,12 +554,12 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
-% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
-The database file is being opened so it can start providing data.
-
% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
The database file is no longer needed and is being closed.
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
% DATASRC_SQLITE_CREATE SQLite data source created
Debug information. An instance of SQLite data source is being created.
@@ -630,3 +697,31 @@ database module are shown in the log message.
Debug information. A set of updates to a zone has been successfully
committed to the corresponding database backend. The zone name,
its class and the database name are printed.
+
+% DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+
+% DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
index 1818c70..35a79fe 100644
--- a/src/lib/datasrc/factory.cc
+++ b/src/lib/datasrc/factory.cc
@@ -19,13 +19,59 @@
#include "sqlite3_accessor.h"
#include "memory_datasrc.h"
+#include "datasrc_config.h"
+
#include <datasrc/logger.h>
#include <dlfcn.h>
+#include <cstdlib>
+using namespace std;
using namespace isc::data;
using namespace isc::datasrc;
+namespace {
+// This helper function takes the 'type' string as passed to
+// the DataSourceClient container below, and, unless it
+// already specifies a specific loadable .so file, will
+// convert the short-name to the full file.
+// I.e. it will add '_ds.so' (if necessary), and prepend
+// it with an absolute path (if necessary).
+// Returns the resulting string to use with LibraryContainer.
+const std::string
+getDataSourceLibFile(const std::string& type) {
+ if (type.empty()) {
+ isc_throw(DataSourceLibraryError,
+ "DataSourceClient container called with empty type value");
+ }
+ if (type == ".so") {
+ isc_throw(DataSourceLibraryError, "DataSourceClient container called "
+ "with bad type or file name");
+ }
+
+ // Type can be either a short name, in which case we need to
+ // append "_ds.so", or it can be a direct .so library.
+ std::string lib_file = type;
+ const int ext_pos = lib_file.rfind(".so");
+ if (ext_pos == std::string::npos || ext_pos + 3 != lib_file.length()) {
+ lib_file.append("_ds.so");
+ }
+ // And if it is not an absolute path, prepend it with our
+ // loadable backend library path
+ if (type[0] != '/') {
+ // When running from the build tree, we do NOT want
+ // to load the installed loadable library
+ if (getenv("B10_FROM_BUILD") != NULL) {
+ lib_file = std::string(getenv("B10_FROM_BUILD")) +
+ "/src/lib/datasrc/.libs/" + lib_file;
+ } else {
+ lib_file = isc::datasrc::BACKEND_LIBRARY_PATH + lib_file;
+ }
+ }
+ return (lib_file);
+}
+} // end anonymous namespace
+
namespace isc {
namespace datasrc {
@@ -34,7 +80,10 @@ LibraryContainer::LibraryContainer(const std::string& name) {
// are recognized as such
ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
if (ds_lib_ == NULL) {
- isc_throw(DataSourceLibraryError, dlerror());
+ // This may cause the filename to appear twice in the actual
+ // error, but the output of dlerror is implementation-dependent
+ isc_throw(DataSourceLibraryError, "dlopen failed for " << name <<
+ ": " << dlerror());
}
}
@@ -61,7 +110,7 @@ LibraryContainer::getSym(const char* name) {
DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
ConstElementPtr config)
-: ds_lib_(type + "_ds.so")
+: ds_lib_(getDataSourceLibFile(type))
{
// We are casting from a data to a function pointer here
// Some compilers (rightfully) complain about that, but
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
index 0284067..9d0a762 100644
--- a/src/lib/datasrc/factory.h
+++ b/src/lib/datasrc/factory.h
@@ -68,7 +68,7 @@ public:
/// the library path.
///
/// \exception DataSourceLibraryError If the library cannot be found or
- /// cannot be loaded.
+ /// cannot be loaded, or if name is an empty string.
LibraryContainer(const std::string& name);
/// \brief Destructor
@@ -115,6 +115,15 @@ private:
/// easy recognition and to reduce potential mistakes.
/// For example, the sqlite3 implementation has the type 'sqlite3', and the
/// derived filename 'sqlite3_ds.so'
+/// The value of type can be a specific loadable library; if it already ends
+/// with '.so', the loader will not add '_ds.so'.
+/// It may also be an absolute path; if it starts with '/', nothing is
+/// prepended. If it does not, the loadable library will be taken from the
+/// installation directory, see the value of
+/// isc::datasrc::BACKEND_LIBRARY_PATH in datasrc_config.h for the exact path.
+///
+/// \note When 'B10_FROM_BUILD' is set in the environment, the build
+/// directory is used instead of the install directory.
///
/// There are of course some demands to an implementation, not all of which
/// can be verified compile-time. It must provide a creator and destructor
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 6c0f589..a79ee5b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -729,10 +729,14 @@ private:
Domain::const_iterator dom_iterator_;
const DomainTree& tree_;
const DomainNode* node_;
+ // Only used when separate_rrs_ is true
+ RdataIteratorPtr rdata_iterator_;
+ bool separate_rrs_;
bool ready_;
public:
- MemoryIterator(const DomainTree& tree, const Name& origin) :
+ MemoryIterator(const DomainTree& tree, const Name& origin, bool separate_rrs) :
tree_(tree),
+ separate_rrs_(separate_rrs),
ready_(true)
{
// Find the first node (origin) and preserve the node chain for future
@@ -747,6 +751,9 @@ public:
// Initialize the iterator if there's somewhere to point to
if (node_ != NULL && node_->getData() != DomainPtr()) {
dom_iterator_ = node_->getData()->begin();
+ if (separate_rrs_ && dom_iterator_ != node_->getData()->end()) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
}
}
@@ -766,6 +773,10 @@ public:
// if the map is empty or not
if (node_ != NULL && node_->getData() != NULL) {
dom_iterator_ = node_->getData()->begin();
+ // New RRset, so get a new rdata iterator
+ if (separate_rrs_) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
}
}
if (node_ == NULL) {
@@ -773,12 +784,35 @@ public:
ready_ = false;
return (ConstRRsetPtr());
}
- // The iterator points to the next yet unused RRset now
- ConstRRsetPtr result(dom_iterator_->second);
- // This one is used, move it to the next time for next call
- ++dom_iterator_;
- return (result);
+ if (separate_rrs_) {
+ // For separate rrs, reconstruct a new RRset with just the
+ // 'current' rdata
+ RRsetPtr result(new RRset(dom_iterator_->second->getName(),
+ dom_iterator_->second->getClass(),
+ dom_iterator_->second->getType(),
+ dom_iterator_->second->getTTL()));
+ result->addRdata(rdata_iterator_->getCurrent());
+ rdata_iterator_->next();
+ if (rdata_iterator_->isLast()) {
+ // all used up, next.
+ ++dom_iterator_;
+ // New RRset, so get a new rdata iterator, but only if this
+ // was not the final RRset in the chain
+ if (dom_iterator_ != node_->getData()->end()) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
+ }
+ return (result);
+ } else {
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+
+ // This one is used, move it to the next time for next call
+ ++dom_iterator_;
+
+ return (result);
+ }
}
virtual ConstRRsetPtr getSOA() const {
@@ -789,11 +823,7 @@ public:
} // End of anonymous namespace
ZoneIteratorPtr
-InMemoryClient::getIterator(const Name& name, bool) const {
- // note: adjust_ttl argument is ignored, as the RRsets are already
- // individually stored, and hence cannot have different TTLs anymore at
- // this point
-
+InMemoryClient::getIterator(const Name& name, bool separate_rrs) const {
ZoneTable::FindResult result(impl_->zone_table.findZone(name));
if (result.code != result::SUCCESS) {
isc_throw(DataSourceError, "No such zone: " + name.toText());
@@ -811,14 +841,22 @@ InMemoryClient::getIterator(const Name& name, bool) const {
isc_throw(Unexpected, "The zone at " + name.toText() +
" is not InMemoryZoneFinder");
}
- return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name,
+ separate_rrs)));
}
ZoneUpdaterPtr
-InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
}
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+InMemoryClient::getJournalReader(const isc::dns::Name&, uint32_t,
+ uint32_t) const
+{
+ isc_throw(isc::NotImplemented, "Journaling isn't supported for "
+ "in memory data source");
+}
namespace {
// convencience function to add an error message to a list of those
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 1b6c120..b852eb3 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -273,7 +273,7 @@ public:
/// \brief Implementation of the getIterator method
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const;
+ bool separate_rrs = false) const;
/// In-memory data source is read-only, so this derived method will
/// result in a NotImplemented exception.
@@ -284,7 +284,12 @@ public:
/// to update via its updater (this may or may not be a good idea and
/// is subject to further discussions).
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const;
+ bool replace, bool journaling = false)
+ const;
+
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const;
private:
// TODO: Do we still need the PImpl if nobody should manipulate this class
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 01b9f41..fb2ffef 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -23,6 +23,7 @@
#include <datasrc/logger.h>
#include <datasrc/data_source.h>
#include <datasrc/factory.h>
+#include <datasrc/database.h>
#include <util/filename.h>
using namespace std;
@@ -54,7 +55,10 @@ enum StatementID {
FIND_PREVIOUS = 10,
ADD_RECORD_DIFF = 11,
GET_RECORD_DIFF = 12, // This is temporary for testing "add diff"
- NUM_STATEMENTS = 13
+ LOW_DIFF_ID = 13,
+ HIGH_DIFF_ID = 14,
+ DIFF_RECS = 15,
+ NUM_STATEMENTS = 16
};
const char* const text_statements[NUM_STATEMENTS] = {
@@ -62,33 +66,48 @@ const char* const text_statements[NUM_STATEMENTS] = {
// specifically chosen to match the enum values in RecordColumns
"SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
"SELECT rdtype, ttl, sigtype, rdata FROM records " // ANY
- "WHERE zone_id=?1 AND name=?2",
+ "WHERE zone_id=?1 AND name=?2",
"SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
- "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+ "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
"BEGIN", // BEGIN
"COMMIT", // COMMIT
"ROLLBACK", // ROLLBACK
"DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
"INSERT INTO records " // ADD_RECORD
- "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
- "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
"DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
- "AND rdtype=?3 AND rdata=?4",
+ "AND rdtype=?3 AND rdata=?4",
"SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
- "WHERE zone_id = ?1 ORDER BY rname, rdtype",
+ "WHERE zone_id = ?1 ORDER BY rname, rdtype",
/*
* This one looks for previous name with NSEC record. It is done by
* using the reversed name. The NSEC is checked because we need to
* skip glue data, which don't have the NSEC.
*/
"SELECT name FROM records " // FIND_PREVIOUS
- "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1",
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1",
"INSERT INTO diffs " // ADD_RECORD_DIFF
- "(zone_id, version, operation, name, rrtype, ttl, rdata) "
- "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
- , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
- "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
+ "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+ "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation",
+
+ // Two statements to select the lowest ID and highest ID in a set of
+ // differences.
+ "SELECT id FROM diffs " // LOW_DIFF_ID
+ "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+ "ORDER BY id ASC LIMIT 1",
+ "SELECT id FROM diffs " // HIGH_DIFF_ID
+ "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+ "ORDER BY id DESC LIMIT 1",
+
+ // In the next statement, note the redundant ID. This is to ensure
+ // that the columns match the column IDs passed to the iterator
+ "SELECT rrtype, ttl, id, rdata, name FROM diffs " // DIFF_RECS
+ "WHERE zone_id=?1 AND id>=?2 and id<=?3 "
+ "ORDER BY id ASC"
};
struct SQLite3Parameters {
@@ -231,23 +250,26 @@ const char* const SCHEMA_LIST[] = {
"dnssec BOOLEAN NOT NULL DEFAULT 0)",
"CREATE INDEX zones_byname ON zones (name)",
"CREATE TABLE records (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
"CREATE INDEX records_byname ON records (name)",
"CREATE INDEX records_byrname ON records (rname)",
"CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
- "hash STRING NOT NULL COLLATE NOCASE, "
- "owner STRING NOT NULL COLLATE NOCASE, "
- "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
"CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
- "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, "
+ "version INTEGER NOT NULL, "
+ "operation INTEGER NOT NULL, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rrtype STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, "
+ "rdata STRING NOT NULL)",
NULL
};
@@ -558,6 +580,9 @@ private:
const std::string name_;
};
+
+// Methods to retrieve the various iterators
+
DatabaseAccessor::IteratorContextPtr
SQLite3Accessor::getRecords(const std::string& name, int id,
bool subdomains) const
@@ -571,6 +596,257 @@ SQLite3Accessor::getAllRecords(int id) const {
return (IteratorContextPtr(new Context(shared_from_this(), id)));
}
+
+/// \brief Difference Iterator
+///
+/// This iterator is used to search through the differences table for the
+/// resource records making up an IXFR between two versions of a zone.
+
+class SQLite3Accessor::DiffContext : public DatabaseAccessor::IteratorContext {
+public:
+
+ /// \brief Constructor
+ ///
+ /// Constructs the iterator for the difference sequence. It is
+ /// passed two parameters, the first and last versions in the difference
+ /// sequence. Note that because of serial number rollover, it may well
+ /// be that the start serial number is greater than the end one.
+ ///
+ /// \param zone_id ID of the zone (in the zone table)
+ /// \param start Serial number of first version in difference sequence
+ /// \param end Serial number of last version in difference sequence
+ ///
+ /// \exception any A number of exceptions can be expected
+ DiffContext(const boost::shared_ptr<const SQLite3Accessor>& accessor,
+ int zone_id, uint32_t start, uint32_t end) :
+ accessor_(accessor),
+ last_status_(SQLITE_ROW)
+ {
+ try {
+ int low_id = findIndex(LOW_DIFF_ID, zone_id, start, DIFF_DELETE);
+ int high_id = findIndex(HIGH_DIFF_ID, zone_id, end, DIFF_ADD);
+
+ // Prepare the statement that will return data values
+ reset(DIFF_RECS);
+ bindInt(DIFF_RECS, 1, zone_id);
+ bindInt(DIFF_RECS, 2, low_id);
+ bindInt(DIFF_RECS, 3, high_id);
+
+ } catch (...) {
+ // Something wrong, clear up everything.
+ accessor_->dbparameters_->finalizeStatements();
+ throw;
+ }
+ }
+
+ /// \brief Destructor
+ virtual ~DiffContext()
+ {}
+
+ /// \brief Get Next Diff Record
+ ///
+ /// Returns the next difference record in the difference sequence.
+ ///
+ /// \param data Array of std::strings COLUMN_COUNT long. The results
+ /// are returned in this.
+ ///
+ /// \return bool true if data is returned, false if not.
+ ///
+ /// \exception any Varied
+ bool getNext(std::string (&data)[COLUMN_COUNT]) {
+
+ if (last_status_ != SQLITE_DONE) {
+ // Last call (if any) didn't reach end of result set, so we
+ // can read another row from it.
+ //
+ // Get a pointer to the statement for brevity (this does not
+ // transfer ownership of the statement to this class, so there is
+ // no need to tidy up after we have finished using it).
+ sqlite3_stmt* stmt =
+ accessor_->dbparameters_->getStatement(DIFF_RECS);
+
+ const int rc(sqlite3_step(stmt));
+ if (rc == SQLITE_ROW) {
+ // Copy the data across to the output array
+ copyColumn(DIFF_RECS, data, TYPE_COLUMN);
+ copyColumn(DIFF_RECS, data, TTL_COLUMN);
+ copyColumn(DIFF_RECS, data, NAME_COLUMN);
+ copyColumn(DIFF_RECS, data, RDATA_COLUMN);
+
+ } else if (rc != SQLITE_DONE) {
+ isc_throw(DataSourceError,
+ "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ last_status_ = rc;
+ }
+ return (last_status_ == SQLITE_ROW);
+ }
+
+private:
+
+ /// \brief Reset prepared statement
+ ///
+ /// Sets up the statement so that new parameters can be attached to it and
+ /// that it can be used to query for another difference sequence.
+ ///
+ /// \param stindex Index of prepared statement to which to bind
+ void reset(int stindex) {
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+ if ((sqlite3_reset(stmt) != SQLITE_OK) ||
+ (sqlite3_clear_bindings(stmt) != SQLITE_OK)) {
+ isc_throw(SQLite3Error, "Could not clear statement bindings in '" <<
+ text_statements[stindex] << "': " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ /// \brief Bind Int
+ ///
+ /// Binds an integer to a specific variable in a prepared statement.
+ ///
+ /// \param stindex Index of prepared statement to which to bind
+ /// \param varindex Index of variable to which to bind
+ /// \param value Value of variable to bind
+ /// \exception SQLite3Error on an error
+ void bindInt(int stindex, int varindex, sqlite3_int64 value) {
+ if (sqlite3_bind_int64(accessor_->dbparameters_->getStatement(stindex),
+ varindex, value) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind value to parameter " <<
+ varindex << " in statement '" <<
+ text_statements[stindex] << "': " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ ///\brief Get Single Value
+ ///
+ /// Executes a prepared statement (which has parameters bound to it)
+ /// for which the result of a single value is expected.
+ ///
+ /// \param stindex Index of prepared statement in statement table.
+ ///
+ /// \return Value of SELECT.
+ ///
+ /// \exception TooMuchData Multiple rows returned when one expected
+ /// \exception TooLittleData Zero rows returned when one expected
+ /// \exception DataSourceError SQLite3-related error
+ int getSingleValue(StatementID stindex) {
+
+ // Get a pointer to the statement for brevity (does not transfer
+ // resources)
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+
+ // Execute the data. Should be just one result
+ int rc = sqlite3_step(stmt);
+ int result = -1;
+ if (rc == SQLITE_ROW) {
+
+ // Got some data, extract the value
+ result = sqlite3_column_int(stmt, 0);
+ rc = sqlite3_step(stmt);
+ if (rc == SQLITE_DONE) {
+
+ // All OK, exit with the value.
+ return (result);
+
+ } else if (rc == SQLITE_ROW) {
+ isc_throw(TooMuchData, "request to return one value from "
+ "diffs table returned multiple values");
+ }
+ } else if (rc == SQLITE_DONE) {
+
+ // No data in the table. A bare exception with no explanation is
+ // thrown, as it will be replaced by a more informative one by
+ // the caller.
+ isc_throw(TooLittleData, "");
+ }
+
+ // We get here on an error.
+ isc_throw(DataSourceError, "could not get data from diffs table: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+
+ // Keep the compiler happy with a return value.
+ return (result);
+ }
+
+ /// \brief Find index
+ ///
+ /// Executes the prepared statement locating the high or low index in
+ /// the diffs table and returns that index.
+ ///
+ /// \param stmt_id Index of the prepared statement to execute
+ /// \param zone_id ID of the zone for which the index is being sought
+ /// \param serial Zone serial number for which an index is being sought.
+ /// \param diff Code to delete record additions or deletions
+ ///
+ /// \return int ID of the row in the diffs table corresponding to the
+ /// statement.
+ ///
+ /// \exception TooLittleData Internal error, no result returned when one
+ /// was expected.
+ /// \exception NoSuchSerial Serial number not found.
+ /// \exception NoDiffsData No data for this zone found in diffs table
+ int findIndex(StatementID stindex, int zone_id, uint32_t serial, int diff) {
+
+ // Set up the statement
+ reset(stindex);
+ bindInt(stindex, 1, zone_id);
+ bindInt(stindex, 2, serial);
+ bindInt(stindex, 3, diff);
+
+ // Execute the statement
+ int result = -1;
+ try {
+ result = getSingleValue(stindex);
+
+ } catch (const TooLittleData&) {
+
+ // No data returned but the SQL query succeeded. Only possibility
+ // is that there is no entry in the differences table for the given
+ // zone and version.
+ isc_throw(NoSuchSerial, "No entry in differences table for " <<
+ " zone ID " << zone_id << ", serial number " << serial);
+ }
+
+ return (result);
+ }
+
+ /// \brief Copy Column to Output
+ ///
+ /// Copies the textual data in the result set to the specified column
+ /// in the output.
+ ///
+ /// \param stindex Index of prepared statement used to access data
+ /// \param data Array of columns passed to getNext
+ /// \param column Column of output to copy
+ void copyColumn(StatementID stindex, std::string (&data)[COLUMN_COUNT],
+ int column) {
+
+ // Get a pointer to the statement for brevity (does not transfer
+ // resources)
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+ data[column] = convertToPlainChar(sqlite3_column_text(stmt,
+ column),
+ accessor_->dbparameters_->db_);
+ }
+
+ // Attributes
+
+ boost::shared_ptr<const SQLite3Accessor> accessor_; // Accessor object
+ int last_status_; // Last status received from sqlite3_step
+};
+
+// ... and return the iterator
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getDiffs(int id, uint32_t start, uint32_t end) const {
+ return (IteratorContextPtr(new DiffContext(shared_from_this(), id, start,
+ end)));
+}
+
+
+
pair<bool, int>
SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
if (dbparameters_->updating_zone) {
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 6b5369c..08be824 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -17,6 +17,7 @@
#define __DATASRC_SQLITE3_ACCESSOR_H
#include <datasrc/database.h>
+#include <datasrc/data_source.h>
#include <exceptions/exceptions.h>
@@ -40,10 +41,34 @@ namespace datasrc {
* It might mean corrupt database file, invalid request or that something is
* rotten in the library.
*/
-class SQLite3Error : public Exception {
+class SQLite3Error : public DataSourceError {
public:
SQLite3Error(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Much Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * many rows.
+ */
+class TooMuchData : public DataSourceError {
+public:
+ TooMuchData(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Little Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * few rows (including none).
+ */
+class TooLittleData : public DataSourceError {
+public:
+ TooLittleData(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
};
struct SQLite3Parameters;
@@ -128,6 +153,27 @@ public:
*/
virtual IteratorContextPtr getAllRecords(int id) const;
+ /** \brief Creates an iterator context for a set of differences.
+ *
+ * Implements the getDiffs() method from DatabaseAccessor
+ *
+ * \exception NoSuchSerial if either of the versions do not exist in
+ * the difference table.
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \param start The SOA serial number of the version of the zone from
+ * which the difference sequence should start.
+ * \param end The SOA serial number of the version of the zone at which
+ * the difference sequence should end.
+ *
+ * \return Iterator containing difference records.
+ */
+ virtual IteratorContextPtr
+ getDiffs(int id, uint32_t start, uint32_t end) const;
+
+
virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
bool replace);
@@ -192,14 +238,20 @@ private:
const std::string filename_;
/// \brief The class for which the queries are done
const std::string class_;
+ /// \brief Database name
+ const std::string database_name_;
+
/// \brief Opens the database
void open(const std::string& filename);
/// \brief Closes the database
void close();
- /// \brief SQLite3 implementation of IteratorContext
+
+ /// \brief SQLite3 implementation of IteratorContext for all records
class Context;
friend class Context;
- const std::string database_name_;
+ /// \brief SQLite3 implementation of IteratorContext for differences
+ class DiffContext;
+ friend class DiffContext;
};
/// \brief Creates an instance of the SQlite3 datasource client
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index e5cca0a..6dd6b0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -18,48 +18,72 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
-TESTS += run_unittests
-run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
-run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+TESTS += run_unittests run_unittests_sqlite3 run_unittests_memory
+
+#
+# For each specific datasource, there is a separate binary that includes
+# the code itself (we can't unittest through the public API). These need
+# to be separate because the included code, by design, contains conflicting
+# symbols.
+# We also have a 'general' run_unittests with non-datasource-specific tests
+#
+
+# First define the parts shared by all
+common_sources = run_unittests.cc
+common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
+common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+
+common_ldadd = $(GTEST_LDADD)
+common_ldadd += $(SQLITE_LIBS)
+common_ldadd += $(top_builddir)/src/lib/datasrc/libdatasrc.la
+common_ldadd += $(top_builddir)/src/lib/dns/libdns++.la
+common_ldadd += $(top_builddir)/src/lib/util/libutil.la
+common_ldadd += $(top_builddir)/src/lib/log/liblog.la
+common_ldadd += $(top_builddir)/src/lib/exceptions/libexceptions.la
+common_ldadd += $(top_builddir)/src/lib/cc/libcc.la
+common_ldadd += $(top_builddir)/src/lib/testutils/libtestutils.la
+common_ldadd += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+
+
+# The general tests
+run_unittests_SOURCES = $(common_sources)
run_unittests_SOURCES += datasrc_unittest.cc
-run_unittests_SOURCES += sqlite3_unittest.cc
run_unittests_SOURCES += static_unittest.cc
run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
-#run_unittests_SOURCES += zonetable_unittest.cc
-#run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
-run_unittests_SOURCES += database_unittest.cc
run_unittests_SOURCES += client_unittest.cc
-run_unittests_SOURCES += sqlite3_accessor_unittest.cc
-if !USE_STATIC_LINK
-# This test uses dynamically loadable module. It will cause various
-# troubles with static link such as "missing" symbols in the static object
-# for the module. As a workaround we disable this particualr test
-# in this case.
-run_unittests_SOURCES += factory_unittest.cc
-endif
-# for the dlopened types we have tests for, we also need to include the
-# sources
-run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
-#run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
-run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
-run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
-run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
-run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
-run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
-run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD = $(common_ldadd)
+
+
+# SQlite3 datasource tests
+run_unittests_sqlite3_SOURCES = $(common_sources)
+run_unittests_sqlite3_SOURCES += database_unittest.cc
+run_unittests_sqlite3_SOURCES += sqlite3_unittest.cc
+run_unittests_sqlite3_SOURCES += sqlite3_accessor_unittest.cc
+run_unittests_sqlite3_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+
+run_unittests_sqlite3_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_sqlite3_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_sqlite3_LDADD = $(common_ldadd)
+
+# In-memory datasource tests
+run_unittests_memory_SOURCES = $(common_sources)
+run_unittests_memory_SOURCES += memory_datasrc_unittest.cc
+run_unittests_memory_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
+
+run_unittests_memory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_memory_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_memory_LDADD = $(common_ldadd)
+
endif
noinst_PROGRAMS = $(TESTS)
@@ -78,3 +102,22 @@ EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
EXTRA_DIST += testdata/test.sqlite3.nodiffs
EXTRA_DIST += testdata/rwtest.sqlite3
+EXTRA_DIST += testdata/diffs.sqlite3
+
+# For the factory unit tests, we need to specify that we want
+# the loadable backend libraries from the build tree, and not from
+# the installation directory. Therefore we build it into a separate
+# binary, and call that from check-local with B10_FROM_BUILD set.
+# Also, we only want to do this when static building is not used,
+# since it will cause various troubles with static link such as
+# "missing" symbols in the static object for the module.
+if !USE_STATIC_LINK
+noinst_PROGRAMS+=run_unittests_factory
+run_unittests_factory_SOURCES = $(common_sources)
+run_unittests_factory_SOURCES += factory_unittest.cc
+run_unittests_factory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_factory_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_factory_LDADD = $(common_ldadd)
+check-local:
+ B10_FROM_BUILD=${abs_top_builddir} ./run_unittests_factory
+endif
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
index 5b2c91a..64ad25f 100644
--- a/src/lib/datasrc/tests/client_unittest.cc
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <utility>
+
#include <datasrc/client.h>
#include <dns/name.h>
@@ -32,9 +34,16 @@ public:
virtual FindResult findZone(const isc::dns::Name&) const {
return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
}
- virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool, bool)
+ const
+ {
return (ZoneUpdaterPtr());
}
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const {
+ isc_throw(isc::NotImplemented, "Journaling isn't supported "
+ "in Nop data source");
+ }
};
class ClientTest : public ::testing::Test {
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 1a471bf..920c9a2 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,10 +12,15 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdlib.h>
+
#include <boost/shared_ptr.hpp>
+#include <boost/lexical_cast.hpp>
#include <gtest/gtest.h>
+#include <exceptions/exceptions.h>
+
#include <dns/name.h>
#include <dns/rrttl.h>
#include <dns/rrset.h>
@@ -30,6 +35,7 @@
#include <testutils/dnsmessage_test.h>
#include <map>
+#include <vector>
using namespace isc::datasrc;
using namespace std;
@@ -37,6 +43,7 @@ using namespace std;
// for some systems.
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
+using boost::lexical_cast;
using namespace isc::dns;
namespace {
@@ -255,6 +262,11 @@ public:
"This database datasource can't be iterated");
}
+ virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource doesn't support diffs");
+ }
+
virtual std::string findPreviousName(int, const std::string&) const {
isc_throw(isc::NotImplemented,
"This data source doesn't support DNSSEC");
@@ -264,6 +276,52 @@ private:
};
+/**
+ * Single journal entry in the mock database.
+ *
+ * All the members there are public for simplicity, as it only stores data.
+ * We use the implicit constructor and operator. The members can't be const
+ * because of the assignment operator (used in the vectors).
+ */
+struct JournalEntry {
+ JournalEntry(int id, uint32_t serial,
+ DatabaseAccessor::DiffOperation operation,
+ const std::string (&data)[DatabaseAccessor::DIFF_PARAM_COUNT])
+ : id_(id), serial_(serial), operation_(operation)
+ {
+ data_[DatabaseAccessor::DIFF_NAME] = data[DatabaseAccessor::DIFF_NAME];
+ data_[DatabaseAccessor::DIFF_TYPE] = data[DatabaseAccessor::DIFF_TYPE];
+ data_[DatabaseAccessor::DIFF_TTL] = data[DatabaseAccessor::DIFF_TTL];
+ data_[DatabaseAccessor::DIFF_RDATA] =
+ data[DatabaseAccessor::DIFF_RDATA];
+ }
+ JournalEntry(int id, uint32_t serial,
+ DatabaseAccessor::DiffOperation operation,
+ const std::string& name, const std::string& type,
+ const std::string& ttl, const std::string& rdata):
+ id_(id), serial_(serial), operation_(operation)
+ {
+ data_[DatabaseAccessor::DIFF_NAME] = name;
+ data_[DatabaseAccessor::DIFF_TYPE] = type;
+ data_[DatabaseAccessor::DIFF_TTL] = ttl;
+ data_[DatabaseAccessor::DIFF_RDATA] = rdata;
+ }
+ int id_;
+ uint32_t serial_;
+ DatabaseAccessor::DiffOperation operation_;
+ std::string data_[DatabaseAccessor::DIFF_PARAM_COUNT];
+ bool operator==(const JournalEntry& other) const {
+ for (size_t i(0); i < DatabaseAccessor::DIFF_PARAM_COUNT; ++ i) {
+ if (data_[i] != other.data_[i]) {
+ return false;
+ }
+ }
+ // No need to check data here, checked above
+ return (id_ == other.id_ && serial_ == other.serial_ &&
+ operation_ == other.operation_);
+ }
+};
+
/*
* A virtual database accessor that pretends it contains single zone --
* example.org.
@@ -288,6 +346,7 @@ public:
readonly_records_ = &readonly_records_master_;
update_records_ = &update_records_master_;
empty_records_ = &empty_records_master_;
+ journal_entries_ = &journal_entries_master_;
fillData();
}
@@ -296,6 +355,7 @@ public:
cloned_accessor->readonly_records_ = &readonly_records_master_;
cloned_accessor->update_records_ = &update_records_master_;
cloned_accessor->empty_records_ = &empty_records_master_;
+ cloned_accessor->journal_entries_ = &journal_entries_master_;
latest_clone_ = cloned_accessor;
return (cloned_accessor);
}
@@ -495,6 +555,29 @@ private:
}
}
};
+ class MockDiffIteratorContext : public IteratorContext {
+ const vector<JournalEntry> diffs_;
+ vector<JournalEntry>::const_iterator it_;
+ public:
+ MockDiffIteratorContext(const vector<JournalEntry>& diffs) :
+ diffs_(diffs), it_(diffs_.begin())
+ {}
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ if (it_ == diffs_.end()) {
+ return (false);
+ }
+ data[DatabaseAccessor::NAME_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_NAME];
+ data[DatabaseAccessor::TYPE_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_TYPE];
+ data[DatabaseAccessor::TTL_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_TTL];
+ data[DatabaseAccessor::RDATA_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_RDATA];
+ ++it_;
+ return (true);
+ }
+ };
public:
virtual IteratorContextPtr getAllRecords(int id) const {
if (id == READONLY_ZONE_ID) {
@@ -544,7 +627,13 @@ public:
*update_records_ = *readonly_records_;
}
- return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ if (zone_name == "bad.example.org.") {
+ return (pair<bool, int>(true, -1));
+ } else if (zone_name == "null.example.org.") {
+ return (pair<bool, int>(true, 13));
+ } else {
+ return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ }
}
virtual void commit() {
*readonly_records_ = *update_records_;
@@ -658,6 +747,70 @@ public:
isc_throw(isc::Unexpected, "Unknown zone ID");
}
}
+ virtual void addRecordDiff(int id, uint32_t serial,
+ DiffOperation operation,
+ const std::string (&data)[DIFF_PARAM_COUNT])
+ {
+ if (id == 13) { // The null zone doesn't support journaling
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == -1) { // Bad zone throws
+ isc_throw(DataSourceError, "Test error");
+ } else {
+ journal_entries_->push_back(JournalEntry(id, serial, operation,
+ data));
+ }
+ }
+
+ virtual IteratorContextPtr getDiffs(int id, uint32_t start,
+ uint32_t end) const
+ {
+ vector<JournalEntry> selected_jnl;
+
+ for (vector<JournalEntry>::const_iterator it =
+ journal_entries_->begin();
+ it != journal_entries_->end(); ++it)
+ {
+ // For simplicity we assume this method is called for the
+ // "readonly" zone possibly after making updates on the "writable"
+ // copy and committing them.
+ if (id != READONLY_ZONE_ID) {
+ continue;
+ }
+
+ // Note: the following logic is not 100% accurate in terms of
+ // serial number arithmetic; we prefer brevity for testing.
+ // Skip until we see the starting serial. Once we started
+ // recording this condition is ignored (to support wrap-around
+ // case). Also, it ignores the RR type; it only checks the
+ // versions.
+ if ((*it).serial_ < start && selected_jnl.empty()) {
+ continue;
+ }
+ if ((*it).serial_ > end) { // gone over the end serial. we're done.
+ break;
+ }
+ selected_jnl.push_back(*it);
+ }
+
+ // Check if we've found the requested range. If not, throw.
+ if (selected_jnl.empty() || selected_jnl.front().serial_ != start ||
+ selected_jnl.back().serial_ != end) {
+ isc_throw(NoSuchSerial, "requested diff range is not found");
+ }
+
+ return (IteratorContextPtr(new MockDiffIteratorContext(selected_jnl)));
+ }
+
+ // Check the journal is as expected and clear the journal
+ void checkJournal(const std::vector<JournalEntry> &expected) const {
+ std::vector<JournalEntry> journal;
+ // Clean the journal, but keep local copy to check
+ journal.swap(*journal_entries_);
+ ASSERT_EQ(expected.size(), journal.size());
+ for (size_t i(0); i < expected.size(); ++ i) {
+ EXPECT_TRUE(expected[i] == journal[i]);
+ }
+ }
private:
// The following member variables are storage and/or update work space
@@ -677,6 +830,10 @@ private:
const Domains empty_records_master_;
const Domains* empty_records_;
+ // The journal data
+ std::vector<JournalEntry> journal_entries_master_;
+ std::vector<JournalEntry>* journal_entries_;
+
// used as temporary storage after searchForRecord() and during
// getNextRecord() calls, as well as during the building of the
// fake data
@@ -794,6 +951,10 @@ public:
rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
rrset_->addRdata(rdata::createRdata(rrset_->getType(),
rrset_->getClass(), "192.0.2.2"));
+ soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+ soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
// And its RRSIG. Also different from the configured one.
rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
@@ -810,6 +971,24 @@ public:
* times per test.
*/
void createClient() {
+ // To make sure we always have empty diffs table at the beginning of
+ // each test, we re-install the writable data source here.
+ // Note: this is SQLite3 specific and a waste (though otherwise
+ // harmless) for other types of data sources. If and when we support
+ // more types of data sources in this test framework, we should
+ // probably move this to some specialized templated method specific
+ // to SQLite3 (or for even a longer term we should add an API to
+ // purge the diffs table).
+ const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ "/rwtest.sqlite3 " TEST_DATA_BUILDDIR
+ "/rwtest.sqlite3.copied";
+ if (system(install_cmd) != 0) {
+ // any exception will do, this is failure in test setup, but nice
+ // to show the command that fails, and shouldn't be caught
+ isc_throw(isc::Exception,
+ "Error setting up; command failed: " << install_cmd);
+ }
+
current_accessor_ = new ACCESSOR_TYPE();
is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
client_.reset(new DatabaseClient(qclass_,
@@ -875,6 +1054,48 @@ public:
}
}
+ void checkJournal(const vector<JournalEntry>& expected) {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ mock_accessor->checkJournal(expected);
+ } else {
+ // For other generic databases, retrieve the diff using the
+ // reader class and compare the resulting sequence of RRset.
+ // For simplicity we only consider the case where the expected
+ // sequence is not empty.
+ ASSERT_FALSE(expected.empty());
+ const Name zone_name(expected.front().
+ data_[DatabaseAccessor::DIFF_NAME]);
+ ZoneJournalReaderPtr jnl_reader =
+ client_->getJournalReader(zone_name,
+ expected.front().serial_,
+ expected.back().serial_).second;
+ ASSERT_TRUE(jnl_reader);
+ ConstRRsetPtr rrset;
+ vector<JournalEntry>::const_iterator it = expected.begin();
+ for (rrset = jnl_reader->getNextDiff();
+ rrset && it != expected.end();
+ rrset = jnl_reader->getNextDiff(), ++it) {
+ typedef DatabaseAccessor Accessor;
+ RRsetPtr expected_rrset(
+ new RRset(Name((*it).data_[Accessor::DIFF_NAME]),
+ qclass_,
+ RRType((*it).data_[Accessor::DIFF_TYPE]),
+ RRTTL((*it).data_[Accessor::DIFF_TTL])));
+ expected_rrset->addRdata(
+ rdata::createRdata(expected_rrset->getType(),
+ expected_rrset->getClass(),
+ (*it).data_[Accessor::DIFF_RDATA]));
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+ }
+ // We should have examined all entries of both expected and
+ // actual data.
+ EXPECT_TRUE(it == expected.end());
+ ASSERT_FALSE(rrset);
+ }
+ }
+
// Some tests only work for MockAccessor. We remember whether our accessor
// is of that type.
bool is_mock_;
@@ -895,6 +1116,7 @@ public:
const RRTTL rrttl_; // commonly used RR TTL
RRsetPtr rrset_; // for adding/deleting an RRset
RRsetPtr rrsigset_; // for adding/deleting an RRset
+ RRsetPtr soa_; // for adding/deleting an RRset
// update related objects to be tested
ZoneUpdaterPtr updater_;
@@ -1246,8 +1468,8 @@ TEST_F(MockDatabaseClientTest, ttldiff) {
// Unless we ask for individual RRs in our iterator request. In that case
// every RR should go into its own 'rrset'
-TEST_F(MockDatabaseClientTest, ttldiff_no_adjust_ttl) {
- ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), false));
+TEST_F(MockDatabaseClientTest, ttldiff_separate_rrs) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), true));
// Walk through the full iterator, we should see 1 rrset with name
// ttldiff1.example.org., and two rdatas. Same for ttldiff2
@@ -2703,4 +2925,336 @@ TEST_F(MockDatabaseClientTest, badName) {
DataSourceError);
}
+/*
+ * Test correct use of the updater with a journal.
+ */
+TYPED_TEST(DatabaseClientTest, journal) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ this->updater_->deleteRRset(*this->soa_);
+ this->updater_->deleteRRset(*this->rrset_);
+ this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+ this->soa_->getClass(),
+ "ns1.example.org. "
+ "admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ this->updater_->addRRset(*this->soa_);
+ this->updater_->addRRset(*this->rrset_);
+ ASSERT_NO_THROW(this->updater_->commit());
+ std::vector<JournalEntry> expected;
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "www.example.org.", "A", "3600",
+ "192.0.2.2"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+ DatabaseAccessor::DIFF_ADD,
+ "www.example.org.", "A", "3600",
+ "192.0.2.2"));
+ this->checkJournal(expected);
+}
+
+/*
+ * Push multiple delete-add sequences. Checks it is allowed and all is
+ * saved.
+ */
+TYPED_TEST(DatabaseClientTest, journalMultiple) {
+ std::vector<JournalEntry> expected;
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ std::string soa_rdata = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ for (size_t i(1); i < 100; ++ i) {
+ // Remove the old SOA
+ this->updater_->deleteRRset(*this->soa_);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ soa_rdata));
+ // Create a new SOA
+ soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+ this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+ this->soa_->getClass(),
+ soa_rdata));
+ // Add the new SOA
+ this->updater_->addRRset(*this->soa_);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ soa_rdata));
+ }
+ ASSERT_NO_THROW(this->updater_->commit());
+ // Check the journal contains everything.
+ this->checkJournal(expected);
+}
+
+/*
+ * Test passing a forbidden sequence to it and expect it to throw.
+ *
+ * Note that we implicitly test in different testcases (these for add and
+ * delete) that if the journaling is false, it doesn't expect the order.
+ *
+ * In this test we don't check with the real databases as this case shouldn't
+ * contain backend specific behavior.
+ */
+TEST_F(MockDatabaseClientTest, journalBadSequence) {
+ std::vector<JournalEntry> expected;
+ {
+ SCOPED_TRACE("Delete A before SOA");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
+ isc::BadValue);
+ // Make sure the journal is empty now
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add before delete");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+ // Make sure the journal is empty now
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add A before SOA");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // But we miss the add SOA here
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), isc::BadValue);
+ // Make sure the journal contains only the first one
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Commit before add");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Commit at the wrong time
+ EXPECT_THROW(updater_->commit(), isc::BadValue);
+ current_accessor_->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Delete two SOAs");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Delete the SOA again
+ EXPECT_THROW(this->updater_->deleteRRset(*this->soa_), isc::BadValue);
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add two SOAs");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Still OK
+ EXPECT_NO_THROW(this->updater_->addRRset(*this->soa_));
+ // But this one is added again
+ EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ this->checkJournal(expected);
+ }
+}
+
+/*
+ * Test it rejects to store journals when we request it together with
+ * erasing the whole zone.
+ */
+TYPED_TEST(DatabaseClientTest, journalOnErase) {
+ EXPECT_THROW(this->client_->getUpdater(this->zname_, true, true),
+ isc::BadValue);
+}
+
+/*
+ * Check that exception is propagated when the journal is not implemented.
+ */
+TEST_F(MockDatabaseClientTest, journalNotImplemented) {
+ updater_ = client_->getUpdater(Name("null.example.org"), false, true);
+ EXPECT_THROW(updater_->deleteRRset(*soa_), isc::NotImplemented);
+ soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+ soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419201 7200"));
+ EXPECT_THROW(updater_->addRRset(*soa_), isc::NotImplemented);
+}
+
+/*
+ * Test that different exceptions are propagated.
+ */
+TEST_F(MockDatabaseClientTest, journalException) {
+ updater_ = client_->getUpdater(Name("bad.example.org"), false, true);
+ EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
+}
+
+//
+// Tests for the ZoneJournalReader
+//
+
+// Install a simple, commonly used diff sequence: making an update from one
+// SOA to another. Return the end SOA RRset for the convenience of the caller.
+ConstRRsetPtr
+makeSimpleDiff(DataSourceClient& client, const Name& zname,
+ const RRClass& rrclass, ConstRRsetPtr begin_soa)
+{
+ ZoneUpdaterPtr updater = client.getUpdater(zname, false, true);
+ updater->deleteRRset(*begin_soa);
+ RRsetPtr soa_end(new RRset(zname, rrclass, RRType::SOA(), RRTTL(3600)));
+ soa_end->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
+ "ns1.example.org. admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ updater->addRRset(*soa_end);
+ updater->commit();
+
+ return (soa_end);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReader) {
+ // Check the simple case made by makeSimpleDiff.
+ ConstRRsetPtr soa_end = makeSimpleDiff(*this->client_, this->zname_,
+ this->qclass_, this->soa_);
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(this->zname_, 1234, 1235);
+ EXPECT_EQ(ZoneJournalReader::SUCCESS, result.first);
+ ZoneJournalReaderPtr jnl_reader = result.second;
+ ASSERT_TRUE(jnl_reader);
+ ConstRRsetPtr rrset = jnl_reader->getNextDiff();
+ ASSERT_TRUE(rrset);
+ isc::testutils::rrsetCheck(this->soa_, rrset);
+ rrset = jnl_reader->getNextDiff();
+ ASSERT_TRUE(rrset);
+ isc::testutils::rrsetCheck(soa_end, rrset);
+ rrset = jnl_reader->getNextDiff();
+ ASSERT_FALSE(rrset);
+
+ // Once it reaches the end of the sequence, further read attempt will
+ // result in exception.
+ EXPECT_THROW(jnl_reader->getNextDiff(), isc::InvalidOperation);
+}
+
+TYPED_TEST(DatabaseClientTest, readLargeJournal) {
+ // Similar to journalMultiple, but check that at a higher level.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+
+ vector<ConstRRsetPtr> expected;
+ for (size_t i = 0; i < 100; ++i) {
+ // Create the old SOA and remove it, and record it in the expected list
+ RRsetPtr rrset1(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ string soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+ rrset1->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ soa_rdata));
+ this->updater_->deleteRRset(*rrset1);
+ expected.push_back(rrset1);
+
+ // Create a new SOA, add it, and record it.
+ RRsetPtr rrset2(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i + 1) +
+ " 3600 1800 2419200 7200";
+ rrset2->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ soa_rdata));
+ this->updater_->addRRset(*rrset2);
+ expected.push_back(rrset2);
+ }
+ this->updater_->commit();
+
+ ZoneJournalReaderPtr jnl_reader(this->client_->getJournalReader(
+ this->zname_, 1234, 1334).second);
+ ConstRRsetPtr actual;
+ int i = 0;
+ while ((actual = jnl_reader->getNextDiff()) != NULL) {
+ isc::testutils::rrsetCheck(expected.at(i++), actual);
+ }
+ EXPECT_EQ(expected.size(), i); // we should have eaten all expected data
+}
+
+TYPED_TEST(DatabaseClientTest, readJournalForNoRange) {
+ makeSimpleDiff(*this->client_, this->zname_, this->qclass_, this->soa_);
+
+ // The specified range does not exist in the diff storage. The factory
+ // method should result in NO_SUCH_VERSION
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(this->zname_, 1200, 1235);
+ EXPECT_EQ(ZoneJournalReader::NO_SUCH_VERSION, result.first);
+ EXPECT_FALSE(result.second);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReaderForNXZone) {
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(Name("nosuchzone"), 0, 1);
+ EXPECT_EQ(ZoneJournalReader::NO_SUCH_ZONE, result.first);
+ EXPECT_FALSE(result.second);
+}
+
+// A helper function for journalWithBadData. It installs a simple diff
+// from one serial (of 'begin') to another ('begin' + 1), tweaking a specified
+// field of data with some invalid value.
+void
+installBadDiff(MockAccessor& accessor, uint32_t begin,
+ DatabaseAccessor::DiffRecordParams modify_param,
+ const char* const data)
+{
+ string data1[] = {"example.org.", "SOA", "3600", "ns. root. 1 1 1 1 1"};
+ string data2[] = {"example.org.", "SOA", "3600", "ns. root. 2 1 1 1 1"};
+ data1[modify_param] = data;
+ accessor.addRecordDiff(READONLY_ZONE_ID, begin,
+ DatabaseAccessor::DIFF_DELETE, data1);
+ accessor.addRecordDiff(READONLY_ZONE_ID, begin + 1,
+ DatabaseAccessor::DIFF_ADD, data2);
+}
+
+TEST_F(MockDatabaseClientTest, journalWithBadData) {
+ MockAccessor& mock_accessor =
+ dynamic_cast<MockAccessor&>(*current_accessor_);
+
+ // One of the fields from the data source is broken as an RR parameter.
+ // The journal reader should still be constructed, but getNextDiff()
+ // should result in exception.
+ installBadDiff(mock_accessor, 1, DatabaseAccessor::DIFF_NAME,
+ "example..org");
+ installBadDiff(mock_accessor, 3, DatabaseAccessor::DIFF_TYPE,
+ "bad-rrtype");
+ installBadDiff(mock_accessor, 5, DatabaseAccessor::DIFF_TTL,
+ "bad-ttl");
+ installBadDiff(mock_accessor, 7, DatabaseAccessor::DIFF_RDATA,
+ "bad rdata");
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 1, 2).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 3, 4).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 5, 6).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 7, 8).
+ second->getNextDiff(), DataSourceError);
+}
+
}
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
index 0133508..e98f9bc 100644
--- a/src/lib/datasrc/tests/factory_unittest.cc
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -14,6 +14,7 @@
#include <boost/scoped_ptr.hpp>
+#include <datasrc/datasrc_config.h>
#include <datasrc/factory.h>
#include <datasrc/data_source.h>
#include <datasrc/sqlite3_accessor.h>
@@ -30,6 +31,70 @@ std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
namespace {
+// note this helper only checks the error that is received up to the length
+// of the expected string. It will always pass if you give it an empty
+// expected_error
+void
+pathtestHelper(const std::string& file, const std::string& expected_error) {
+ std::string error;
+ try {
+ DataSourceClientContainer(file, ElementPtr());
+ } catch (const DataSourceLibraryError& dsle) {
+ error = dsle.what();
+ }
+ ASSERT_LT(expected_error.size(), error.size());
+ EXPECT_EQ(expected_error, error.substr(0, expected_error.size()));
+}
+
+TEST(FactoryTest, paths) {
+ // Test whether the paths are made absolute if they are not,
+ // by inspecting the error that is raised when they are wrong
+ const std::string error("dlopen failed for ");
+ // With the current implementation, we can safely assume this has
+ // been set for this test (as the loader would otherwise also fail
+ // unless the loadable backend library happens to be installed)
+ const std::string builddir(getenv("B10_FROM_BUILD"));
+
+ // Absolute and ending with .so should have no change
+ pathtestHelper("/no_such_file.so", error + "/no_such_file.so");
+
+ // If no ending in .so, it should get _ds.so
+ pathtestHelper("/no_such_file", error + "/no_such_file_ds.so");
+
+ // If not starting with /, path should be added. For this test that
+ // means the build directory as set in B10_FROM_BUILD
+ pathtestHelper("no_such_file.so", error + builddir +
+ "/src/lib/datasrc/.libs/no_such_file.so");
+ pathtestHelper("no_such_file", error + builddir +
+ "/src/lib/datasrc/.libs/no_such_file_ds.so");
+
+ // Some tests with '.so' in the name itself
+ pathtestHelper("no_such_file.so.something", error + builddir +
+ "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+ pathtestHelper("/no_such_file.so.something", error +
+ "/no_such_file.so.something_ds.so");
+ pathtestHelper("/no_such_file.so.something.so", error +
+ "/no_such_file.so.something.so");
+ pathtestHelper("/no_such_file.so.so", error +
+ "/no_such_file.so.so");
+ pathtestHelper("no_such_file.so.something", error + builddir +
+ "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+
+ // Temporarily unset B10_FROM_BUILD to see that BACKEND_LIBRARY_PATH
+ // is used
+ unsetenv("B10_FROM_BUILD");
+ pathtestHelper("no_such_file.so", error + BACKEND_LIBRARY_PATH +
+ "no_such_file.so");
+ // Put it back just in case
+ setenv("B10_FROM_BUILD", builddir.c_str(), 1);
+
+ // Test some bad input values
+ ASSERT_THROW(DataSourceClientContainer("", ElementPtr()),
+ DataSourceLibraryError);
+ ASSERT_THROW(DataSourceClientContainer(".so", ElementPtr()),
+ DataSourceLibraryError);
+}
+
TEST(FactoryTest, sqlite3ClientBadConfig) {
// We start out by building the configuration data bit by bit,
// testing each form of 'bad config', until we have a good one.
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 2b854db..a1bd94e 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -177,6 +177,54 @@ TEST_F(InMemoryClientTest, iterator) {
EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
}
+TEST_F(InMemoryClientTest, iterator_separate_rrs) {
+ // Exactly the same tests as for iterator, but now with separate_rrs = true
+ // For the one that returns actual data, the AAAA should now be split up
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr aRRsetAAAA_r1(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA_r1->addRdata(rdata::in::AAAA("2001:db8::1"));
+ RRsetPtr aRRsetAAAA_r2(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA_r2->addRdata(rdata::in::AAAA("2001:db8::2"));
+
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b"), true), DataSourceError);
+ // This zone is not there either, even when there's a zone containing this
+ EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a"), true));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+
+ ASSERT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ ASSERT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ ASSERT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with full zone, one by one.
+ // It should be in ascending order in case of InMemory data source
+ // (isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"), true);
+ EXPECT_EQ(aRRsetA->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(aRRsetAAAA_r1->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(aRRsetAAAA_r2->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(subRRsetA->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
TEST_F(InMemoryClientTest, getZoneCount) {
EXPECT_EQ(0, memory_client.getZoneCount());
memory_client.addZone(
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 90b2ac1..61341f6 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -46,6 +46,7 @@ std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
std::string SQLITE_DBFILE_MEMORY = ":memory:";
std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+std::string SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
// The following file must be non existent and must be non"creatable";
// the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -116,6 +117,26 @@ TEST_F(SQLite3AccessorTest, noClass) {
EXPECT_FALSE(accessor->getZone("example.com.").first);
}
+// Simple check to test that the sequence is valid. It gets the next record
+// from the iterator, checks that it is not null, then checks the data.
+void checkRR(DatabaseAccessor::IteratorContextPtr& context,
+ std::string name, std::string ttl, std::string type, std::string rdata) {
+
+ // Mark where we are in the text
+ SCOPED_TRACE(name + " " + ttl + " " + type + " " + rdata);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+ // Get next record
+ EXPECT_TRUE(context->getNext(data));
+
+ // ... and check expected values
+ EXPECT_EQ(name, data[DatabaseAccessor::NAME_COLUMN]);
+ EXPECT_EQ(ttl, data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(type, data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(rdata, data[DatabaseAccessor::RDATA_COLUMN]);
+}
+
// This tests the iterator context
TEST_F(SQLite3AccessorTest, iterator) {
// Our test zone is conveniently small, but not empty
@@ -130,80 +151,138 @@ TEST_F(SQLite3AccessorTest, iterator) {
ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
std::string data[DatabaseAccessor::COLUMN_COUNT];
- // Get and check the first and only record
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ checkRR(context, "example.org.", "3600", "MX", "10 mail.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns1.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns2.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns3.example.org.");
+ checkRR(context, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+ checkRR(context, "dname.example.org.", "3600", "DNAME",
+ "dname.example.info.");
+ checkRR(context, "dname2.foo.example.org.", "3600", "DNAME",
+ "dname2.example.info.");
+ checkRR(context, "mail.example.org.", "3600", "A", "192.0.2.10");
+ checkRR(context, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+ checkRR(context, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+ checkRR(context, "www.example.org.", "3600", "A", "192.0.2.1");
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Check there's no other
+ EXPECT_FALSE(context->getNext(data));
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
+}
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns1.example.org. admin.example.org. "
- "1234 3600 1800 2419200 7200",
- data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// This tests the difference iterator context
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// Test that an attempt to create a difference iterator for a serial number
+// that does not exist throws an exception.
+TEST_F(SQLite3AccessorTest, diffIteratorNoRecords) {
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_DIFFS, "IN");
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Get the iterator context. Difference of version 1 does not exist, so
+ // this should throw an exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second, 1, 1234),
+ isc::datasrc::NoSuchSerial);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Check that an invalid high version number also throws an exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second, 1231, 2234),
+ NoSuchSerial);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Check that valid versions - but for the wrong zone which does not hold
+ // any records - also throws this exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second + 42, 1231, 1234),
+ NoSuchSerial);
- // Check there's no other
- EXPECT_FALSE(context->getNext(data));
+}
- // And make sure calling it again won't cause problems.
- EXPECT_FALSE(context->getNext(data));
+// Try to iterate through a valid sets of differences
+TEST_F(SQLite3AccessorTest, diffIteratorSequences) {
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_DIFFS, "IN");
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
+
+
+ // Check the difference sequence 1230-1231 (two adjacent differences)
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context1(accessor->getDiffs(zone_info.second, 1230, 1231));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context1);
+
+ // Change: 1230-1231
+ checkRR(context1, "example.org.", "1800", "SOA",
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+ checkRR(context1, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+ // Check there's no other and that calling it again after no records doesn't
+ // cause problems.
+ EXPECT_FALSE(context1->getNext(data));
+ EXPECT_FALSE(context1->getNext(data));
+
+
+ // Check that the difference sequence 1231-1233 (two separate difference
+ // sequences) is OK.
+ DatabaseAccessor::IteratorContextPtr
+ context2(accessor->getDiffs(zone_info.second, 1231, 1233));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+ // Change 1231-1232
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+ checkRR(context2, "unused.example.org.", "3600", "A", "192.0.2.102");
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+ // Change: 1232-1233
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+ checkRR(context2, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+ checkRR(context2, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+
+ // Check there's no other and that calling it again after no records doesn't
+ // cause problems.
+ EXPECT_FALSE(context2->getNext(data));
+ EXPECT_FALSE(context2->getNext(data));
+
+
+ // Check that the difference sequence 4294967280 to 1230 (serial number
+ // rollover) is OK
+ DatabaseAccessor::IteratorContextPtr
+ context3(accessor->getDiffs(zone_info.second, 4294967280U, 1230));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context3);
+
+ // Change 4294967280 to 1230.
+ checkRR(context3, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+ checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.31");
+ checkRR(context3, "example.org.", "1800", "SOA",
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+ checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.21");
+
+ EXPECT_FALSE(context3->getNext(data));
+ EXPECT_FALSE(context3->getNext(data));
+
+
+ // Check the difference sequence 1233-1231 (versions in wrong order). This
+ // should give an empty difference set.
+ DatabaseAccessor::IteratorContextPtr
+ context4(accessor->getDiffs(zone_info.second, 1233, 1231));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+ EXPECT_FALSE(context4->getNext(data));
+ EXPECT_FALSE(context4->getNext(data));
}
TEST(SQLite3Open, getDBNameExample2) {
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
index 64ae955..6a35fe3 100644
--- a/src/lib/datasrc/tests/testdata/Makefile.am
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -1,6 +1 @@
CLEANFILES = *.copied
-BUILT_SOURCES = rwtest.sqlite3.copied
-
-# We use install-sh with the -m option to make sure it's writable
-rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
- $(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 b/src/lib/datasrc/tests/testdata/brokendb.sqlite3
index 7aad3af..63f3cc5 100644
Binary files a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 and b/src/lib/datasrc/tests/testdata/brokendb.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs.sqlite3 b/src/lib/datasrc/tests/testdata/diffs.sqlite3
new file mode 100644
index 0000000..3820563
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/diffs.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs_table.sql b/src/lib/datasrc/tests/testdata/diffs_table.sql
new file mode 100644
index 0000000..0e05207
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/diffs_table.sql
@@ -0,0 +1,123 @@
+-- Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+--
+-- Permission to use, copy, modify, and/or distribute this software for any
+-- purpose with or without fee is hereby granted, provided that the above
+-- copyright notice and this permission notice appear in all copies.
+--
+-- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+-- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+-- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+-- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+-- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+-- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+-- PERFORMANCE OF THIS SOFTWARE.
+
+-- \brief Create Differences Table
+--
+-- This is a short-term solution to creating the differences table for testing
+-- purposes.
+--
+-- It is assumed that the database used is a copy of the "example.org.sqlite3"
+-- database in this test directory. The diffs table is created and populated
+-- with a set of RRs that purport to represent differences that end in the
+-- zone as is.
+--
+-- The file can be executed by the command:
+-- % sqlite3 -init <this-file> <database-file> ".quit"
+--
+-- The file gets executed as the set of SQL statements on the database file,
+-- the ".quit" on the command line then getting executed to exit SQLite3.
+
+-- Create the diffs table
+DROP TABLE diffs;
+CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ version INTEGER NOT NULL,
+ operation INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rrtype STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdata STRING NOT NULL);
+
+-- Populate it. A dummy zone_id is used for now - this will be updated last of
+-- all.
+
+-- Change from 4294967280 (0xfffffff0) to 1230 to show serial rollover
+-- Update one record in the zone.
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 4294967280, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 4294967280, 1, "www.example.org.", "A", 3600, "192.0.2.31");
+
+-- Records added in version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 0, "example.org.", "SOA", 1800,
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 0, "www.example.org.", "A", 3600, "192.0.2.21");
+
+-- Change 1230 to 1231: Change a parameter of the SOA record
+-- Records removed from version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 1, "example.org.", "SOA", 1800,
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+
+-- Records added in version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+
+-- Change 1231 to 1232: Remove one record, don't add anything.
+-- Records removed from version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 1, "unused.example.org.", "A", 3600, "192.0.2.102");
+
+-- Records added in version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1232, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Change 1232 to 1233: Add two, don't remove anything.
+-- Records removed from version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1232, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Records added in version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "sub.example.org.", "NS", 3600, "ns.sub.example.org.");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "ns.sub.example.org.", "A", 3600, "192.0.2.101");
+
+
+-- Change 1233 to 1234: change addresses of two A records
+-- Records removed from version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "www.example.org.", "A", 3600, "192.0.2.21");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "mail.example.org.", "A", 3600, "192.0.2.210");
+
+-- Records added in version 1234 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "www.example.org.", "A", 3600, "192.0.2.1");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "mail.example.org.", "A", 3600, "192.0.2.10");
+
+-- Finally, update the zone_id in the diffs table with what is actually
+-- in the zone table.
+UPDATE diffs SET zone_id =
+ (SELECT id FROM ZONES LIMIT 1);
diff --git a/src/lib/datasrc/tests/testdata/example.org.sqlite3 b/src/lib/datasrc/tests/testdata/example.org.sqlite3
index 070012f..60e6e05 100644
Binary files a/src/lib/datasrc/tests/testdata/example.org.sqlite3 and b/src/lib/datasrc/tests/testdata/example.org.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 b/src/lib/datasrc/tests/testdata/example2.com.sqlite3
index 8d3bb34..9da7d0e 100644
Binary files a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 and b/src/lib/datasrc/tests/testdata/example2.com.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
index ce95a1d..ccbb884 100644
Binary files a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test-root.sqlite3 b/src/lib/datasrc/tests/testdata/test-root.sqlite3
index 7cc6195..c1dae47 100644
Binary files a/src/lib/datasrc/tests/testdata/test-root.sqlite3 and b/src/lib/datasrc/tests/testdata/test-root.sqlite3 differ
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index fa1c744..e028bea 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -264,12 +264,15 @@ public:
/// proof of the non existence of any matching wildcard or non existence
/// of an exact match when a wildcard match is found.
///
- /// A derived version of this method may involve internal resource
- /// allocation, especially for constructing the resulting RRset, and may
- /// throw an exception if it fails.
- /// It throws DuplicateRRset exception if there are duplicate rrsets under
- /// the same domain.
- /// It should not throw other types of exceptions.
+ /// \exception std::bad_alloc Memory allocation such as for constructing
+ /// the resulting RRset fails
+ /// \exception DataSourceError Derived class specific exception, e.g.
+ /// when encountering a bad zone configuration or database connection
+ /// failure. Although these are considered rare, exceptional events,
+ /// it can happen under relatively usual conditions (unlike memory
+ /// allocation failure). So, in general, the application is expected
+ /// to catch this exception, either specifically or as a result of
+ /// catching a base exception class, and handle it gracefully.
///
/// \param name The domain name to be searched for.
/// \param type The RR type to be searched for.
@@ -438,6 +441,10 @@ public:
/// calls after \c commit() the implementation must throw a
/// \c DataSourceError exception.
///
+ /// If journaling was requested when getting this updater, it will refuse
+ /// to add the RRset if the sequence doesn't look like an IXFR (see
+ /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+ ///
/// \todo As noted above we may have to revisit the design details as we
/// gain experiences:
///
@@ -454,6 +461,8 @@ public:
///
/// \exception DataSourceError Called after \c commit(), RRset is invalid
/// (see above), internal data source error
+ /// \exception isc::BadValue Journaling is enabled and the current RRset
+ /// doesn't fit into the IXFR sequence (see above).
/// \exception std::bad_alloc Resource allocation failure
///
/// \param rrset The RRset to be added
@@ -503,6 +512,10 @@ public:
/// calls after \c commit() the implementation must throw a
/// \c DataSourceError exception.
///
+ /// If journaling was requested when getting this updater, it will refuse
+ /// to delete the RRset if the sequence doesn't look like an IXFR (see
+ /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+ ///
/// \todo As noted above we may have to revisit the design details as we
/// gain experiences:
///
@@ -520,6 +533,8 @@ public:
///
/// \exception DataSourceError Called after \c commit(), RRset is invalid
/// (see above), internal data source error
+ /// \exception isc::BadValue Journaling is enabled and the current RRset
+ /// doesn't fit into the IXFR sequence (see above).
/// \exception std::bad_alloc Resource allocation failure
///
/// \param rrset The RRset to be deleted
@@ -540,12 +555,106 @@ public:
///
/// \exception DataSourceError Duplicate call of the method,
/// internal data source error
+ /// \exception isc::BadValue Journaling is enabled and the update is not
+ /// a complete IXFR sequence.
virtual void commit() = 0;
};
/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+/// The base class for retrieving differences between two versions of a zone.
+///
+/// On construction, each derived class object will internally set up
+/// retrieving sequences of differences between two specific version of
+/// a specific zone managed in a particular data source. So the constructor
+/// of a derived class would normally take parameters to identify the zone
+/// and the two versions for which the differences should be retrieved.
+/// See \c DataSourceClient::getJournalReader for more concrete details
+/// used in this API.
+///
+/// Once constructed, an object of this class will act like an iterator
+/// over the sequences. Every time the \c getNextDiff() method is called
+/// it returns one element of the differences in the form of an \c RRset
+/// until it reaches the end of the entire sequences.
+class ZoneJournalReader {
+public:
+ /// Result codes used by a factory method for \c ZoneJournalReader
+ enum Result {
+ SUCCESS, ///< A \c ZoneJournalReader object successfully created
+ NO_SUCH_ZONE, ///< Specified zone does not exist in the data source
+ NO_SUCH_VERSION ///< Specified versions do not exist in the diff storage
+ };
+
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as protected to ensure that this base
+ /// class is never instantiated directly.
+ ZoneJournalReader() {}
+
+public:
+ /// The destructor
+ virtual ~ZoneJournalReader() {}
+
+ /// Return the next difference RR of difference sequences.
+ ///
+ /// In this API, the difference between two versions of a zone is
+ /// conceptually represented as IXFR-style difference sequences:
+ /// Each difference sequence is a sequence of RRs: an older version of
+ /// SOA (to be deleted), zero or more other deleted RRs, the
+ /// post-transaction SOA (to be added), and zero or more other
+ /// added RRs. (Note, however, that the underlying data source
+ /// implementation may or may not represent the difference in
+ /// straightforward realization of this concept. The mapping between
+ /// the conceptual difference and the actual implementation is hidden
+ /// in each derived class).
+ ///
+ /// This method provides an application with a higher level interface
+ /// to retrieve the difference along with the conceptual model: the
+ /// \c ZoneJournalReader object iterates over the entire sequences
+ /// from the beginning SOA (which is to be deleted) to one of the
+ /// added RR of with the ending SOA, and each call to this method returns
+ /// one RR in the form of an \c RRset that contains exactly one RDATA
+ /// in the order of the sequences.
+ ///
+ /// Note that the ordering of the sequences specifies the semantics of
+ /// each difference: add or delete. For example, the first RR is to
+ /// be deleted, and the last RR is to be added. So the return value
+ /// of this method does not explicitly indicate whether the RR is to be
+ /// added or deleted.
+ ///
+ /// This method ensures the returned \c RRset represents an RR, that is,
+ /// it contains exactly one RDATA. However, it does not necessarily
+ /// ensure that the resulting sequences are in the form of IXFR-style.
+ /// For example, the first RR is supposed to be an SOA, and it should
+ /// normally be the case, but this interface does not necessarily require
+ /// the derived class implementation ensure this. Normally the
+ /// differences are expected to be stored using this API (via a
+ /// \c ZoneUpdater object), and as long as that is the case and the
+ /// underlying implementation follows the requirement of the API, the
+ /// result of this method should be a valid IXFR-style sequences.
+ /// So this API does not mandate the almost redundant check as part of
+ /// the interface. If the application needs to make it sure 100%, it
+ /// must check the resulting sequence itself.
+ ///
+ /// Once the object reaches the end of the sequences, this method returns
+ /// \c Null. Any subsequent call will result in an exception of
+ /// class \c InvalidOperation.
+ ///
+ /// \exception InvalidOperation The method is called beyond the end of
+ /// the difference sequences.
+ /// \exception DataSourceError Underlying data is broken and the RR
+ /// cannot be created or other low level data source error.
+ ///
+ /// \return An \c RRset that contains one RDATA corresponding to the
+ /// next difference in the sequences.
+ virtual isc::dns::ConstRRsetPtr getNextDiff() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneJournalReader object.
+typedef boost::shared_ptr<ZoneJournalReader> ZoneJournalReaderPtr;
+
} // end of datasrc
} // end of isc
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 64dda17..3991033 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -14,6 +14,7 @@ libdhcp_la_SOURCES += option.cc option.h
libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp_la_SOURCES += option4_addrlst.cc option4_addrlst.h
libdhcp_la_SOURCES += dhcp6.h dhcp4.h
libdhcp_la_SOURCES += pkt6.cc pkt6.h
libdhcp_la_SOURCES += pkt4.cc pkt4.h
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
index b95a427..f84e495 100644
--- a/src/lib/dhcp/libdhcp.cc
+++ b/src/lib/dhcp/libdhcp.cc
@@ -17,6 +17,7 @@
#include <util/buffer.h>
#include <dhcp/libdhcp.h>
#include "config.h"
+#include <dhcp/dhcp4.h>
#include <dhcp/dhcp6.h>
#include <dhcp/option.h>
#include <dhcp/option6_ia.h>
@@ -90,8 +91,17 @@ LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
size_t offset = 0;
// 2 - header of DHCPv4 option
- while (offset + 2 <= buf.size()) {
+ while (offset + 1 <= buf.size()) {
uint8_t opt_type = buf[offset++];
+        if (opt_type == DHO_END) {
+            // End option: stop parsing. DHO_END itself is not stored.
+            return;
+        }
+        if (offset == buf.size()) {
+            isc_throw(OutOfRange, "Attempt to parse truncated option "
+                      << opt_type);
+        }
+
uint8_t opt_len = buf[offset++];
if (offset + opt_len > buf.size() ) {
isc_throw(OutOfRange, "Option parse failed. Tried to parse "
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index daef288..20dd97a 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -128,23 +128,6 @@ Option::pack4(isc::util::OutputBuffer& buf) {
}
unsigned int
-Option::pack4(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset) {
- if (offset + len() > buf_len) {
- isc_throw(OutOfRange, "Failed to pack v4 option=" <<
- type_ << ",len=" << len() << ": too small buffer.");
- }
- uint8_t *ptr = &buf[offset];
- ptr[0] = type_;
- ptr[1] = len() - getHeaderLen();
- ptr += 2;
- memcpy(ptr, &data_[0], data_.size());
-
- return offset + len();
-}
-
-unsigned int
Option::pack6(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
@@ -220,7 +203,7 @@ Option::unpack6(const boost::shared_array<uint8_t>& buf,
/// Returns length of the complete option (data length + DHCPv4/DHCPv6
/// option header)
-unsigned short
+uint16_t
Option::len() {
// length of the whole option is header and data stored in this option...
@@ -295,17 +278,7 @@ std::string Option::toText(int indent /* =0 */ ) {
return tmp.str();
}
-unsigned short
-Option::getType() {
- return type_;
-}
-
-const std::vector<uint8_t>&
-Option::getData() {
- return (data_);
-}
-
-unsigned short
+uint16_t
Option::getHeaderLen() {
switch (universe_) {
case V4:
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 3822cf0..088d094 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -178,20 +178,19 @@ public:
/// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
///
/// @return option type
- unsigned short
- getType();
+ unsigned short getType() { return (type_); }
/// Returns length of the complete option (data length + DHCPv4/DHCPv6
/// option header)
///
/// @return length of the option
- virtual unsigned short
+ virtual uint16_t
len();
/// @brief Returns length of header (2 for v4, 4 for v6)
///
/// @return length of option header
- virtual unsigned short
+ virtual uint16_t
getHeaderLen();
/// returns if option is valid (e.g. option may be truncated)
@@ -202,9 +201,9 @@ public:
/// Returns pointer to actual data.
///
- /// @return pointer to actual data (or NULL if there is no data)
- virtual const std::vector<uint8_t>&
- getData();
+ /// @return pointer to actual data (or reference to an empty vector
+ /// if there is no data)
+ virtual const std::vector<uint8_t>& getData() { return (data_); }
/// Adds a sub-option.
///
@@ -242,20 +241,6 @@ public:
~Option();
protected:
-
- /// Builds raw (over-wire) buffer of this option, including all
- /// defined suboptions. Version for building DHCPv4 options.
- ///
- /// @param buf output buffer (built options will be stored here)
- /// @param buf_len buffer length (used for buffer overflow checks)
- /// @param offset offset from start of the buf buffer
- ///
- /// @return offset to the next byte after last used byte
- virtual unsigned int
- pack4(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
- unsigned int offset);
-
/// Builds raw (over-wire) buffer of this option, including all
/// defined suboptions. Version for building DHCPv4 options.
///
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
new file mode 100644
index 0000000..88eb915
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io_utilities.h>
+#include <dhcp/option4_addrlst.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace isc::asiolink;
+
+Option4AddrLst::Option4AddrLst(uint8_t type)
+ :Option(V4, type) {
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const AddressContainer& addrs)
+ :Option(V4, type) {
+ setAddresses(addrs);
+ // don't set addrs_ directly. setAddresses() will do additional checks.
+}
+
+
+Option4AddrLst::Option4AddrLst(uint8_t type,
+ vector<uint8_t>::const_iterator first,
+ vector<uint8_t>::const_iterator last)
+ :Option(V4, type) {
+ if ( (distance(first, last) % V4ADDRESS_LEN) ) {
+ isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_
+ << " has invalid length=" << distance(first, last)
+ << ", must be divisible by 4.");
+ }
+
+ while (first != last) {
+ const uint8_t* ptr = &(*first);
+ addAddress(IOAddress(readUint32(ptr)));
+ first += V4ADDRESS_LEN;
+ }
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
+ :Option(V4, type) {
+ setAddress(addr);
+}
+
+void
+Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+
+ if (addrs_.size() * V4ADDRESS_LEN > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big."
+                  << " At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
+
+ AddressContainer::const_iterator addr = addrs_.begin();
+
+ while (addr != addrs_.end()) {
+ buf.writeUint32(*addr);
+ ++addr;
+ }
+}
+
+void Option4AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET) {
+ isc_throw(BadValue, "Can't store non-IPv4 address in "
+ << "Option4AddrLst option");
+ }
+ addrs_.clear();
+ addAddress(addr);
+}
+
+void Option4AddrLst::setAddresses(const AddressContainer& addrs) {
+
+ // Do not copy it as a whole. addAddress() does sanity checks.
+ // i.e. throw if someone tries to set IPv6 address.
+ addrs_.clear();
+ for (AddressContainer::const_iterator addr = addrs.begin();
+ addr != addrs.end(); ++addr) {
+ addAddress(*addr);
+ }
+}
+
+
+void Option4AddrLst::addAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET) {
+ isc_throw(BadValue, "Can't store non-IPv4 address in "
+ << "Option4AddrLst option");
+ }
+ addrs_.push_back(addr);
+}
+
+uint16_t Option4AddrLst::len() {
+
+ // Returns length of the complete option (option header + data length)
+ return (getHeaderLen() + addrs_.size() * V4ADDRESS_LEN);
+}
+
+std::string Option4AddrLst::toText(int indent /* =0 */ ) {
+ std::stringstream tmp;
+
+ for (int i = 0; i < indent; i++) {
+ tmp << " ";
+ }
+
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ":";
+
+ for (AddressContainer::const_iterator addr = addrs_.begin();
+ addr != addrs_.end(); ++addr) {
+ tmp << " " << (*addr);
+ }
+
+ return tmp.str();
+}
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
new file mode 100644
index 0000000..c795805
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -0,0 +1,167 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION4_ADDRLST_H_
+#define OPTION4_ADDRLST_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+
+/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
+///
+/// This class handles a list of IPv4 addresses. An example of such option
+/// is dns-servers option. It can also be used to handle a single address.
+class Option4AddrLst : public isc::dhcp::Option {
+public:
+
+ /// Defines a collection of IPv4 addresses.
+ typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+ /// @brief Constructor, creates an option with empty list of addresses.
+ ///
+ /// Creates empty option that can hold addresses. Addresses can be added
+ /// with addAddress(), setAddress() or setAddresses().
+ ///
+ /// @param type option type
+ Option4AddrLst(uint8_t type);
+
+ /// @brief Constructor, creates an option with a list of addresses.
+ ///
+ /// Creates an option that contains specified list of IPv4 addresses.
+ ///
+ /// @param type option type
+ /// @param addrs container with a list of addresses
+ Option4AddrLst(uint8_t type, const AddressContainer& addrs);
+
+ /// @brief Constructor, creates an option with a single address.
+ ///
+ /// Creates an option that contains a single address.
+ ///
+ /// @param type option type
+ /// @param addr a single address that will be stored as 1-elem. address list
+ Option4AddrLst(uint8_t type, const isc::asiolink::IOAddress& addr);
+
+ /// @brief Constructor, used for received options.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+ /// vector<int8_t> myData;
+ /// Example usage: new Option(V4, 123, myData.begin()+1, myData.end()-1)
+ /// This will create DHCPv4 option of type 123 that contains data from
+ /// trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option4AddrLst(uint8_t type, std::vector<uint8_t>::const_iterator first,
+ std::vector<uint8_t>::const_iterator last);
+
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
+ ///
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
+ ///
+ /// @param buf output buffer (option will be stored there)
+ virtual void
+ pack4(isc::util::OutputBuffer& buf);
+
+ /// Returns string representation of the option.
+ ///
+ /// @param indent number of spaces before printing text
+ ///
+ /// @return string with text representation.
+ virtual std::string
+ toText(int indent = 0);
+
+ /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+ /// option header)
+ ///
+ /// @return length of the option
+ virtual uint16_t len();
+
+ /// @brief Returns vector with addresses.
+ ///
+ /// We return a copy of our list. Although this includes overhead,
+ /// it also makes this list safe to use after this option object
+ /// is no longer available. As options are expected to hold only
+ /// a couple (1-3) addresses, the overhead is not that big.
+ ///
+ /// @return address container with addresses
+ AddressContainer
+ getAddresses() { return addrs_; };
+
+ /// @brief Sets addresses list.
+ ///
+    /// Clears the existing list of addresses and copies the supplied
+    /// collection into it. See addAddress() if you want to append an
+    /// address to the existing list, or setAddress() if you want to
+    /// store just a single address.
+ /// set the whole list at once.
+ ///
+    /// Each address must be an IPv4 address. Otherwise BadValue exception
+ /// will be thrown.
+ ///
+ /// @param addrs address collection to be set
+ void setAddresses(const AddressContainer& addrs);
+
+ /// @brief Clears address list and sets a single address.
+ ///
+ /// Clears existing list of addresses and adds a single address to that
+ /// list. This is very convenient method for options that are supposed to
+    /// contain only a single address. See addAddress() if you want to add
+ /// address to existing list or setAddresses() if you want to
+ /// set the whole list at once.
+ ///
+    /// Passed address must be an IPv4 address. Otherwise BadValue exception
+ /// will be thrown.
+ ///
+ /// @param addr an address that is going to be set as 1-element address list
+ void setAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Adds address to existing list of addresses.
+ ///
+ /// Adds a single address to that list. See setAddress() if you want to
+ /// define only a single address or setAddresses() if you want to
+ /// set the whole list at once.
+ ///
+    /// Passed address must be an IPv4 address. Otherwise BadValue exception
+ /// will be thrown.
+ ///
+    /// @param addr an address that is going to be added to existing list
+ void addAddress(const isc::asiolink::IOAddress& addr);
+
+protected:
+ /// contains list of addresses
+ AddressContainer addrs_;
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
index fc981fa..9be3810 100644
--- a/src/lib/dhcp/option6_addrlst.cc
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -50,6 +50,10 @@ Option6AddrLst::Option6AddrLst(unsigned short type,
void
Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+ if (addr.getFamily() != AF_INET6) {
+ isc_throw(BadValue, "Can't store non-IPv6 address in Option6AddrLst option");
+ }
+
addrs_.clear();
addrs_.push_back(addr);
}
@@ -128,7 +132,7 @@ std::string Option6AddrLst::toText(int indent /* =0 */) {
return tmp.str();
}
-unsigned short Option6AddrLst::len() {
+uint16_t Option6AddrLst::len() {
return (OPTION6_HDR_LEN + addrs_.size()*16);
}
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
index c5b32af..a73dc55 100644
--- a/src/lib/dhcp/option6_addrlst.h
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -16,17 +16,16 @@
#define OPTION6_ADDRLST_H_
#include <vector>
-#include "asiolink/io_address.h"
-#include "dhcp/option.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
namespace isc {
namespace dhcp {
-/// @brief Option class for handling list of IPv6 addresses.
+/// @brief DHCPv6 Option class for handling list of IPv6 addresses.
///
/// This class handles a list of IPv6 addresses. An example of such option
/// is dns-servers option. It can also be used to handle single address.
-///
class Option6AddrLst: public Option {
public:
@@ -105,17 +104,17 @@ public:
/// @brief Returns vector with addresses.
///
- /// As user may want to use/modify this list, it is better to return
- /// a copy rather than const reference to the original. This is
- /// usually one or two addresses long, so it is not a big deal.
- ///
- /// @return vector with addresses
+ /// We return a copy of our list. Although this includes overhead,
+ /// it also makes this list safe to use after this option object
+ /// is no longer available. As options are expected to hold only
+ /// a couple (1-3) addresses, the overhead is not that big.
///
+ /// @return address container with addresses
AddressContainer
getAddresses() { return addrs_; };
// returns data length (data length + DHCPv4/DHCPv6 option header)
- virtual unsigned short len();
+ virtual uint16_t len();
protected:
AddressContainer addrs_;
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index 46daee1..209f500 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -77,7 +77,7 @@ Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
isc_throw(OutOfRange, "Option " << type_ << " truncated");
}
-
+
iaid_ = readUint32(&buf[offset]);
offset += sizeof(uint32_t);
@@ -121,9 +121,9 @@ std::string Option6IA::toText(int indent /* = 0*/) {
return tmp.str();
}
-unsigned short Option6IA::len() {
+uint16_t Option6IA::len() {
- unsigned short length = OPTION6_HDR_LEN /*header (4)*/ +
+ uint16_t length = OPTION6_HDR_LEN /*header (4)*/ +
OPTION6_IA_LEN /* option content (12) */;
// length of all suboptions
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
index 516b2fc..cab8068 100644
--- a/src/lib/dhcp/option6_ia.h
+++ b/src/lib/dhcp/option6_ia.h
@@ -116,7 +116,7 @@ public:
/// Returns length of this option, including option header and suboptions
///
/// @return length of this option
- virtual unsigned short
+ virtual uint16_t
len();
protected:
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index 4177714..fd3bca4 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -116,9 +116,9 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
return tmp.str();
}
-unsigned short Option6IAAddr::len() {
+uint16_t Option6IAAddr::len() {
- unsigned short length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+ uint16_t length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
// length of all suboptions
// TODO implement:
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
index 60c5c48..40e5967 100644
--- a/src/lib/dhcp/option6_iaaddr.h
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -126,8 +126,7 @@ public:
getValid() const { return valid_; }
/// returns data length (data length + DHCPv4/DHCPv6 option header)
- virtual unsigned short
- len();
+ virtual uint16_t len();
protected:
/// contains an IPv6 address
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index ba07a10..bea93fc 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -47,11 +47,9 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(NULL, 0), // not used, this is TX packet
bufferOut_(DHCPV4_PKT_HDR_LEN),
msg_type_(msg_type)
{
- /// TODO: fixed fields, uncomment in ticket #1224
memset(chaddr_, 0, MAX_CHADDR_LEN);
memset(sname_, 0, MAX_SNAME_LEN);
memset(file_, 0, MAX_FILE_LEN);
@@ -64,7 +62,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
ifindex_(-1),
local_port_(DHCP4_SERVER_PORT),
remote_port_(DHCP4_CLIENT_PORT),
- /// TODO Fixed fields, uncomment in ticket #1224
op_(BOOTREQUEST),
transid_(0),
secs_(0),
@@ -73,7 +70,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(data, len),
bufferOut_(0), // not used, this is RX packet
msg_type_(DHCPDISCOVER)
{
@@ -82,6 +78,9 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
<< " received, at least " << DHCPV4_PKT_HDR_LEN
<< "is expected");
}
+
+ data_.resize(len);
+ memcpy(&data_[0], data, len);
}
size_t
@@ -117,35 +116,43 @@ Pkt4::pack() {
LibDHCP::packOptions(bufferOut_, options_);
+ // add END option that indicates end of options
+ // (End option is very simple, just a 255 octet)
+ bufferOut_.writeUint8(DHO_END);
+
return (true);
}
bool
Pkt4::unpack() {
- if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+
+ // input buffer (used during message reception)
+ isc::util::InputBuffer bufferIn(&data_[0], data_.size());
+
+ if (bufferIn.getLength()<DHCPV4_PKT_HDR_LEN) {
isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
- << bufferIn_.getLength() << " received, at least "
+ << bufferIn.getLength() << " received, at least "
<< DHCPV4_PKT_HDR_LEN << "is expected");
}
- op_ = bufferIn_.readUint8();
- htype_ = bufferIn_.readUint8();
- hlen_ = bufferIn_.readUint8();
- hops_ = bufferIn_.readUint8();
- transid_ = bufferIn_.readUint32();
- secs_ = bufferIn_.readUint16();
- flags_ = bufferIn_.readUint16();
- ciaddr_ = IOAddress(bufferIn_.readUint32());
- yiaddr_ = IOAddress(bufferIn_.readUint32());
- siaddr_ = IOAddress(bufferIn_.readUint32());
- giaddr_ = IOAddress(bufferIn_.readUint32());
- bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
- bufferIn_.readData(sname_, MAX_SNAME_LEN);
- bufferIn_.readData(file_, MAX_FILE_LEN);
-
- size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+ op_ = bufferIn.readUint8();
+ htype_ = bufferIn.readUint8();
+ hlen_ = bufferIn.readUint8();
+ hops_ = bufferIn.readUint8();
+ transid_ = bufferIn.readUint32();
+ secs_ = bufferIn.readUint16();
+ flags_ = bufferIn.readUint16();
+ ciaddr_ = IOAddress(bufferIn.readUint32());
+ yiaddr_ = IOAddress(bufferIn.readUint32());
+ siaddr_ = IOAddress(bufferIn.readUint32());
+ giaddr_ = IOAddress(bufferIn.readUint32());
+ bufferIn.readData(chaddr_, MAX_CHADDR_LEN);
+ bufferIn.readData(sname_, MAX_SNAME_LEN);
+ bufferIn.readData(file_, MAX_FILE_LEN);
+
+ size_t opts_len = bufferIn.getLength() - bufferIn.getPosition();
vector<uint8_t> optsBuffer;
// first use of readVector
- bufferIn_.readVector(optsBuffer, opts_len);
+ bufferIn.readVector(optsBuffer, opts_len);
LibDHCP::unpackOptions4(optsBuffer, options_);
return (true);
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 8517091..189d95d 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -299,10 +299,21 @@ public:
///
/// @return returns option of requested type (or NULL)
/// if no such option is present
-
boost::shared_ptr<Option>
getOption(uint8_t opt_type);
+
+ /// @brief set interface over which packet should be sent
+ ///
+ /// @param interface defines outbound interface
+ void setIface(const std::string& interface){ iface_ = interface; }
+
+ /// @brief gets interface over which packet was received or
+ /// will be transmitted
+ ///
+ /// @return name of the interface
+ std::string getIface() const { return iface_; }
+
protected:
/// converts DHCP message type to BOOTP op type
@@ -385,14 +396,15 @@ protected:
// end of real DHCPv4 fields
- /// input buffer (used during message reception)
- /// Note that it must be modifiable as hooks can modify incoming buffer),
- /// thus OutputBuffer, not InputBuffer
- isc::util::InputBuffer bufferIn_;
-
/// output buffer (used during message
isc::util::OutputBuffer bufferOut_;
+ // that's the data of input buffer used in RX packet. Note that
+ // InputBuffer does not store the data itself, but just expects that
+ // data will be valid for the whole life of InputBuffer. Therefore we
+ // need to keep the data around.
+ std::vector<uint8_t> data_;
+
/// message type (e.g. 1=DHCPDISCOVER)
/// TODO: this will eventually be replaced with DHCP Message Type
/// option (option 53)
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 01799da..176992f 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -18,6 +18,7 @@ libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option4_addrlst.cc ../option4_addrlst.h option4_addrlst_unittest.cc
libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
new file mode 100644
index 0000000..d4ecf80
--- /dev/null
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -0,0 +1,273 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <util/buffer.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+namespace {
+
+// a sample data (list of 4 addresses)
+const uint8_t sampledata[] = {
+ 192, 0, 2, 3, // 192.0.2.3
+ 255, 255, 255, 0, // 255.255.255.0 - popular netmask
+ 0, 0, 0 , 0, // used for default routes or (any address)
+ 127, 0, 0, 1 // loopback
+};
+
+// expected on-wire format for an option with 1 address
+const uint8_t expected1[] = { // 1 address
+ DHO_DOMAIN_NAME_SERVERS, 4, // type, length
+ 192, 0, 2, 3, // 192.0.2.3
+};
+
+// expected on-wire format for an option with 4 addresses
+const uint8_t expected4[] = { // 4 addresses
+ 254, 16, // type = 254, len = 16
+ 192, 0, 2, 3, // 192.0.2.3
+ 255, 255, 255, 0, // 255.255.255.0 - popular netmask
+ 0, 0, 0 ,0, // used for default routes or (any address)
+ 127, 0, 0, 1 // loopback
+};
+
+class Option4AddrLstTest : public ::testing::Test {
+protected:
+
+ Option4AddrLstTest():
+ vec_(vector<uint8_t>(300,0)) // 300 bytes long filled with 0s
+ {
+ sampleAddrs_.push_back(IOAddress("192.0.2.3"));
+ sampleAddrs_.push_back(IOAddress("255.255.255.0"));
+ sampleAddrs_.push_back(IOAddress("0.0.0.0"));
+ sampleAddrs_.push_back(IOAddress("127.0.0.1"));
+ }
+
+ vector<uint8_t> vec_;
+ Option4AddrLst::AddressContainer sampleAddrs_;
+
+};
+
+TEST_F(Option4AddrLstTest, parse1) {
+
+ memcpy(&vec_[0], sampledata, sizeof(sampledata));
+
+ // just one address
+ Option4AddrLst* opt1 = 0;
+ EXPECT_NO_THROW(
+ opt1 = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS,
+ vec_.begin(),
+ vec_.begin()+4);
+ // use just first address (4 bytes), not the whole
+ // sampledata
+ );
+
+ EXPECT_EQ(Option::V4, opt1->getUniverse());
+
+ EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt1->getType());
+ EXPECT_EQ(6, opt1->len()); // 2 (header) + 4 (1x IPv4 addr)
+
+ Option4AddrLst::AddressContainer addrs = opt1->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ opt1 = 0;
+ );
+
+ // 1 address
+}
+
+TEST_F(Option4AddrLstTest, parse4) {
+
+ vector<uint8_t> buffer(300,0); // 300 bytes long filled with 0s
+
+ memcpy(&buffer[0], sampledata, sizeof(sampledata));
+
+ // 4 addresses
+ Option4AddrLst* opt4 = 0;
+ EXPECT_NO_THROW(
+ opt4 = new Option4AddrLst(254,
+ buffer.begin(),
+ buffer.begin()+sizeof(sampledata));
+ );
+
+ EXPECT_EQ(Option::V4, opt4->getUniverse());
+
+ EXPECT_EQ(254, opt4->getType());
+ EXPECT_EQ(18, opt4->len()); // 2 (header) + 16 (4x IPv4 addrs)
+
+ Option4AddrLst::AddressContainer addrs = opt4->getAddresses();
+ ASSERT_EQ(4, addrs.size());
+
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+ EXPECT_NO_THROW(
+ delete opt4;
+ opt4 = 0;
+ );
+}
+
+TEST_F(Option4AddrLstTest, assembly1) {
+
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("192.0.2.3"));
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
+ EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt->getType());
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ ASSERT_EQ(6, opt->len());
+ ASSERT_EQ(6, buf.getLength());
+
+ EXPECT_EQ(0, memcmp(expected1, buf.getData(), 6));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ opt = 0;
+ );
+
+ // This is old-fashioned option. We don't serve IPv6 types here!
+ EXPECT_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("2001:db8::1")),
+ BadValue
+ );
+ if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+ delete opt;
+ }
+}
+
+TEST_F(Option4AddrLstTest, assembly4) {
+
+
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(254, sampleAddrs_);
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
+ EXPECT_EQ(254, opt->getType());
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(4, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
+ ASSERT_EQ(18, buf.getLength());
+
+ ASSERT_EQ(0, memcmp(expected4, buf.getData(), 18));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ opt = 0;
+ );
+
+ // This is old-fashioned option. We don't serve IPv6 types here!
+ sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+ EXPECT_THROW(
+ opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, sampleAddrs_),
+ BadValue
+ );
+ if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+ delete opt;
+ }
+}
+
+TEST_F(Option4AddrLstTest, setAddress) {
+ Option4AddrLst* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(123, IOAddress("1.2.3.4"));
+ );
+ opt->setAddress(IOAddress("192.0.255.255"));
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("192.0.255.255", addrs[0].toText());
+
+    // Only IPv4 addresses are accepted; an IPv6 address must be rejected.
+ EXPECT_THROW(
+ opt->setAddress(IOAddress("2001:db8::1")),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(Option4AddrLstTest, setAddresses) {
+
+ Option4AddrLst* opt = 0;
+
+ EXPECT_NO_THROW(
+ opt = new Option4AddrLst(123); // empty list
+ );
+
+ opt->setAddresses(sampleAddrs_);
+
+ Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+ ASSERT_EQ(4, addrs.size() );
+ EXPECT_EQ("192.0.2.3", addrs[0].toText());
+ EXPECT_EQ("255.255.255.0", addrs[1].toText());
+ EXPECT_EQ("0.0.0.0", addrs[2].toText());
+ EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    // Only IPv4 addresses are accepted; an IPv6 address must be rejected.
+ sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+ EXPECT_THROW(
+ opt->setAddresses(sampleAddrs_),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index db3ee3b..66dce8f 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -402,6 +402,8 @@ TEST_F(OptionTest, v6_addgetdel) {
// let's try to delete - should fail
EXPECT_TRUE(false == parent->delOption(2));
+
+ delete parent;
}
}
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index c89743f..091bfac 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -487,13 +487,15 @@ TEST(Pkt4Test, options) {
const OutputBuffer& buf = pkt->getBuffer();
// check that all options are stored, they should take sizeof(v4Opts)
- ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+ // there also should be OPTION_END added (just one byte)
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts) + 1,
buf.getLength());
// check that this extra data actually contains our options
const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+ EXPECT_EQ(DHO_END, static_cast<uint8_t>(*(ptr + sizeof(v4Opts))));
EXPECT_NO_THROW(
delete pkt;
@@ -559,4 +561,17 @@ TEST(Pkt4Test, unpackOptions) {
EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
}
+// This test verifies methods that are used for manipulating meta fields
+// i.e. fields that are not part of DHCPv4 (e.g. interface name).
+TEST(Pkt4Test, metaFields) {
+ Pkt4 pkt(DHCPDISCOVER, 1234);
+
+ pkt.setIface("lo0");
+
+ EXPECT_EQ("lo0", pkt.getIface());
+
+ /// TODO: Expand this test once additional getters/setters are
+ /// implemented.
+}
+
} // end of anonymous namespace
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 0d2bffd..5b93f75 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -84,6 +84,8 @@ BUILT_SOURCES += rdataclass.h rdataclass.cc
lib_LTLIBRARIES = libdns++.la
+libdns___la_LDFLAGS = -no-undefined -version-info 1:0:1
+
libdns___la_SOURCES =
libdns___la_SOURCES += edns.h edns.cc
libdns___la_SOURCES += exceptions.h exceptions.cc
@@ -102,6 +104,7 @@ libdns___la_SOURCES += rrsetlist.h rrsetlist.cc
libdns___la_SOURCES += rrttl.h rrttl.cc
libdns___la_SOURCES += rrtype.cc
libdns___la_SOURCES += question.h question.cc
+libdns___la_SOURCES += serial.h serial.cc
libdns___la_SOURCES += tsig.h tsig.cc
libdns___la_SOURCES += tsigerror.h tsigerror.cc
libdns___la_SOURCES += tsigkey.h tsigkey.cc
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 3b89358..dd14991 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -12,6 +12,7 @@ libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += serial_python.cc serial_python.h
libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 0a7d8e5..212141c 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -49,6 +49,7 @@
#include "rrset_python.h"
#include "rrttl_python.h"
#include "rrtype_python.h"
+#include "serial_python.h"
#include "tsigerror_python.h"
#include "tsigkey_python.h"
#include "tsig_python.h"
@@ -492,6 +493,18 @@ initModulePart_RRType(PyObject* mod) {
}
bool
+initModulePart_Serial(PyObject* mod) {
+ if (PyType_Ready(&serial_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&serial_type);
+ PyModule_AddObject(mod, "Serial",
+ reinterpret_cast<PyObject*>(&serial_type));
+
+ return (true);
+}
+
+bool
initModulePart_TSIGError(PyObject* mod) {
if (PyType_Ready(&tsigerror_type) < 0) {
return (false);
@@ -804,6 +817,10 @@ PyInit_pydnspp(void) {
return (NULL);
}
+ if (!initModulePart_Serial(mod)) {
+ return (NULL);
+ }
+
if (!initModulePart_TSIGKey(mod)) {
return (NULL);
}
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index 06c0263..e4ff890 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -16,6 +16,7 @@
#include <Python.h>
#include <dns/rdata.h>
#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
#include <util/buffer.h>
#include <util/python/pycppwrapper_util.h>
@@ -23,6 +24,7 @@
#include "rrtype_python.h"
#include "rrclass_python.h"
#include "messagerenderer_python.h"
+#include "name_python.h"
using namespace isc::dns;
using namespace isc::dns::python;
@@ -31,6 +33,27 @@ using namespace isc::util::python;
using namespace isc::dns::rdata;
namespace {
+
+typedef PyObject* method(PyObject* self, PyObject* args);
+
+// Wrap a method into an exception handling, converting C++ exceptions
+// to python ones. The params and return value is just passed through.
+PyObject*
+exception_wrap(method* method, PyObject* self, PyObject* args) {
+ try {
+ return (method(self, args));
+ } catch (const std::exception& ex) {
+ // FIXME: These exceptions are not tested, I don't know how or if
+ // at all they can be triggered. But they are caught just in case.
+ PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+ ex.what()).c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_Exception, "Unknown exception");
+ return (NULL);
+ }
+}
+
class s_Rdata : public PyObject {
public:
isc::dns::rdata::ConstRdataPtr cppobj;
@@ -44,16 +67,16 @@ typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
//
// General creation and destruction
-int Rdata_init(s_Rdata* self, PyObject* args);
-void Rdata_destroy(s_Rdata* self);
+int Rdata_init(PyObject* self, PyObject* args, PyObject*);
+void Rdata_destroy(PyObject* self);
// These are the functions we export
-PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(PyObject* self, PyObject*);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
PyObject* Rdata_str(PyObject* self);
-PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_toWire(PyObject* self, PyObject* args);
+PyObject* RData_richcmp(PyObject* self, PyObject* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -62,9 +85,9 @@ PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// 3. Argument type
// 4. Documentation
PyMethodDef Rdata_methods[] = {
- { "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
+ { "to_text", Rdata_toText, METH_NOARGS,
"Returns the string representation" },
- { "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
+ { "to_wire", Rdata_toWire, METH_VARARGS,
"Converts the Rdata object to wire format.\n"
"The argument can be either a MessageRenderer or an object that "
"implements the sequence interface. If the object is mutable "
@@ -75,58 +98,89 @@ PyMethodDef Rdata_methods[] = {
};
int
-Rdata_init(s_Rdata* self, PyObject* args) {
+Rdata_init(PyObject* self_p, PyObject* args, PyObject*) {
PyObject* rrtype;
PyObject* rrclass;
const char* s;
const char* data;
Py_ssize_t len;
+ s_Rdata* self(static_cast<s_Rdata*>(self_p));
- // Create from string
- if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
- &rrclass_type, &rrclass,
- &s)) {
- self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
- PyRRClass_ToRRClass(rrclass), s);
- return (0);
- } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
- &rrclass_type, &rrclass, &data, &len)) {
- InputBuffer input_buffer(data, len);
- self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
- PyRRClass_ToRRClass(rrclass),
- input_buffer, len);
- return (0);
+ try {
+ // Create from string
+ if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
+ &rrclass_type, &rrclass,
+ &s)) {
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass), s);
+ return (0);
+ } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
+ &rrclass_type, &rrclass, &data, &len)) {
+ InputBuffer input_buffer(data, len);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass),
+ input_buffer, len);
+ return (0);
+ }
+ } catch (const isc::dns::rdata::InvalidRdataText& irdt) {
+ PyErr_SetString(po_InvalidRdataText, irdt.what());
+ return (-1);
+ } catch (const isc::dns::rdata::InvalidRdataLength& irdl) {
+ PyErr_SetString(po_InvalidRdataLength, irdl.what());
+ return (-1);
+ } catch (const isc::dns::rdata::CharStringTooLong& cstl) {
+ PyErr_SetString(po_CharStringTooLong, cstl.what());
+ return (-1);
+ } catch (const isc::dns::DNSMessageFORMERR& dmfe) {
+ PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+ return (-1);
+ } catch (const std::exception& ex) {
+ // FIXME: These exceptions are not tested, I don't know how or if
+ // at all they can be triggered. But they are caught just in case.
+ PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+ ex.what()).c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_Exception, "Unknown exception");
+ return (-1);
}
return (-1);
}
void
-Rdata_destroy(s_Rdata* self) {
+Rdata_destroy(PyObject* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->cppobj.reset();
+ static_cast<s_Rdata*>(self)->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
PyObject*
-Rdata_toText(s_Rdata* self) {
+Rdata_toText_internal(PyObject* self, PyObject*) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ return (Py_BuildValue("s", static_cast<const s_Rdata*>(self)->cppobj->
+ toText().c_str()));
+}
+
+PyObject*
+Rdata_toText(PyObject* self, PyObject* args) {
+ return (exception_wrap(&Rdata_toText_internal, self, args));
}
PyObject*
Rdata_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
- const_cast<char*>("to_text"),
+ const_cast<char*>("to_text"),
const_cast<char*>("")));
}
PyObject*
-Rdata_toWire(s_Rdata* self, PyObject* args) {
+Rdata_toWire_internal(PyObject* self_p, PyObject* args) {
PyObject* bytes;
PyObject* mr;
+ const s_Rdata* self(static_cast<const s_Rdata*>(self_p));
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
@@ -134,6 +188,11 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
OutputBuffer buffer(4);
self->cppobj->toWire(buffer);
PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
+ // Make sure exceptions from here are propagated.
+ // The exception is already set, so we just return NULL
+ if (rd_bytes == NULL) {
+ return (NULL);
+ }
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
// We need to release the object we temporarily created here
// to prevent memory leak
@@ -152,45 +211,64 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
}
PyObject*
-RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
- bool c;
+Rdata_toWire(PyObject* self, PyObject* args) {
+ return (exception_wrap(&Rdata_toWire_internal, self, args));
+}
+
+PyObject*
+RData_richcmp(PyObject* self_p, PyObject* other_p, int op) {
+ try {
+ bool c;
+ const s_Rdata* self(static_cast<const s_Rdata*>(self_p)),
+ * other(static_cast<const s_Rdata*>(other_p));
- // Check for null and if the types match. If different type,
- // simply return False
- if (!other || (self->ob_type != other->ob_type)) {
- Py_RETURN_FALSE;
- }
+ // Check for null and if the types match. If different type,
+ // simply return False
+ if (!other || (self->ob_type != other->ob_type)) {
+ Py_RETURN_FALSE;
+ }
- switch (op) {
- case Py_LT:
- c = self->cppobj->compare(*other->cppobj) < 0;
- break;
- case Py_LE:
- c = self->cppobj->compare(*other->cppobj) < 0 ||
- self->cppobj->compare(*other->cppobj) == 0;
- break;
- case Py_EQ:
- c = self->cppobj->compare(*other->cppobj) == 0;
- break;
- case Py_NE:
- c = self->cppobj->compare(*other->cppobj) != 0;
- break;
- case Py_GT:
- c = self->cppobj->compare(*other->cppobj) > 0;
- break;
- case Py_GE:
- c = self->cppobj->compare(*other->cppobj) > 0 ||
- self->cppobj->compare(*other->cppobj) == 0;
- break;
- default:
- PyErr_SetString(PyExc_IndexError,
- "Unhandled rich comparison operator");
+ switch (op) {
+ case Py_LT:
+ c = self->cppobj->compare(*other->cppobj) < 0;
+ break;
+ case Py_LE:
+ c = self->cppobj->compare(*other->cppobj) < 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
+ break;
+ case Py_EQ:
+ c = self->cppobj->compare(*other->cppobj) == 0;
+ break;
+ case Py_NE:
+ c = self->cppobj->compare(*other->cppobj) != 0;
+ break;
+ case Py_GT:
+ c = self->cppobj->compare(*other->cppobj) > 0;
+ break;
+ case Py_GE:
+ c = self->cppobj->compare(*other->cppobj) > 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
+ break;
+ default:
+ PyErr_SetString(PyExc_IndexError,
+ "Unhandled rich comparison operator");
+ return (NULL);
+ }
+ if (c) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+ } catch (const std::exception& ex) {
+ // FIXME: These exceptions are not tested, I don't know how or if
+ // at all they can be triggered. But they are caught just in case.
+ PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+ ex.what()).c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_Exception, "Unknown exception");
return (NULL);
}
- if (c)
- Py_RETURN_TRUE;
- else
- Py_RETURN_FALSE;
}
} // end of unnamed namespace
@@ -217,7 +295,7 @@ PyTypeObject rdata_type = {
"pydnspp.Rdata",
sizeof(s_Rdata), // tp_basicsize
0, // tp_itemsize
- (destructor)Rdata_destroy, // tp_dealloc
+ Rdata_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -237,7 +315,7 @@ PyTypeObject rdata_type = {
"a set of common interfaces to manipulate concrete RDATA objects.",
NULL, // tp_traverse
NULL, // tp_clear
- (richcmpfunc)RData_richcmp, // tp_richcompare
+ RData_richcmp, // tp_richcompare
0, // tp_weaklistoffset
NULL, // tp_iter
NULL, // tp_iternext
@@ -249,7 +327,7 @@ PyTypeObject rdata_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- (initproc)Rdata_init, // tp_init
+ Rdata_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index 73a19e7..77d520b 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -52,51 +52,51 @@ public:
int RRset_init(s_RRset* self, PyObject* args);
void RRset_destroy(s_RRset* self);
-PyObject* RRset_getRdataCount(s_RRset* self);
-PyObject* RRset_getName(s_RRset* self);
-PyObject* RRset_getClass(s_RRset* self);
-PyObject* RRset_getType(s_RRset* self);
-PyObject* RRset_getTTL(s_RRset* self);
-PyObject* RRset_setName(s_RRset* self, PyObject* args);
-PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-PyObject* RRset_toText(s_RRset* self);
+PyObject* RRset_getRdataCount(PyObject* self, PyObject* args);
+PyObject* RRset_getName(PyObject* self, PyObject* args);
+PyObject* RRset_getClass(PyObject* self, PyObject* args);
+PyObject* RRset_getType(PyObject* self, PyObject* args);
+PyObject* RRset_getTTL(PyObject* self, PyObject* args);
+PyObject* RRset_setName(PyObject* self, PyObject* args);
+PyObject* RRset_setTTL(PyObject* self, PyObject* args);
+PyObject* RRset_toText(PyObject* self, PyObject* args);
PyObject* RRset_str(PyObject* self);
-PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-PyObject* RRset_getRdata(PyObject* po_self, PyObject*);
-PyObject* RRset_removeRRsig(s_RRset* self);
+PyObject* RRset_toWire(PyObject* self, PyObject* args);
+PyObject* RRset_addRdata(PyObject* self, PyObject* args);
+PyObject* RRset_getRdata(PyObject* po_self, PyObject* args);
+PyObject* RRset_removeRRsig(PyObject* self, PyObject* args);
// TODO: iterator?
PyMethodDef RRset_methods[] = {
- { "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
+ { "get_rdata_count", RRset_getRdataCount, METH_NOARGS,
"Returns the number of rdata fields." },
- { "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
+ { "get_name", RRset_getName, METH_NOARGS,
"Returns the name of the RRset, as a Name object." },
- { "get_class", reinterpret_cast<PyCFunction>(RRset_getClass), METH_NOARGS,
+ { "get_class", RRset_getClass, METH_NOARGS,
"Returns the class of the RRset as an RRClass object." },
- { "get_type", reinterpret_cast<PyCFunction>(RRset_getType), METH_NOARGS,
+ { "get_type", RRset_getType, METH_NOARGS,
"Returns the type of the RRset as an RRType object." },
- { "get_ttl", reinterpret_cast<PyCFunction>(RRset_getTTL), METH_NOARGS,
+ { "get_ttl", RRset_getTTL, METH_NOARGS,
"Returns the TTL of the RRset as an RRTTL object." },
- { "set_name", reinterpret_cast<PyCFunction>(RRset_setName), METH_VARARGS,
+ { "set_name", RRset_setName, METH_VARARGS,
"Sets the name of the RRset.\nTakes a Name object as an argument." },
- { "set_ttl", reinterpret_cast<PyCFunction>(RRset_setTTL), METH_VARARGS,
+ { "set_ttl", RRset_setTTL, METH_VARARGS,
"Sets the TTL of the RRset.\nTakes an RRTTL object as an argument." },
- { "to_text", reinterpret_cast<PyCFunction>(RRset_toText), METH_NOARGS,
+ { "to_text", RRset_toText, METH_NOARGS,
"Returns the text representation of the RRset as a string" },
- { "to_wire", reinterpret_cast<PyCFunction>(RRset_toWire), METH_VARARGS,
+ { "to_wire", RRset_toWire, METH_VARARGS,
"Converts the RRset object to wire format.\n"
"The argument can be either a MessageRenderer or an object that "
"implements the sequence interface. If the object is mutable "
"(for instance a bytearray()), the wire data is added in-place.\n"
"If it is not (for instance a bytes() object), a new object is "
"returned" },
- { "add_rdata", reinterpret_cast<PyCFunction>(RRset_addRdata), METH_VARARGS,
+ { "add_rdata", RRset_addRdata, METH_VARARGS,
"Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
{ "get_rdata", RRset_getRdata, METH_NOARGS,
"Returns a List containing all Rdata elements" },
- { "remove_rrsig", reinterpret_cast<PyCFunction>(RRset_removeRRsig), METH_NOARGS,
+ { "remove_rrsig", RRset_removeRRsig, METH_NOARGS,
"Clears the list of RRsigs for this RRset" },
{ NULL, NULL, 0, NULL }
};
@@ -133,14 +133,16 @@ RRset_destroy(s_RRset* self) {
}
PyObject*
-RRset_getRdataCount(s_RRset* self) {
- return (Py_BuildValue("I", self->cppobj->getRdataCount()));
+RRset_getRdataCount(PyObject* self, PyObject*) {
+ return (Py_BuildValue("I", static_cast<const s_RRset*>(self)->cppobj->
+ getRdataCount()));
}
PyObject*
-RRset_getName(s_RRset* self) {
+RRset_getName(PyObject* self, PyObject*) {
try {
- return (createNameObject(self->cppobj->getName()));
+ return (createNameObject(static_cast<const s_RRset*>(self)->cppobj->
+ getName()));
} catch (const exception& ex) {
const string ex_what =
"Unexpected failure getting rrset Name: " +
@@ -154,9 +156,10 @@ RRset_getName(s_RRset* self) {
}
PyObject*
-RRset_getClass(s_RRset* self) {
+RRset_getClass(PyObject* self, PyObject*) {
try {
- return (createRRClassObject(self->cppobj->getClass()));
+ return (createRRClassObject(static_cast<const s_RRset*>(self)->cppobj->
+ getClass()));
} catch (const exception& ex) {
const string ex_what =
"Unexpected failure getting question RRClass: " +
@@ -170,9 +173,10 @@ RRset_getClass(s_RRset* self) {
}
PyObject*
-RRset_getType(s_RRset* self) {
+RRset_getType(PyObject* self, PyObject*) {
try {
- return (createRRTypeObject(self->cppobj->getType()));
+ return (createRRTypeObject(static_cast<const s_RRset*>(self)->cppobj->
+ getType()));
} catch (const exception& ex) {
const string ex_what =
"Unexpected failure getting question RRType: " +
@@ -186,9 +190,10 @@ RRset_getType(s_RRset* self) {
}
PyObject*
-RRset_getTTL(s_RRset* self) {
+RRset_getTTL(PyObject* self, PyObject*) {
try {
- return (createRRTTLObject(self->cppobj->getTTL()));
+ return (createRRTTLObject(static_cast<const s_RRset*>(self)->cppobj->
+ getTTL()));
} catch (const exception& ex) {
const string ex_what =
"Unexpected failure getting question TTL: " +
@@ -202,29 +207,30 @@ RRset_getTTL(s_RRset* self) {
}
PyObject*
-RRset_setName(s_RRset* self, PyObject* args) {
+RRset_setName(PyObject* self, PyObject* args) {
PyObject* name;
if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
return (NULL);
}
- self->cppobj->setName(PyName_ToName(name));
+ static_cast<s_RRset*>(self)->cppobj->setName(PyName_ToName(name));
Py_RETURN_NONE;
}
PyObject*
-RRset_setTTL(s_RRset* self, PyObject* args) {
+RRset_setTTL(PyObject* self, PyObject* args) {
PyObject* rrttl;
if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
return (NULL);
}
- self->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
+ static_cast<s_RRset*>(self)->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
Py_RETURN_NONE;
}
PyObject*
-RRset_toText(s_RRset* self) {
+RRset_toText(PyObject* self, PyObject*) {
try {
- return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ return (Py_BuildValue("s", static_cast<const s_RRset*>(self)->cppobj->
+ toText().c_str()));
} catch (const EmptyRRset& ers) {
PyErr_SetString(po_EmptyRRset, ers.what());
return (NULL);
@@ -235,14 +241,15 @@ PyObject*
RRset_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
- const_cast<char*>("to_text"),
+ const_cast<char*>("to_text"),
const_cast<char*>("")));
}
PyObject*
-RRset_toWire(s_RRset* self, PyObject* args) {
+RRset_toWire(PyObject* self_p, PyObject* args) {
PyObject* bytes;
PyObject* mr;
+ const s_RRset* self(static_cast<const s_RRset*>(self_p));
try {
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
@@ -274,13 +281,13 @@ RRset_toWire(s_RRset* self, PyObject* args) {
}
PyObject*
-RRset_addRdata(s_RRset* self, PyObject* args) {
+RRset_addRdata(PyObject* self, PyObject* args) {
PyObject* rdata;
if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
return (NULL);
}
try {
- self->cppobj->addRdata(PyRdata_ToRdata(rdata));
+ static_cast<s_RRset*>(self)->cppobj->addRdata(PyRdata_ToRdata(rdata));
Py_RETURN_NONE;
} catch (const std::bad_cast&) {
PyErr_Clear();
@@ -324,8 +331,8 @@ RRset_getRdata(PyObject* po_self, PyObject*) {
}
PyObject*
-RRset_removeRRsig(s_RRset* self) {
- self->cppobj->removeRRsig();
+RRset_removeRRsig(PyObject* self, PyObject*) {
+ static_cast<s_RRset*>(self)->cppobj->removeRRsig();
Py_RETURN_NONE;
}
diff --git a/src/lib/dns/python/serial_python.cc b/src/lib/dns/python/serial_python.cc
new file mode 100644
index 0000000..e2bd809
--- /dev/null
+++ b/src/lib/dns/python/serial_python.cc
@@ -0,0 +1,281 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <dns/serial.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "serial_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_Serial : public PyObject {
+public:
+ s_Serial() : cppobj(NULL) {};
+ isc::dns::Serial* cppobj;
+};
+
+typedef CPPPyObjectContainer<s_Serial, Serial> SerialContainer;
+
+PyObject* Serial_str(PyObject* self);
+PyObject* Serial_getValue(s_Serial* self);
+PyObject* Serial_richcmp(s_Serial* self, s_Serial* other, int op);
+PyObject* Serial_add(PyObject* left, PyObject* right);
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef Serial_methods[] = {
+ { "get_value", reinterpret_cast<PyCFunction>(Serial_getValue), METH_NOARGS,
+ "Returns the Serial value as an integer" },
+ { NULL, NULL, 0, NULL }
+};
+
+// For overriding the + operator. We do not define any other operators for
+// this type.
+PyNumberMethods Serial_NumberMethods = {
+ Serial_add, //nb_add;
+ NULL, //nb_subtract;
+ NULL, //nb_multiply;
+ NULL, //nb_remainder;
+ NULL, //nb_divmod;
+ NULL, //nb_power;
+ NULL, //nb_negative;
+ NULL, //nb_positive;
+ NULL, //nb_absolute;
+ NULL, //nb_bool;
+ NULL, //nb_invert;
+ NULL, //nb_lshift;
+ NULL, //nb_rshift;
+ NULL, //nb_and;
+ NULL, //nb_xor;
+ NULL, //nb_or;
+ NULL, //nb_int;
+ NULL, //nb_reserved;
+ NULL, //nb_float;
+
+ NULL, //nb_inplace_add;
+ NULL, //nb_inplace_subtract;
+ NULL, //nb_inplace_multiply;
+ NULL, //nb_inplace_remainder;
+ NULL, //nb_inplace_power;
+ NULL, //nb_inplace_lshift;
+ NULL, //nb_inplace_rshift;
+ NULL, //nb_inplace_and;
+ NULL, //nb_inplace_xor;
+ NULL, //nb_inplace_or;
+
+ NULL, //nb_floor_divide;
+ NULL, //nb_true_divide;
+ NULL, //nb_inplace_floor_divide;
+ NULL, //nb_inplace_true_divide;
+
+ NULL, //nb_index;
+};
+
+int
+Serial_init(s_Serial* self, PyObject* args) {
+ long long i;
+ if (PyArg_ParseTuple(args, "L", &i)) {
+ PyErr_Clear();
+ if (i < 0 || i > 0xffffffff) {
+ PyErr_SetString(PyExc_ValueError, "Serial number out of range");
+ return (-1);
+ }
+ self->cppobj = new Serial(i);
+ return (0);
+ } else {
+ return (-1);
+ }
+}
+
+void
+Serial_destroy(s_Serial* self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+Serial_getValue(s_Serial* self) {
+ return (Py_BuildValue("I", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_str(PyObject* po_self) {
+ const s_Serial* const self = static_cast<s_Serial*>(po_self);
+ return (PyUnicode_FromFormat("%u", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_richcmp(s_Serial* self, s_Serial* other, int op) {
+ bool c = false;
+
+ // Check for null and if the types match. If different type,
+ // simply return False
+ if (!other || (self->ob_type != other->ob_type)) {
+ Py_RETURN_FALSE;
+ }
+
+ switch (op) {
+ case Py_LT:
+ c = *self->cppobj < *other->cppobj;
+ break;
+ case Py_LE:
+ c = *self->cppobj <= *other->cppobj;
+ break;
+ case Py_EQ:
+ c = *self->cppobj == *other->cppobj;
+ break;
+ case Py_NE:
+ c = *self->cppobj != *other->cppobj;
+ break;
+ case Py_GT:
+ c = *self->cppobj > *other->cppobj;
+ break;
+ case Py_GE:
+ c = *self->cppobj >= *other->cppobj;
+ break;
+ }
+ if (c) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+
+PyObject *
+Serial_add(PyObject *left, PyObject *right) {
+ // Either can be either a serial or a long, as long as one of them is a
+ // serial
+ if (PySerial_Check(left) && PySerial_Check(right)) {
+ return (createSerialObject(PySerial_ToSerial(left) +
+ PySerial_ToSerial(right)));
+ } else if (PySerial_Check(left) && PyLong_Check(right)) {
+ return (createSerialObject(PySerial_ToSerial(left) +
+ PyLong_AsLong(right)));
+ } else if (PyLong_Check(left) && PySerial_Check(right)) {
+ return (createSerialObject(PySerial_ToSerial(right) +
+ PyLong_AsLong(left)));
+ } else {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Serial
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject serial_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Serial",
+ sizeof(s_Serial), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Serial_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ &Serial_NumberMethods, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Serial_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Serial class encapsulates Serials used in DNS SOA records.\n\n"
+ "This is a straightforward class; a Serial object simply maintains a "
+ "32-bit unsigned integer corresponding to the SOA SERIAL value. The "
+ "main purpose of this class is to provide serial number arithmetic, as "
+ "described in RFC 1982. Objects of this type can be compared and added "
+ "to each other, as described in RFC 1982. Apart from str(), get_value(), "
+ "comparison operators, and the + operator, no other operations are "
+ "defined for this type.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)Serial_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Serial_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Serial_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createSerialObject(const Serial& source) {
+ SerialContainer container(PyObject_New(s_Serial, &serial_type));
+ container.set(new Serial(source));
+ return (container.release());
+}
+
+bool
+PySerial_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Serial typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &serial_type));
+}
+
+const Serial&
+PySerial_ToSerial(const PyObject* serial_obj) {
+ if (serial_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Serial PyObject conversion");
+ }
+ const s_Serial* serial = static_cast<const s_Serial*>(serial_obj);
+ return (*serial->cppobj);
+}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/serial_python.h b/src/lib/dns/python/serial_python.h
new file mode 100644
index 0000000..48b5199
--- /dev/null
+++ b/src/lib/dns/python/serial_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SERIAL_H
+#define __PYTHON_SERIAL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Serial;
+
+namespace python {
+
+extern PyTypeObject serial_type;
+
+/// This is a simple shortcut to create a python Serial object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createSerialObject(const Serial& source);
+
+/// \brief Checks if the given python object is a Serial object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Serial, false otherwise
+bool PySerial_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Serial object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Serial; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PySerial_Check()
+///
+/// \note This is not a copy; if the Serial is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param Serial_obj The Serial object to convert
+const Serial& PySerial_ToSerial(const PyObject* Serial_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_SERIAL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index d1273f3..3338727 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -11,6 +11,7 @@ PYTESTS += rrclass_python_test.py
PYTESTS += rrset_python_test.py
PYTESTS += rrttl_python_test.py
PYTESTS += rrtype_python_test.py
+PYTESTS += serial_python_test.py
PYTESTS += tsig_python_test.py
PYTESTS += tsig_rdata_python_test.py
PYTESTS += tsigerror_python_test.py
diff --git a/src/lib/dns/python/tests/rdata_python_test.py b/src/lib/dns/python/tests/rdata_python_test.py
index 776f792..81dea5f 100644
--- a/src/lib/dns/python/tests/rdata_python_test.py
+++ b/src/lib/dns/python/tests/rdata_python_test.py
@@ -35,6 +35,14 @@ class RdataTest(unittest.TestCase):
self.assertRaises(TypeError, Rdata, "wrong", RRClass("IN"), "192.0.2.99")
self.assertRaises(TypeError, Rdata, RRType("A"), "wrong", "192.0.2.99")
self.assertRaises(TypeError, Rdata, RRType("A"), RRClass("IN"), 1)
+ self.assertRaises(InvalidRdataText, Rdata, RRType("A"), RRClass("IN"),
+ "Invalid Rdata Text")
+ self.assertRaises(CharStringTooLong, Rdata, RRType("TXT"),
+ RRClass("IN"), ' ' * 256)
+ self.assertRaises(InvalidRdataLength, Rdata, RRType("TXT"),
+ RRClass("IN"), bytes(65536))
+ self.assertRaises(DNSMessageFORMERR, Rdata, RRType("TXT"),
+ RRClass("IN"), b"\xff")
def test_rdata_to_wire(self):
b = bytearray()
diff --git a/src/lib/dns/python/tests/serial_python_test.py b/src/lib/dns/python/tests/serial_python_test.py
new file mode 100644
index 0000000..0ca08c2
--- /dev/null
+++ b/src/lib/dns/python/tests/serial_python_test.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Tests for the Serial part of the pydnspp module
+#
+
+import unittest
+import os
+from pydnspp import *
+
+class SerialTest(unittest.TestCase):
+ def setUp(self):
+ self.one = Serial(1)
+ self.one_2 = Serial(1)
+ self.two = Serial(2)
+ self.date_zero = Serial(1980120100)
+ self.date_one = Serial(1980120101)
+ self.zero = Serial(0)
+ self.highest = Serial(4294967295)
+ self.number_low = Serial(12345)
+ self.number_medium = Serial(2000000000)
+ self.number_high = Serial(4000000000)
+
+ def test_init(self):
+ self.assertRaises(ValueError, Serial, -1)
+ self.assertRaises(ValueError, Serial, 4294967296)
+ self.assertRaises(ValueError, Serial, 4294967297)
+ self.assertRaises(ValueError, Serial, 100000000000)
+
+ def test_get_value(self):
+ self.assertEqual(1, self.one.get_value())
+ self.assertNotEqual(2, self.one_2.get_value())
+ self.assertEqual(2, self.two.get_value())
+ self.assertEqual(1980120100, self.date_zero.get_value())
+ self.assertEqual(1980120101, self.date_one.get_value())
+ self.assertEqual(0, self.zero.get_value())
+ self.assertEqual(4294967295, self.highest.get_value())
+ self.assertEqual(12345, self.number_low.get_value())
+ self.assertEqual(2000000000, self.number_medium.get_value())
+ self.assertEqual(4000000000, self.number_high.get_value())
+
+ def test_str(self):
+ self.assertEqual('1', str(self.one))
+ self.assertNotEqual('2', str(self.one_2))
+ self.assertEqual('2', str(self.two))
+ self.assertEqual('1980120100', str(self.date_zero))
+ self.assertEqual('1980120101', str(self.date_one))
+ self.assertEqual('0', str(self.zero))
+ self.assertEqual('4294967295', str(self.highest))
+ self.assertEqual('12345', str(self.number_low))
+ self.assertEqual('2000000000', str(self.number_medium))
+ self.assertEqual('4000000000', str(self.number_high))
+
+ def test_equals(self):
+ self.assertEqual(self.one, self.one)
+ self.assertEqual(self.one, self.one_2)
+ self.assertNotEqual(self.one, self.two)
+ self.assertNotEqual(self.two, self.one)
+ self.assertEqual(Serial(12345), self.number_low)
+ self.assertNotEqual(Serial(12346), self.number_low)
+
+ def test_compare(self):
+ # These should be true/false even without serial arithmetic
+ self.assertLessEqual(self.one, self.one)
+ self.assertLessEqual(self.one, self.one_2)
+ self.assertLess(self.one, self.two)
+ self.assertLessEqual(self.one, self.one)
+ self.assertLessEqual(self.one, self.two)
+ self.assertGreater(self.two, self.one)
+ self.assertGreaterEqual(self.two, self.two)
+ self.assertGreaterEqual(self.two, self.one)
+ self.assertLess(self.one, self.number_low)
+ self.assertLess(self.number_low, self.number_medium)
+ self.assertLess(self.number_medium, self.number_high)
+
+ # These should 'wrap'
+ self.assertGreater(self.zero, self.highest)
+ self.assertLess(self.highest, self.one)
+ self.assertLess(self.number_high, self.number_low)
+
+ def test_addition(self):
+ self.assertEqual(self.two, self.one + self.one)
+ self.assertEqual(self.two, self.one + self.one_2)
+ self.assertEqual(self.highest, self.highest + self.zero)
+ self.assertEqual(self.zero, self.highest + self.one)
+ self.assertEqual(self.one, self.highest + self.two)
+ self.assertEqual(self.one, self.highest + self.one + self.one)
+ self.assertEqual(self.one + 100, self.highest + 102)
+ self.assertEqual(100 + self.one, self.highest + 102)
+ self.assertEqual(self.zero + 2147483645, self.highest + 2147483646)
+
+ # using lambda so the error doesn't get thrown on initial evaluation
+ self.assertRaises(TypeError, lambda: self.zero + "bad")
+ self.assertRaises(TypeError, lambda: self.zero + None)
+ self.assertRaises(TypeError, lambda: "bad" + self.zero)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 7ecd84f..e473bca 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,6 +106,12 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
renderer.writeData(numdata_, sizeof(numdata_));
}
+Serial
+SOA::getSerial() const {
+ InputBuffer b(numdata_, sizeof(numdata_));
+ return (Serial(b.readUint32()));
+}
+
string
SOA::toText() const {
InputBuffer b(numdata_, sizeof(numdata_));
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 3f6185e..2c180b2 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -18,6 +18,7 @@
#include <dns/name.h>
#include <dns/rdata.h>
+#include <dns/serial.h>
// BEGIN_ISC_NAMESPACE
@@ -34,6 +35,8 @@ public:
SOA(const Name& mname, const Name& rname, uint32_t serial,
uint32_t refresh, uint32_t retry, uint32_t expire,
uint32_t minimum);
+ /// \brief Returns the serial stored in the SOA.
+ Serial getSerial() const;
private:
/// Note: this is a prototype version; we may reconsider
/// this representation later.
diff --git a/src/lib/dns/serial.cc b/src/lib/dns/serial.cc
new file mode 100644
index 0000000..90bc242
--- /dev/null
+++ b/src/lib/dns/serial.cc
@@ -0,0 +1,76 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/serial.h>
+
+namespace isc {
+namespace dns {
+
+bool
+Serial::operator==(const Serial& other) const {
+ return (value_ == other.getValue());
+}
+
+bool
+Serial::operator!=(const Serial& other) const {
+ return (value_ != other.getValue());
+}
+
+bool
+Serial::operator<(const Serial& other) const {
+ uint32_t other_val = other.getValue();
+ bool result = false;
+ if (value_ < other_val) {
+ result = ((other_val - value_) <= MAX_SERIAL_INCREMENT);
+ } else if (other_val < value_) {
+ result = ((value_ - other_val) > MAX_SERIAL_INCREMENT);
+ }
+ return (result);
+}
+
+bool
+Serial::operator<=(const Serial& other) const {
+ return (operator==(other) || operator<(other));
+}
+
+bool
+Serial::operator>(const Serial& other) const {
+ return (!operator==(other) && !operator<(other));
+}
+
+bool
+Serial::operator>=(const Serial& other) const {
+ return (!operator<(other));
+}
+
+Serial
+Serial::operator+(uint32_t other_val) const {
+ uint64_t new_val = static_cast<uint64_t>(value_) +
+ static_cast<uint64_t>(other_val);
+ return Serial(static_cast<uint32_t>(new_val % MAX_SERIAL_VALUE));
+}
+
+Serial
+Serial::operator+(const Serial& other) const {
+ return (operator+(other.getValue()));
+}
+
+std::ostream&
+operator<<(std::ostream& os, const Serial& serial) {
+ return (os << serial.getValue());
+}
+
+} // end namespace dns
+} // end namespace isc
+
diff --git a/src/lib/dns/serial.h b/src/lib/dns/serial.h
new file mode 100644
index 0000000..3549860
--- /dev/null
+++ b/src/lib/dns/serial.h
@@ -0,0 +1,155 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERIAL_H
+#define __SERIAL_H 1
+
+#include <stdint.h>
+#include <iostream>
+
+namespace isc {
+namespace dns {
+
+/// The maximum difference between two serial numbers. If the (plain uint32_t)
+/// difference between two serials is greater than this number, the smaller one
+/// is considered greater.
+const uint32_t MAX_SERIAL_INCREMENT = 2147483647;
+
+/// Maximum value a serial can have, used in + operator.
+const uint64_t MAX_SERIAL_VALUE = 4294967296ull;
+
+/// \brief This class defines DNS serial numbers and serial arithmetic.
+///
+/// DNS Serial number are in essence unsigned 32-bits numbers, with one
+/// catch; they should be compared using sequence space arithmetic.
+/// So given that they are 32-bits; as soon as the difference between two
+/// serial numbers is greater than 2147483647 (2^31 - 1), the lower number
+/// (in plain comparison) is considered the higher one.
+///
+/// In order to do this as transparently as possible, these numbers are
+/// stored in the Serial class, which overrides the basic comparison operators.
+///
+/// In this specific context, these operations are called 'serial number
+/// arithmetic', and they are defined in RFC 1982.
+///
+/// \note RFC 1982 defines everything based on the value SERIAL_BITS. Since
+/// the serial number has a fixed length of 32 bits, the values we use are
+/// hard-coded, and not computed based on variable bit lengths.
+class Serial {
+public:
+ /// \brief Constructor with value
+ ///
+ /// \param value The uint32_t value of the serial
+ explicit Serial(uint32_t value) : value_(value) {}
+
+ /// \brief Copy constructor
+ Serial(const Serial& other) : value_(other.getValue()) {}
+
+ /// \brief Direct assignment from other Serial
+ ///
+ /// \param other The Serial to assign the value from
+ void operator=(const Serial& other) { value_ = other.getValue(); }
+
+ /// \brief Direct assignment from value
+ ///
+ /// \param value the uint32_t value to assign
+ void operator=(uint32_t value) { value_ = value; }
+
+ /// \brief Returns the uint32_t representation of this serial value
+ ///
+ /// \return The uint32_t value of this Serial
+ uint32_t getValue() const { return (value_); }
+
+ /// \brief Returns true if the serial values are equal
+ ///
+ /// \return True if the values are equal
+ bool operator==(const Serial& other) const;
+
+ /// \brief Returns true if the serial values are not equal
+ ///
+ /// \return True if the values are not equal
+ bool operator!=(const Serial& other) const;
+
+ /// \brief Returns true if the serial value of this serial is smaller than
+ /// the other, according to serial arithmetic as described in RFC 1982
+ ///
+ /// \param other The Serial to compare to
+ ///
+ /// \return True if this is smaller than the given value
+ bool operator<(const Serial& other) const;
+
+ /// \brief Returns true if the serial value of this serial is equal to or
+ /// smaller than the other, according to serial arithmetic as described
+ /// in RFC 1982
+ ///
+ /// \param other The Serial to compare to
+ ///
+ /// \return True if this is smaller than or equal to the given value
+ bool operator<=(const Serial& other) const;
+
+ /// \brief Returns true if the serial value of this serial is greater than
+ /// the other, according to serial arithmetic as described in RFC 1982
+ ///
+ /// \param other The Serial to compare to
+ ///
+ /// \return True if this is greater than the given value
+ bool operator>(const Serial& other) const;
+
+ /// \brief Returns true if the serial value of this serial is equal to or
+ /// greater than the other, according to serial arithmetic as described in
+ /// RFC 1982
+ ///
+ /// \param other The Serial to compare to
+ ///
+ /// \return True if this is greater than or equal to the given value
+ bool operator>=(const Serial& other) const;
+
+ /// \brief Adds the given value to the serial number. If this would make
+ /// the number greater than 2^32-1, it is 'wrapped'.
+ /// \note According to the specification, an addition greater than
+ /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+ /// to raise exceptions), but this behaviour remains undefined.
+ ///
+ /// \param other The Serial to add
+ ///
+ /// \return The result of the addition
+ Serial operator+(const Serial& other) const;
+
+ /// \brief Adds the given value to the serial number. If this would make
+ /// the number greater than 2^32-1, it is 'wrapped'.
+ ///
+ /// \note According to the specification, an addition greater than
+ /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+ /// to raise exceptions), but this behaviour remains undefined.
+ ///
+ /// \param other_val The uint32_t value to add
+ ///
+ /// \return The result of the addition
+ Serial operator+(uint32_t other_val) const;
+
+private:
+ uint32_t value_;
+};
+
+/// \brief Helper operator for output streams, writes the value to the stream
+///
+/// \param os The ostream to write to
+/// \param serial The Serial to write
+/// \return the output stream
+std::ostream& operator<<(std::ostream& os, const Serial& serial);
+
+} // end namespace dns
+} // end namespace isc
+
+#endif // __SERIAL_H
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index ceeb3b8..cfd1286 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -54,6 +54,7 @@ run_unittests_SOURCES += question_unittest.cc
run_unittests_SOURCES += rrparamregistry_unittest.cc
run_unittests_SOURCES += masterload_unittest.cc
run_unittests_SOURCES += message_unittest.cc
+run_unittests_SOURCES += serial_unittest.cc
run_unittests_SOURCES += tsig_unittest.cc
run_unittests_SOURCES += tsigerror_unittest.cc
run_unittests_SOURCES += tsigkey_unittest.cc
@@ -61,12 +62,12 @@ run_unittests_SOURCES += tsigrecord_unittest.cc
run_unittests_SOURCES += character_string_unittest.cc
run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-# We shouldn't need to include BOTAN_LDFLAGS here, but there
+# We shouldn't need to include BOTAN_LIBS here, but there
# is one test system where the path for GTEST_LDFLAGS contains
# an older version of botan, and somehow that version gets
# linked if we don't
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(BOTAN_LIBS) $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 63fe1f7..07c24d5 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -74,4 +74,9 @@ TEST_F(Rdata_SOA_Test, toText) {
EXPECT_EQ("ns.example.com. root.example.com. "
"2010012601 3600 300 3600000 1200", rdata_soa.toText());
}
+
+TEST_F(Rdata_SOA_Test, getSerial) {
+ EXPECT_EQ(2010012601, rdata_soa.getSerial().getValue());
+}
+
}
diff --git a/src/lib/dns/tests/serial_unittest.cc b/src/lib/dns/tests/serial_unittest.cc
new file mode 100644
index 0000000..e27f628
--- /dev/null
+++ b/src/lib/dns/tests/serial_unittest.cc
@@ -0,0 +1,179 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/serial.h>
+
+using namespace isc::dns;
+
+class SerialTest : public ::testing::Test {
+public:
+ SerialTest() : one(1), one_2(1), two(2),
+ date_zero(1980120100), date_one(1980120101),
+ min(0), max(4294967295u),
+ number_low(12345),
+ number_medium(2000000000),
+ number_high(4000000000u)
+ {}
+ Serial one, one_2, two, date_zero, date_one, min, max, number_low, number_medium, number_high;
+};
+
+//
+// Basic tests
+//
+
+TEST_F(SerialTest, get_value) {
+ EXPECT_EQ(1, one.getValue());
+ EXPECT_NE(2, one.getValue());
+ EXPECT_EQ(2, two.getValue());
+ EXPECT_EQ(1980120100, date_zero.getValue());
+ EXPECT_EQ(1980120101, date_one.getValue());
+ EXPECT_EQ(0, min.getValue());
+ EXPECT_EQ(4294967295u, max.getValue());
+ EXPECT_EQ(12345, number_low.getValue());
+ EXPECT_EQ(2000000000, number_medium.getValue());
+ EXPECT_EQ(4000000000u, number_high.getValue());
+}
+
+TEST_F(SerialTest, equals) {
+ EXPECT_EQ(one, one);
+ EXPECT_EQ(one, one_2);
+ EXPECT_NE(one, two);
+ EXPECT_NE(two, one);
+ EXPECT_EQ(Serial(12345), number_low);
+ EXPECT_NE(Serial(12346), number_low);
+}
+
+TEST_F(SerialTest, comparison) {
+ // These should be true/false even without serial arithmetic
+ EXPECT_LE(one, one);
+ EXPECT_LE(one, one_2);
+ EXPECT_LT(one, two);
+ EXPECT_LE(one, two);
+ EXPECT_GE(two, two);
+ EXPECT_GT(two, one);
+ EXPECT_GE(two, one);
+ EXPECT_LT(one, number_low);
+ EXPECT_LT(number_low, number_medium);
+ EXPECT_LT(number_medium, number_high);
+
+ // now let's try some that 'wrap', as it were
+ EXPECT_GT(min, max);
+ EXPECT_LT(max, min);
+ EXPECT_LT(number_high, number_low);
+}
+
+//
+// RFC 1982 Section 3.1
+//
+TEST_F(SerialTest, addition) {
+ EXPECT_EQ(two, one + one);
+ EXPECT_EQ(two, one + one_2);
+ EXPECT_EQ(max, max + min);
+ EXPECT_EQ(min, max + one);
+ EXPECT_EQ(one, max + two);
+ EXPECT_EQ(one, max + one + one);
+
+ EXPECT_EQ(one + 100, max + 102);
+ EXPECT_EQ(min + 2147483645, max + 2147483646);
+ EXPECT_EQ(min + 2147483646, max + MAX_SERIAL_INCREMENT);
+}
+
+//
+// RFC 1982 Section 3.2 has been checked by the basic tests above
+//
+
+//
+// RFC 1982 Section 4.1
+//
+
+// Helper function for addition_always_larger test, add some numbers
+// and check that the result is always larger than the original
+void do_addition_larger_test(const Serial& number) {
+ EXPECT_GE(number + 0, number);
+ EXPECT_EQ(number + 0, number);
+ EXPECT_GT(number + 1, number);
+ EXPECT_GT(number + 2, number);
+ EXPECT_GT(number + 100, number);
+ EXPECT_GT(number + 1111111, number);
+ EXPECT_GT(number + 2147483646, number);
+ EXPECT_GT(number + MAX_SERIAL_INCREMENT, number);
+ // Try MAX_SERIAL_INCREMENT as a hardcoded number as well
+ EXPECT_GT(number + 2147483647, number);
+}
+
+TEST_F(SerialTest, addition_always_larger) {
+ do_addition_larger_test(one);
+ do_addition_larger_test(two);
+ do_addition_larger_test(date_zero);
+ do_addition_larger_test(date_one);
+ do_addition_larger_test(min);
+ do_addition_larger_test(max);
+ do_addition_larger_test(number_low);
+ do_addition_larger_test(number_medium);
+ do_addition_larger_test(number_high);
+}
+
+//
+// RFC 1982 Section 4.2
+//
+
+// Helper function to do the second addition
+void
+do_two_additions_test_second(const Serial &original,
+ const Serial &number)
+{
+ EXPECT_NE(original, number);
+ EXPECT_NE(original, number + 0);
+ EXPECT_NE(original, number + 1);
+ EXPECT_NE(original, number + 2);
+ EXPECT_NE(original, number + 100);
+ EXPECT_NE(original, number + 1111111);
+ EXPECT_NE(original, number + 2147483646);
+ EXPECT_NE(original, number + MAX_SERIAL_INCREMENT);
+ EXPECT_NE(original, number + 2147483647);
+}
+
+void do_two_additions_test_first(const Serial &number) {
+ do_two_additions_test_second(number, number + 1);
+ do_two_additions_test_second(number, number + 2);
+ do_two_additions_test_second(number, number + 100);
+ do_two_additions_test_second(number, number + 1111111);
+ do_two_additions_test_second(number, number + 2147483646);
+ do_two_additions_test_second(number, number + MAX_SERIAL_INCREMENT);
+ do_two_additions_test_second(number, number + 2147483647);
+}
+
+TEST_F(SerialTest, two_additions_never_equal) {
+ do_two_additions_test_first(one);
+ do_two_additions_test_first(two);
+ do_two_additions_test_first(date_zero);
+ do_two_additions_test_first(date_one);
+ do_two_additions_test_first(min);
+ do_two_additions_test_first(max);
+ do_two_additions_test_first(number_low);
+ do_two_additions_test_first(number_medium);
+ do_two_additions_test_first(number_high);
+}
+
+//
+// RFC 1982 Section 4.3 and 4.4 have nothing to test
+//
+
+//
+// Tests from RFC 1982 examples
+//
+TEST(SerialTextRFCExamples, rfc_example_tests) {
+}
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index 433bb7d..b68f3c4 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -126,6 +126,17 @@ public:
isc::Exception(file, line, what) {}
};
+/// \brief A generic exception that is thrown if a function is called
+/// in a prohibited way.
+///
+/// For example, this can happen if a class method is called when the object's
+/// state does not allow that particular method.
+class InvalidOperation : public Exception {
+public:
+ InvalidOperation(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
///
/// \brief A generic exception that is thrown when an unexpected
/// error condition occurs.
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 957d350..286e9fd 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -46,5 +46,4 @@ if USE_CLANGPP
liblog_la_CXXFLAGS += -Wno-error
endif
liblog_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
-liblog_la_LDFLAGS = $(LOG4CPLUS_LDFLAGS)
-liblog_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+liblog_la_LIBADD = $(LOG4CPLUS_LIBS) $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index a5f793c..53e97a1 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -48,16 +48,18 @@ endif
noinst_PROGRAMS = logger_example
logger_example_SOURCES = logger_example.cc
logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-logger_example_LDADD = $(top_builddir)/src/lib/log/liblog.la
+logger_example_LDFLAGS = $(AM_LDFLAGS)
+logger_example_LDADD = $(LOG4CPLUS_LIBS)
+logger_example_LDADD += $(top_builddir)/src/lib/log/liblog.la
logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
noinst_PROGRAMS += init_logger_test
init_logger_test_SOURCES = init_logger_test.cc
init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-init_logger_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDFLAGS = $(AM_LDFLAGS)
+init_logger_test_LDADD = $(LOG4CPLUS_LIBS)
+init_logger_test_LDADD += $(top_builddir)/src/lib/log/liblog.la
init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/nsas/nameserver_entry.cc b/src/lib/nsas/nameserver_entry.cc
index 553c35d..bca8f73 100644
--- a/src/lib/nsas/nameserver_entry.cc
+++ b/src/lib/nsas/nameserver_entry.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -223,7 +223,8 @@ class NameserverEntry::ResolverCallback :
* \short We received the address successfully.
*
* This extracts the addresses out from the response and puts them
- * inside the entry. It tries to reuse the address entries from before (if there were any), to keep their RTTs.
+ * inside the entry. It tries to reuse the address entries from before
+ * (if there were any), to keep their RTTs.
*/
virtual void success(MessagePtr response_message) {
time_t now = time(NULL);
@@ -231,10 +232,21 @@ class NameserverEntry::ResolverCallback :
Lock lock(entry_->mutex_);
// TODO: find the correct RRset, not simply the first
- if (!response_message ||
- response_message->getRcode() != isc::dns::Rcode::NOERROR() ||
+ if (!response_message) {
+ LOG_ERROR(nsas_logger, NSAS_NULL_RESPONSE).arg(entry_->getName());
+ failureInternal(lock);
+ return;
+
+ } else if (response_message->getRcode() != isc::dns::Rcode::NOERROR()) {
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_ERROR_RESPONSE).
+ arg(response_message->getRcode()).arg(entry_->getName());
+ failureInternal(lock);
+ return;
+
+ } else if (
response_message->getRRCount(isc::dns::Message::SECTION_ANSWER) == 0) {
- LOG_ERROR(nsas_logger, NSAS_INVALID_RESPONSE).arg(entry_->getName());
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_EMPTY_RESPONSE).
+ arg(entry_->getName());
failureInternal(lock);
return;
}
@@ -371,7 +383,7 @@ class NameserverEntry::ResolverCallback :
}
}
- // Handle a failure to optain data. Dispatches callbacks and leaves
+ // Handle a failure to obtain data. Dispatches callbacks and leaves
// lock unlocked
void failureInternal(Lock &lock) {
// Set state of the addresses
diff --git a/src/lib/nsas/nsas_messages.mes b/src/lib/nsas/nsas_messages.mes
index 512fcd5..6c35172 100644
--- a/src/lib/nsas/nsas_messages.mes
+++ b/src/lib/nsas/nsas_messages.mes
@@ -14,6 +14,16 @@
$NAMESPACE isc::nsas
+% NSAS_EMPTY_RESPONSE response to query for %1 returned an empty answer section
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully but the
+answer section in the response was empty.
+
+% NSAS_ERROR_RESPONSE error response of %1 returned in query for %2
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully but the
+RCODE in the response was something other than NOERROR.
+
% NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1
A debug message issued when the NSAS (nameserver address store - part
of the resolver) is making a callback into the resolver to retrieve the
@@ -24,17 +34,6 @@ A debug message issued when the NSAS (nameserver address store - part
of the resolver) has retrieved the given address for the specified
nameserver through an external query.
-% NSAS_INVALID_RESPONSE queried for %1 but got invalid response
-The NSAS (nameserver address store - part of the resolver) made a query
-for a RR for the specified nameserver but received an invalid response.
-Either the success function was called without a DNS message or the
-message was invalid on some way. (In the latter case, the error should
-have been picked up elsewhere in the processing logic, hence the raising
-of the error here.)
-
-This message indicates an internal error in the NSAS. Please raise a
-bug report.
-
% NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled
A debug message issued when an NSAS (nameserver address store - part of
the resolver) lookup for a zone has been canceled.
@@ -46,6 +45,14 @@ for the specified nameserver. This is not necessarily a problem - the
nameserver may be unreachable, in which case the NSAS will try other
nameservers in the zone.
+% NSAS_NULL_RESPONSE got null message in success callback for query for %1
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed. The query completed successfully, but the
+message passed to the callback was null.
+
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+
% NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1
A debug message output when a call is made to the NSAS (nameserver
address store - part of the resolver) to obtain the nameservers for
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 5924294..893bb8c 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -1,15 +1,8 @@
SUBDIRS = isc
-python_PYTHON = bind10_config.py
+nodist_python_PYTHON = bind10_config.py
pythondir = $(pyexecdir)
-# Explicitly define DIST_COMMON so ${python_PYTHON} is not included
-# as we don't want the generated file included in distributed tarfile.
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
-
-# When setting DIST_COMMON, then need to add the .in file too.
-EXTRA_DIST = bind10_config.py.in
-
CLEANFILES = bind10_config.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 69b17ed..e54b1a8 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -23,6 +23,10 @@ def reload():
global DATA_PATH
global PLUGIN_PATHS
global PREFIX
+ global LIBEXECDIR
+ LIBEXECDIR = ("@libexecdir@/@PACKAGE@"). \
+ replace("${exec_prefix}", "@exec_prefix@"). \
+ replace("${prefix}", "@prefix@")
BIND10_MSGQ_SOCKET_FILE = os.path.join("@localstatedir@",
"@PACKAGE_NAME@",
"msgq_socket").replace("${prefix}",
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index 43a7605..aa5d0ab 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,5 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py sockcreator.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
+ socket_cache.py
pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
new file mode 100644
index 0000000..91b7064
--- /dev/null
+++ b/src/lib/python/isc/bind10/component.py
@@ -0,0 +1,647 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Module for managing components (abstraction of process). It allows starting
+them in given order, handling when they crash (what happens depends on kind
+of component) and shutting down. It also handles the configuration of this.
+
+Dependencies between them are not yet handled. It might turn out they are
+needed, in that case they will be added sometime in future.
+
+This framework allows for a single process to be started multiple times (by
+specifying multiple components with the same configuration). However, the rest
+of the system might not handle such situation well, so until it is made so,
+it would be better to start each process at most once.
+"""
+
+import isc.log
+from isc.log_messages.bind10_messages import *
+import time
+
+logger = isc.log.Logger("boss")
+DBG_TRACE_DATA = 20
+DBG_TRACE_DETAILED = 80
+
+START_CMD = 'start'
+STOP_CMD = 'stop'
+
+STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
+
+STATE_DEAD = 'dead'
+STATE_STOPPED = 'stopped'
+STATE_RUNNING = 'running'
+
+class BaseComponent:
+ """
+ This represents a single component. This one is an abstract base class.
+ There are some methods which should be left untouched, but there are
+ others which define the interface only and should be overriden in
+ concrete implementations.
+
+ The component is in one of the three states:
+ - Stopped - it is either not started yet or it was explicitly stopped.
+ The component is created in this state (it must be asked to start
+ explicitly).
+ - Running - after start() was called, it started successfully and is
+ now running.
+ - Dead - it failed and can not be resurrected.
+
+ Init
+ | stop()
+ | +-----------------------+
+ | | |
+ v | start() success |
+ Stopped --------+--------> Running <----------+
+ | | |
+ |failure | failed() |
+ | | |
+ v | |
+ +<-----------+ |
+ | |
+ | kind == dispensable or kind|== needed and failed late
+ +-----------------------------+
+ |
+ | kind == core or kind == needed and it failed too soon
+ v
+ Dead
+
+ Note that there are still situations which are not handled properly here.
+ We don't recognize a component that is starting up, but not ready yet, one
+ that is already shutting down, impossible to stop, etc. We need to add more
+ states in future to handle it properly.
+ """
+ def __init__(self, boss, kind):
+ """
+ Creates the component in not running mode.
+
+ The parameters are:
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. It may be one of:
+ * 'core' means the system can't run without it and it can't be
+ safely restarted. If it does not start, the system is brought
+ down. If it crashes, the system is turned off as well (with
+ non-zero exit status).
+ * 'needed' means the system is able to restart the component,
+ but it is vital part of the service (like auth server). If
+ it fails to start or crashes in less than 10s after the first
+ startup, the system is brought down. If it crashes later on,
+ it is restarted (see below).
+ * 'dispensable' means the component should be running, but if it
+ doesn't start or crashes for some reason, the system simply tries
+ to restart it and keeps running.
+
+ For components that are restarted, the restarts are not always
+ immediate; if the component has run for more than
+ COMPONENT_RESTART_DELAY (10) seconds, they are restarted right
+ away. If the component has not run that long, the system waits
+ until that time has passed (since the last start) until the
+ component is restarted.
+
+ Note that the __init__ method of child class should have these
+ parameters:
+
+ __init__(self, process, boss, kind, address=None, params=None)
+
+ The extra parameters are:
+ - `process` - which program should be started.
+ - `address` - the address on message bus, used to talk to the
+ component.
+ - `params` - parameters to the program.
+
+ The methods you should not override are:
+ - start
+ - stop
+ - failed
+ - running
+
+ You should override:
+ - _start_internal
+ - _stop_internal
+ - _failed_internal (if you like, the empty default might be suitable)
+ - name
+ - pid
+ - kill
+ """
+ if kind not in ['core', 'needed', 'dispensable']:
+ raise ValueError('Component kind can not be ' + kind)
+ self.__state = STATE_STOPPED
+ self._kind = kind
+ self._boss = boss
+ self._original_start_time = None
+
+ def start(self):
+ """
+ Start the component for the first time or restart it. It runs
+ _start_internal to actually start the component.
+
+ If you try to start an already running component, it raises ValueError.
+ """
+ if self.__state == STATE_DEAD:
+ raise ValueError("Can't resurrect already dead component")
+ if self.running():
+ raise ValueError("Can't start already running component")
+ logger.info(BIND10_COMPONENT_START, self.name())
+ self.__state = STATE_RUNNING
+ self.__start_time = time.time()
+ if self._original_start_time is None:
+ self._original_start_time = self.__start_time
+ self._restart_time = None
+ try:
+ self._start_internal()
+ except Exception as e:
+ logger.error(BIND10_COMPONENT_START_EXCEPTION, self.name(), e)
+ self.failed(None)
+ raise
+
+ def stop(self):
+ """
+ Stop the component. It calls _stop_internal to do the actual
+ stopping.
+
+ If you try to stop a component that is not running, it raises
+ ValueError.
+ """
+ # This is not tested. It talks with the outer world, which is out
+ # of scope of unittests.
+ if not self.running():
+ raise ValueError("Can't stop a component which is not running")
+ logger.info(BIND10_COMPONENT_STOP, self.name())
+ self.__state = STATE_STOPPED
+ self._stop_internal()
+
+ def failed(self, exit_code):
+ """
+ Notify the component it crashed. This will be called from boss object.
+
+ If you try to call failed on a component that is not running,
+ a ValueError is raised.
+
+ If it is a core component or needed component and it was started only
+ recently, the component will become dead and will ask the boss to shut
+ down with error exit status. A dead component can't be started again.
+
+ Otherwise the component will try to restart.
+
+ The exit code is used for logging. It might be None.
+
+ It calls _failed_internal internally.
+
+ Returns True if the process was immediately restarted, returns
+ False is the process was not restarted, either because
+ it is considered a core or needed component, or because
+ the component is to be restarted later.
+ """
+ logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
+ exit_code if exit_code is not None else "unknown")
+ if not self.running():
+ raise ValueError("Can't fail component that isn't running")
+ self.__state = STATE_STOPPED
+ self._failed_internal()
+ # If it is a core component or the needed component failed to start
+ # (including it stopped really soon)
+ if self._kind == 'core' or \
+ (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
+ self._original_start_time):
+ self.__state = STATE_DEAD
+ logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
+ self._boss.component_shutdown(1)
+ return False
+ # This means we want to restart
+ else:
+ # if the component was only running for a short time, don't
+ # restart right away, but set a time at which it wants to be restarted,
+ # and return that it wants to be restarted later
+ self.set_restart_time()
+ return self.restart()
+
+ def set_restart_time(self):
+ """Calculates and sets the time this component should be restarted.
+ Currently, it uses a very basic algorithm; start time +
+ COMPONENT_RESTART_DELAY (10 seconds). This algorithm may be improved upon
+ in the future.
+ """
+ self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+ def get_restart_time(self):
+ """Returns the time at which this component should be restarted."""
+ return self._restart_at
+
+ def restart(self, now = None):
+ """Restarts the component if it has a restart_time and if the value
+ of the restart_time is smaller than 'now'.
+
+ If the parameter 'now' is given, its value will be used instead
+ of calling time.time().
+
+ Returns True if the component is restarted, False if not."""
+ if now is None:
+ now = time.time()
+ if self.get_restart_time() is not None and\
+ self.get_restart_time() < now:
+ self.start()
+ return True
+ else:
+ return False
+
+ def running(self):
+ """
+ Informs if the component is currently running. It assumes the failed
+ is called whenever the component really fails and there might be some
+ time in between actual failure and the call, so this might be
+ inaccurate (it corresponds to the thing the object thinks is true, not
+ to the real "external" state).
+
+ It is not expected for this method to be overridden.
+ """
+ return self.__state == STATE_RUNNING
+
+ def _start_internal(self):
+ """
+ This method does the actual starting of a process. You need to override
+ this method to do the actual starting.
+
+ The ability to override this method presents some flexibility. It
+ allows processes started in a strange way, as well as components that
+ have no processes at all or components with multiple processes (in case
+ of multiple processes, care should be taken to make their
+ started/stopped state in sync and all the processes that can fail
+ should be registered).
+
+ You should register all the processes created by calling
+ self._boss.register_process.
+ """
+ pass
+
+ def _stop_internal(self):
+ """
+ This is the method that does the actual stopping of a component.
+ You need to provide it in a concrete implementation.
+
+ Also, note that it is a bad idea to raise exceptions from here.
+ Under such circumstance, the component will be considered stopped,
+ and the exception propagated, but we can't be sure it really is
+ dead.
+ """
+ pass
+
+ def _failed_internal(self):
+ """
+ This method is called from failed. You can replace it if you need
+ some specific behaviour when the component crashes. The default
+ implementation is empty.
+
+ Do not raise exceptions from here, please. The proper shutdown
+ would not happen otherwise.
+ """
+ pass
+
+ def name(self):
+ """
+ Provides human readable name of the component, for logging and similar
+ purposes.
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def pid(self):
+ """
+ Provides a PID of a process, if the component is a real running process.
+ This may return None in cases when there's no process involved with the
+ component or in case the component is not started yet.
+
+ However, it is expected the component preserves the pid after it was
+ stopped, to ensure we can log it when we ask it to be killed (in case
+ the process refused to stop willingly).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def kill(self, forceful=False):
+ """
+ Kills the component.
+
+ If forceful is true, it should do it in more direct and aggressive way
+ (for example by using SIGKILL or some equivalent). If it is false, more
+ peaceful way should be used (SIGTERM or equivalent).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+class Component(BaseComponent):
+ """
+ The most common implementation of a component. It can be used either
+ directly, and it will just start the process without anything special,
+ or slightly customised by passing a start_func hook to the __init__
+ to change the way it starts.
+
+ If such customisation isn't enough, you should inherit BaseComponent
+ directly. It is not recommended to override methods of this class
+ on one-by-one basis.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None,
+ start_func=None):
+ """
+ Creates the component in not running mode.
+
+ The parameters are:
+ - `process` is the name of the process to start.
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. Refer to the documentation of
+ BaseComponent for details.
+ - `address` is the address on message bus. It is used to ask it to
+ shut down at the end. If you specialize the class for a component
+ that is shut down differently, it might be None.
+ - `params` is a list of parameters to pass to the process when it
+ starts. It is currently unused and this support is left out for
+ now.
+ - `start_func` is a function called when it is started. It is supposed
+ to start up the process and return a ProcInfo object describing it.
+ There's a sensible default if not provided, which just launches
+ the program without any special care.
+ """
+ BaseComponent.__init__(self, boss, kind)
+ self._process = process
+ self._start_func = start_func
+ self._address = address
+ self._params = params
+ self._procinfo = None
+
+ def _start_internal(self):
+ """
+ You can change the "core" of this function by setting self._start_func
+ to a function without parameters. Such function should start the
+ process and return the procinfo object describing the running process.
+
+ If you don't provide the _start_func, the usual startup by calling
+ boss.start_simple is performed.
+ """
+ # This one is not tested. For one, it starts a real process
+ # which is out of scope of unit tests, for another, it just
+ # delegates the starting to other function in boss (if a derived
+ # class does not provide an override function), which is tested
+ # by use.
+ if self._start_func is not None:
+ procinfo = self._start_func()
+ else:
+ # TODO Handle params, etc
+ procinfo = self._boss.start_simple(self._process)
+ self._procinfo = procinfo
+ self._boss.register_process(self.pid(), self)
+
+ def _stop_internal(self):
+ self._boss.stop_process(self._process, self._address)
+ # TODO Some way to wait for the process that doesn't want to
+ # terminate and kill it would prove nice (or add it to boss somewhere?)
+
+ def name(self):
+ """
+ Returns the name, derived from the process name.
+ """
+ return self._process
+
+ def pid(self):
+ return self._procinfo.pid if self._procinfo is not None else None
+
+ def kill(self, forcefull=False):
+ if self._procinfo is not None:
+ if forcefull:
+ self._procinfo.process.kill()
+ else:
+ self._procinfo.process.terminate()
+
+class Configurator:
+ """
+ This thing keeps track of configuration changes and starts and stops
+ components as it goes. It also handles the initial startup and final
+ shutdown.
+
+ Note that this will allow you to stop (by invoking reconfigure) a core
+ component. There should be some kind of layer protecting users from ever
+ doing so (users must not stop the config manager, message queue and stuff
+ like that or the system won't start again). However, if a user specifies
+ b10-auth as core, it is safe to stop that one.
+
+ The parameters are:
+ * `boss`: The boss we are managing for.
+ * `specials`: Dict of specially started components. Each item is a class
+ representing the component.
+
+ The configuration passed to it (by startup() and reconfigure()) is a
+ dictionary, each item represents one component that should be running.
+ The key is a unique identifier used to reference the component. The
+ value is a dictionary describing the component. All items in the
+ description are optional unless told otherwise and they are as follows:
+ * `special` - Some components are started in a special way. If it is
+ present, it specifies which class from the specials parameter should
+ be used to create the component. In that case, some of the following
+ items might be irrelevant, depending on the special component chosen.
+ If it is not there, the basic Component class is used.
+ * `process` - Name of the executable to start. If it is not present,
+ it defaults to the identifier of the component.
+ * `kind` - The kind of component, either of 'core', 'needed' and
+ 'dispensable'. This specifies what happens if the component fails.
+ This one is required.
+ * `address` - The address of the component on message bus. It is used
+ to shut down the component. All special components currently either
+ know their own address or don't need one and ignore it. The common
+ components should provide this.
+ * `params` - The command line parameters of the executable. Defaults
+ to no parameters. It is currently unused.
+ * `priority` - When starting the component, the components with higher
+ priority are started before the ones with lower priority. If it is
+ not present, it defaults to 0.
+ """
+ def __init__(self, boss, specials = {}):
+ """
+ Initializes the configurator, but nothing is started yet.
+
+ The boss parameter is the boss object used to start and stop processes.
+ """
+ self.__boss = boss
+ # These could be __private, but as we access them from within unittest,
+ # it's more comfortable to have them just _protected.
+
+ # They are tuples (configuration, component)
+ self._components = {}
+ self._running = False
+ self.__specials = specials
+
+ def __reconfigure_internal(self, old, new):
+ """
+ Does a switch from one configuration to another.
+ """
+ self._run_plan(self._build_plan(old, new))
+
+ def startup(self, configuration):
+ """
+ Starts the first set of processes. This configuration is expected
+ to be hardcoded from the boss itself to start the configuration
+ manager and other similar things.
+ """
+ if self._running:
+ raise ValueError("Trying to start the component configurator " +
+ "twice")
+ logger.info(BIND10_CONFIGURATOR_START)
+ self.__reconfigure_internal(self._components, configuration)
+ self._running = True
+
+ def shutdown(self):
+ """
+ Shuts everything down.
+
+ It is not expected that anyone would want to shutdown and then start
+ the configurator again, so we don't explicitly make sure that would
+ work. However, we are not aware of anything that would make it not
+ work either.
+ """
+ if not self._running:
+ raise ValueError("Trying to shutdown the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_STOP)
+ self._running = False
+ self.__reconfigure_internal(self._components, {})
+
+ def reconfigure(self, configuration):
+ """
+ Changes configuration from the current one to the provided. It
+ starts and stops all the components as needed (eg. if there's
+ a component that was not in the original configuration, it is
+ started, any component that was in the old and is not in the
+ new one is stopped).
+ """
+ if not self._running:
+ raise ValueError("Trying to reconfigure the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_RECONFIGURE)
+ self.__reconfigure_internal(self._components, configuration)
+
+ def _build_plan(self, old, new):
+ """
+ Builds a plan how to transfer from the old configuration to the new
+ one. It'll be sorted by priority and it will contain the components
+ (already created, but not started). Each command in the plan is a dict,
+ so it can be extended any time in future to include whatever
+ parameters each operation might need.
+
+ Any configuration problems are expected to be handled here, so the
+ plan is not yet run.
+ """
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_BUILD, old, new)
+ plan = []
+ # Handle removals of old components
+ for cname in old.keys():
+ if cname not in new:
+ component = self._components[cname][1]
+ if component.running():
+ plan.append({
+ 'command': STOP_CMD,
+ 'component': component,
+ 'name': cname
+ })
+ # Handle transitions of configuration of what is here
+ for cname in new.keys():
+ if cname in old:
+ for option in ['special', 'process', 'kind', 'address',
+ 'params']:
+ if new[cname].get(option) != old[cname][0].get(option):
+ raise NotImplementedError('Changing configuration of' +
+ ' a running component is ' +
+ 'not yet supported. Remove' +
+ ' and re-add ' + cname +
+ ' to get the same effect')
+ # Handle introduction of new components
+ plan_add = []
+ for cname in new.keys():
+ if cname not in old:
+ component_config = new[cname]
+ creator = Component
+ if 'special' in component_config:
+ # TODO: Better error handling
+ creator = self.__specials[component_config['special']]
+ component = creator(component_config.get('process', cname),
+ self.__boss, component_config['kind'],
+ component_config.get('address'),
+ component_config.get('params'))
+ priority = component_config.get('priority', 0)
+ # We store tuples, priority first, so we can easily sort
+ plan_add.append((priority, {
+ 'component': component,
+ 'command': START_CMD,
+ 'name': cname,
+ 'config': component_config
+ }))
+ # Push the starts there sorted by priority
+ plan.extend([command for (_, command) in sorted(plan_add,
+ reverse=True,
+ key=lambda command:
+ command[0])])
+ return plan
+
+ def running(self):
+ """
+ Returns if the configurator is running (eg. was started by startup and
+ not yet stopped by shutdown).
+ """
+ return self._running
+
+ def _run_plan(self, plan):
+ """
+ Run a plan, created beforehand by _build_plan.
+
+ With the start and stop commands, it also adds and removes components
+ in _components.
+
+ Currently implemented commands are:
+ * start
+ * stop
+
+ The plan is a list of tasks, each task is a dictionary. It must contain
+ at least 'component' (a component object to work with) and 'command'
+ (the command to do). Currently, both existing commands need 'name' of
+ the component as well (the identifier from configuration). The 'start'
+ one needs the 'config' to be there, which is the configuration description
+ of the component.
+ """
+ done = 0
+ try:
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_RUN, len(plan))
+ for task in plan:
+ component = task['component']
+ command = task['command']
+ logger.debug(DBG_TRACE_DETAILED, BIND10_CONFIGURATOR_TASK,
+ command, component.name())
+ if command == START_CMD:
+ component.start()
+ self._components[task['name']] = (task['config'],
+ component)
+ elif command == STOP_CMD:
+ if component.running():
+ component.stop()
+ del self._components[task['name']]
+ else:
+ # Can Not Happen (as the plans are generated by ourselves).
+ # Therefore not tested.
+ raise NotImplementedError("Command unknown: " + command)
+ done += 1
+ except:
+ logger.error(BIND10_CONFIGURATOR_PLAN_INTERRUPTED, done, len(plan))
+ raise
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 2345034..c681d07 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -202,6 +202,9 @@ class WrappedSocket:
class Creator(Parser):
"""
This starts the socket creator and allows asking for the sockets.
+
+ Note: __process shouldn't be reset once created. See the note
+ of the SockCreator class for details.
"""
def __init__(self, path):
(local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -213,11 +216,20 @@ class Creator(Parser):
env['PATH'] = path
self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
stdin=remote.fileno(),
- stdout=remote2.fileno())
+ stdout=remote2.fileno(),
+ preexec_fn=self.__preexec_work)
remote.close()
remote2.close()
Parser.__init__(self, WrappedSocket(local))
+ def __preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # Put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+
def pid(self):
return self.__process.pid
@@ -225,4 +237,3 @@ class Creator(Parser):
logger.warn(BIND10_SOCKCREATOR_KILL)
if self.__process is not None:
self.__process.kill()
- self.__process = None
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
new file mode 100644
index 0000000..26e87d2
--- /dev/null
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Here's the cache for sockets from socket creator.
+"""
+
+import os
+import random
+import isc.bind10.sockcreator
+from copy import copy
+
+class SocketError(Exception):
+ """
+ Exception raised when the socket creator is unable to create requested
+ socket. Possible reasons might be the address it should be bound to
+ is already taken, the permissions are insufficient, the address family
+ is not supported on this computer and many more.
+
+ The errno, if not None, is passed from the socket creator.
+ """
+ def __init__(self, message, errno):
+ Exception.__init__(self, message)
+ self.errno = errno
+
+class ShareError(Exception):
+ """
+ The requested socket is already taken by other component and the sharing
+ parameters don't allow sharing with the new request.
+ """
+ pass
+
+class Socket:
+ """
+ This represents one socket cached by the cache program. This should never
+ be used directly by a user, it is used internally by the Cache. Therefore
+ many member variables are used directly instead of by an accessor method.
+
+ Be warned that this object implements the __del__ method. It closes the
+ socket held inside in it. But this poses various problems with garbage
+ collector. In short, do not make reference cycles with this and generally
+ leave this class alone to live peacefully.
+ """
+ def __init__(self, protocol, address, port, fileno):
+ """
+ Creates the socket.
+
+ The protocol, address and port are preserved for the information.
+ """
+ self.protocol = protocol
+ self.address = address
+ self.port = port
+ self.fileno = fileno
+ # Mapping from token -> application
+ self.active_tokens = {}
+ # The tokens which were not yet picked up
+ self.waiting_tokens = set()
+ # Share modes and names by the tokens (token -> (mode, name))
+ self.shares = {}
+
+ def __del__(self):
+ """
+ Closes the file descriptor.
+ """
+ os.close(self.fileno)
+
+ def share_compatible(self, mode, name):
+ """
+ Checks if the given share mode and name is compatible with the ones
+ already installed here.
+
+ The allowed values for mode are listed in the Cache.get_token
+ function.
+ """
+ if mode not in ['NO', 'SAMEAPP', 'ANY']:
+ raise ValueError("Mode " + mode + " is invalid")
+
+ # Go through the existing ones
+ for (emode, ename) in self.shares.values():
+ if emode == 'NO' or mode == 'NO':
+ # One of them can't live together with anything
+ return False
+ if (emode == 'SAMEAPP' or mode == 'SAMEAPP') and \
+ ename != name:
+ # One of them can't live together with someone of different
+ # name
+ return False
+ # else both are ANY or SAMEAPP with the same name, which is OK
+ # No problem found, so we consider it OK
+ return True
+
+class Cache:
+ """
+ This is the cache for sockets from socket creator. The purpose of cache
+ is to hold the sockets that were requested, until they are no longer
+ needed. One reason is, the socket is created before it is sent over the
+ unix domain socket in boss, so we need to keep it somewhere for a while.
+
+ The other reason is, a single socket might be requested multiple times.
+ So we keep it here in case someone else might ask for it.
+
+ Each socket kept here has a reference count and when it drops to zero,
+ it is removed from cache and closed.
+
+ This is expected to be part of Boss, it is not a general utility class.
+
+ It is not expected to be subclassed. The methods and members are named
+ as protected so that tests can access them more easily.
+ """
+ def __init__(self, creator):
+ """
+ Initialization. The creator is the socket creator object
+ (isc.bind10.sockcreator.Creator) which will be used to create yet
+ uncached sockets.
+ """
+ self._creator = creator
+ # The sockets we have live here, these dicts are various ways how
+ # to get them. Each of them contains the Socket objects somehow
+
+ # This one is dict of token: socket for the ones that were not yet
+ # picked up by an application.
+ self._waiting_tokens = {}
+ # This format is the same as above, but for the tokens that were
+ # already picked up by the application and not yet released.
+ self._active_tokens = {}
+ # This is a dict from applications to set of tokens used by the
+ # application, for the sockets already picked up by an application
+ self._active_apps = {}
+ # The sockets live here to be indexed by protocol, address and
+ # subsequently by port
+ self._sockets = {}
+ # These are just the tokens actually in use, so we don't generate
+ # dupes. If one is dropped, it can be potentially reclaimed.
+ self._live_tokens = set()
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ This requests a token representing a socket. The socket is either
+ found in the cache already or requested from the creator at this time
+ (and cached for later time).
+
+ The parameters are:
+ - protocol: either 'UDP' or 'TCP'
+ - address: the IPAddr object representing the address to bind to
+ - port: integer saying which port to bind to
+ - share_mode: either 'NO', 'SAMEAPP' or 'ANY', specifying how the
+ socket can be shared with others. See bin/bind10/creatorapi.txt
+ for details.
+ - share_name: the name of application, in case of 'SAMEAPP' share
+ mode. Only requests with the same name can share the socket.
+
+ If the call is successful, it returns a string token which can be
+ used to pick up the socket later. The socket is created with reference
+ count zero and if it isn't picked up soon enough (the time yet has to
+ be set), it will be removed and the token is invalid.
+
+ It can fail in various ways. Explicitly listed exceptions are:
+ - SocketError: this one is thrown if the socket creator couldn't provide
+ the socket and it is not yet cached (it belongs to other application,
+ for example).
+ - ShareError: the socket is already in the cache, but it can't be
+ shared due to share_mode and share_name combination (both the request
+ restrictions and of all copies of socket handed out are considered,
+ so it can be raised even if you call it with share_mode 'ANY').
+ - isc.bind10.sockcreator.CreatorError: fatal creator errors are
+ propagated. They should cause the boss to exit if ever encountered.
+
+ Note that it isn't guaranteed the tokens would be unique and they
+ should be used as an opaque handle only.
+ """
+ addr_str = str(address)
+ try:
+ socket = self._sockets[protocol][addr_str][port]
+ except KeyError:
+ # Something in the dicts is not there, so socket is to be
+ # created
+ try:
+ fileno = self._creator.get_socket(address, port, protocol)
+ except isc.bind10.sockcreator.CreatorError as ce:
+ if ce.fatal:
+ raise
+ else:
+ raise SocketError(str(ce), ce.errno)
+ socket = Socket(protocol, address, port, fileno)
+ # And cache it
+ if protocol not in self._sockets:
+ self._sockets[protocol] = {}
+ if addr_str not in self._sockets[protocol]:
+ self._sockets[protocol][addr_str] = {}
+ self._sockets[protocol][addr_str][port] = socket
+ # Now we get the token, check it is compatible
+ if not socket.share_compatible(share_mode, share_name):
+ raise ShareError("Cached socket not compatible with mode " +
+ share_mode + " and name " + share_name)
+ # Grab a yet-unused token. Note: the bound must be 2**32 - 1; the
+ token = 't' + str(random.randint(0, 2**32 - 1))
+ while token in self._live_tokens:
+ token = 't' + str(random.randint(0, 2**32 - 1))
+ self._waiting_tokens[token] = socket
+ self._live_tokens.add(token)
+ socket.shares[token] = (share_mode, share_name)
+ socket.waiting_tokens.add(token)
+ return token
+
+ def get_socket(self, token, application):
+ """
+ This returns the socket created by get_token. The token should be the
+ one returned from previous call from get_token. The token can be used
+ only once to receive the socket.
+
+ The application is a token representing the application that requested
+ it. Currently, boss uses the file descriptor of connection from the
+ application, but anything which can be a key in a dict is OK from the
+ cache's point of view. You just need to use the same thing in
+ drop_application.
+
+ In case the token is considered invalid (it doesn't come from the
+ get_token, it was already used, the socket wasn't picked up soon
+ enough, ...), it raises ValueError.
+ """
+ try:
+ socket = self._waiting_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token +
+ " isn't waiting to be picked up")
+ del self._waiting_tokens[token]
+ self._active_tokens[token] = socket
+ if application not in self._active_apps:
+ self._active_apps[application] = set()
+ self._active_apps[application].add(token)
+ socket.waiting_tokens.remove(token)
+ socket.active_tokens[token] = application
+ return socket.fileno
+
+ def drop_socket(self, token):
+ """
+ This signals the application no longer uses the socket which was
+ requested by the given token. It decreases the reference count for
+ the socket and closes and removes the cached copy if it was the last
+ one.
+
+ It raises ValueError if the token doesn't exist.
+ """
+ try:
+ socket = self._active_tokens[token]
+ except KeyError:
+ raise ValueError("Token " + token + " doesn't represent an " +
+ "active socket")
+ # Now, remove everything from the bookkeeping
+ del socket.shares[token]
+ app = socket.active_tokens[token]
+ del socket.active_tokens[token]
+ del self._active_tokens[token]
+ self._active_apps[app].remove(token)
+ if len(self._active_apps[app]) == 0:
+ del self._active_apps[app]
+ self._live_tokens.remove(token)
+ # The socket is not used by anything now, so remove it
+ if len(socket.active_tokens) == 0 and len(socket.waiting_tokens) == 0:
+ addr = str(socket.address)
+ port = socket.port
+ proto = socket.protocol
+ del self._sockets[proto][addr][port]
+ # Clean up empty branches of the structure
+ if len(self._sockets[proto][addr]) == 0:
+ del self._sockets[proto][addr]
+ if len(self._sockets[proto]) == 0:
+ del self._sockets[proto]
+
+ def drop_application(self, application):
+ """
+ This signals the application terminated and all sockets it picked up
+ should be considered unused by it now. It effectively calls drop_socket
+ on each of the sockets the application picked up and didn't drop yet.
+
+ If the application is invalid (no get_socket was successful with this
+ value of application), it raises ValueError.
+ """
+ try:
+ # Get a copy. Mutating a set while iterating over it is not
+ # safe, so we iterate over our own copy and let drop_socket
+ # remove tokens from the original as we go.
+ to_drop = copy(self._active_apps[application])
+ except KeyError:
+ raise ValueError("Application " + str(application) +
+ " doesn't hold any sockets")
+ for token in to_drop:
+ self.drop_socket(token)
+ # We don't call del now. The last drop_socket should have
+ # removed the application key as well.
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
new file mode 100644
index 0000000..c9c7683
--- /dev/null
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.bind10.component import Component, BaseComponent
+import isc.bind10.sockcreator
+from bind10_config import LIBEXECDIR
+import os
+import posix
+import isc.log
+from isc.log_messages.bind10_messages import *
+
+logger = isc.log.Logger("boss")
+
+class SockCreator(BaseComponent):
+ """
+ The socket creator component. Will start and stop the socket creator
+ accordingly.
+
+ Note: _creator shouldn't be reset explicitly once created. The
+ underlying Popen object would then wait() the child process internally,
+ which breaks the assumption of the boss, who is expecting to see
+ the process die in waitpid().
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.__creator = None
+
+ def _start_internal(self):
+ self._boss.curproc = 'b10-sockcreator'
+ self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
+ os.environ['PATH'])
+ self._boss.register_process(self.pid(), self)
+ self._boss.set_creator(self.__creator)
+ self._boss.log_started(self.pid())
+
+ def _stop_internal(self):
+ self.__creator.terminate()
+
+ def name(self):
+ return "Socket creator"
+
+ def pid(self):
+ """
+ Pid of the socket creator. It is provided differently from a usual
+ component.
+ """
+ return self.__creator.pid() if self.__creator else None
+
+ def kill(self, forceful=False):
+ # We don't really care about forceful here
+ if self.__creator:
+ self.__creator.kill()
+
+class Msgq(Component):
+ """
+ The message queue. Starting is passed to boss, stopping is not supported
+ and we leave the boss kill it by signal.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, None, None,
+ boss.start_msgq)
+
+ def _stop_internal(self):
+ """
+ We can't really stop the message queue, as many processes may need
+ it for their shutdown and it doesn't have a shutdown command anyway.
+ But as it is stateless, it's OK to kill it.
+
+ So we disable this method (as the only time it could be called is
+ during shutdown) and wait for the boss to kill it in the next shutdown
+ step.
+
+ This actually breaks the recommendation at Component we shouldn't
+ override its methods one by one. This is a special case, because
+ we don't provide a different implementation, we completely disable
+ the method by providing an empty one. This can't hurt the internals.
+ """
+ pass
+
+class CfgMgr(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'ConfigManager',
+ None, boss.start_cfgmgr)
+
+class Auth(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Auth', None,
+ boss.start_auth)
+
+class Resolver(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Resolver', None,
+ boss.start_resolver)
+
+class CmdCtl(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Cmdctl', None,
+ boss.start_cmdctl)
+
+class SetUID(BaseComponent):
+ """
+ This is a pseudo-component which drops root privileges when started
+ and sets the uid stored in boss.
+
+ This component does nothing when stopped.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.uid = boss.uid
+
+ def _start_internal(self):
+ if self.uid is not None:
+ logger.info(BIND10_SETUID, self.uid)
+ posix.setuid(self.uid)
+
+ def _stop_internal(self): pass
+ def kill(self, forceful=False): pass
+
+ def name(self):
+ return "Set UID"
+
+ def pid(self):
+ return None
+
+def get_specials():
+ """
+ List of specially started components. Each one should be the class that can
+ be created for that component.
+ """
+ return {
+ 'sockcreator': SockCreator,
+ 'msgq': Msgq,
+ 'cfgmgr': CfgMgr,
+ # TODO: Should these be replaced by configuration in config manager only?
+ # They should not have any parameters anyway
+ 'auth': Auth,
+ 'resolver': Resolver,
+ 'cmdctl': CmdCtl,
+ # TODO: Remove when not needed, workaround before sockcreator works
+ 'setuid': SetUID
+ }
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df8ab30..658db1e 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py
+PYTESTS = sockcreator_test.py component_test.py socket_cache_test.py
EXTRA_DIST = $(PYTESTS)
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
new file mode 100644
index 0000000..3b49b18
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -0,0 +1,1032 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Tests for the isc.bind10.component module and the
+isc.bind10.special_component module.
+"""
+
+import unittest
+import isc.log
+import time
+import copy
+from isc.bind10.component import Component, Configurator, BaseComponent
+import isc.bind10.special_component
+
+class TestError(Exception):
+ """
+ Just a private exception not known to anybody we use for our tests.
+ """
+ pass
+
+class BossUtils:
+ """
+ A class that brings some utilities for pretending we're Boss.
+ This is expected to be inherited by the testcases themselves.
+ """
+ def setUp(self):
+ """
+ Part of setup. Should be called by descendant's setUp.
+ """
+ self._shutdown = False
+ self._exitcode = None
+ # Back up the time function, we may want to replace it with something
+ self.__orig_time = isc.bind10.component.time.time
+
+ def tearDown(self):
+ """
+ Clean up after tests. If the descendant implements a tearDown, it
+ should call this method internally.
+ """
+ # Return the original time function
+ isc.bind10.component.time.time = self.__orig_time
+
+ def component_shutdown(self, exitcode=0):
+ """
+ Mock function to shut down. We just note we were asked to do so.
+ """
+ self._shutdown = True
+ self._exitcode = exitcode
+
+ def _timeskip(self):
+ """
+ Skip in time to future some 30s. Implemented by replacing the
+ time.time function in the tested module with function that returns
+ current time increased by 30.
+ """
+ tm = time.time()
+ isc.bind10.component.time.time = lambda: tm + 30
+
+ # Few functions that pretend to start something. Part of pretending of
+ # being boss.
+ def start_msgq(self):
+ pass
+
+ def start_cfgmgr(self):
+ pass
+
+ def start_auth(self):
+ pass
+
+ def start_resolver(self):
+ pass
+
+ def start_cmdctl(self):
+ pass
+
+class ComponentTests(BossUtils, unittest.TestCase):
+ """
+ Tests for the bind10.component.Component class
+ """
+ def setUp(self):
+ """
+ Pretend a newly started system.
+ """
+ BossUtils.setUp(self)
+ self._shutdown = False
+ self._exitcode = None
+ self.__start_called = False
+ self.__stop_called = False
+ self.__failed_called = False
+ self.__registered_processes = {}
+ self.__stop_process_params = None
+ self.__start_simple_params = None
+ # Pretending to be boss
+ self.uid = None
+ self.__uid_set = None
+
+ def __start(self):
+ """
+ Mock function, installed into the component into _start_internal.
+ This only notes the component was "started".
+ """
+ self.__start_called = True
+
+ def __stop(self):
+ """
+ Mock function, installed into the component into _stop_internal.
+ This only notes the component was "stopped".
+ """
+ self.__stop_called = True
+
+ def __fail(self):
+ """
+ Mock function, installed into the component into _failed_internal.
+ This only notes the component called the method.
+ """
+ self.__failed_called = True
+
+ def __fail_to_start(self):
+ """
+ Mock function. It can be installed into the component's _start_internal
+ to simulate a component that fails to start by raising an exception.
+ """
+ orig_started = self.__start_called
+ self.__start_called = True
+ if not orig_started:
+ # This one is from restart. Avoid infinite recursion for now.
+ # FIXME: We should use the restart scheduler to avoid it, not this.
+ raise TestError("Test error")
+
+ def __create_component(self, kind):
+ """
+ Convenience function that creates a component of given kind
+ and installs the mock functions into it so we can hook up into
+ its behaviour.
+
+ The process used is some nonsense, as this isn't used in this
+ kind of tests and we pretend to be the boss.
+ """
+ component = Component('No process', self, kind, 'homeless', [])
+ component._start_internal = self.__start
+ component._stop_internal = self.__stop
+ component._failed_internal = self.__fail
+ return component
+
+ def test_name(self):
+ """
+ Test the name provides whatever we passed to the constructor as process.
+ """
+ component = self.__create_component('core')
+ self.assertEqual('No process', component.name())
+
+ def test_guts(self):
+ """
+ Test the correct data are stored inside the component.
+ """
+ component = self.__create_component('core')
+ self.assertEqual(self, component._boss)
+ self.assertEqual("No process", component._process)
+ self.assertEqual(None, component._start_func)
+ self.assertEqual("homeless", component._address)
+ self.assertEqual([], component._params)
+
+ def __check_startup(self, component):
+ """
+ Check that nothing was called yet. A newly created component should
+ not get started right away, so this should pass after the creation.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertFalse(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # We can't stop or fail the component yet
+ self.assertRaises(ValueError, component.stop)
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_started(self, component):
+ """
+ Check the component was started, but not stopped anyhow yet.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertTrue(component.running())
+
+ def __check_dead(self, component):
+ """
+ Check the component is completely dead, and the server too.
+ """
+ self.assertTrue(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertEqual(1, self._exitcode)
+ self.assertFalse(component.running())
+ # Surely it can't be stopped when already dead
+ self.assertRaises(ValueError, component.stop)
+ # Nor started
+ self.assertRaises(ValueError, component.start)
+ # Nor it can fail again
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_restarted(self, component):
+ """
+ Check the component restarted successfully.
+
+ Reset the self.__start_called to False before calling the function when
+ the component should fail.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertTrue(component.running())
+ # Check it can't be started again
+ self.assertRaises(ValueError, component.start)
+
+ def __check_not_restarted(self, component):
+ """
+ Check the component has not (yet) restarted successfully.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertFalse(component.running())
+
+ def __do_start_stop(self, kind):
+ """
+ This is a body of a test. It creates a component of given kind,
+ then starts it and stops it. It checks correct functions are called
+ and the component's status is correct.
+
+ It also checks the component can't be started/stopped twice.
+ """
+ # Create it and check it did not do any funny stuff yet
+ component = self.__create_component(kind)
+ self.__check_startup(component)
+ # Start it and check it called the correct starting functions
+ component.start()
+ self.__check_started(component)
+ # Check it can't be started twice
+ self.assertRaises(ValueError, component.start)
+ # Stop it again and check
+ component.stop()
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertTrue(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # Check it can't be stopped twice
+ self.assertRaises(ValueError, component.stop)
+ # Or failed
+ self.assertRaises(ValueError, component.failed, 1)
+ # But it can be started again if it is stopped
+ # (no more checking here, just it doesn't crash)
+ component.start()
+
+ def test_start_stop_core(self):
+ """
+ A start-stop test for core component. See do_start_stop.
+ """
+ self.__do_start_stop('core')
+
+ def test_start_stop_needed(self):
+ """
+ A start-stop test for needed component. See do_start_stop.
+ """
+ self.__do_start_stop('needed')
+
+ def test_start_stop_dispensable(self):
+ """
+ A start-stop test for dispensable component. See do_start_stop.
+ """
+ self.__do_start_stop('dispensable')
+
+ def test_start_fail_core(self):
+ """
+ Start and then fail a core component. It should stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Pretend the component died
+ restarted = component.failed(1)
+ # Since it is a core component, it should not be restarted
+ self.assertFalse(restarted)
+ # It should bring down the whole server
+ self.__check_dead(component)
+
+ def test_start_fail_core_later(self):
+ """
+ Start and then fail a core component, but let it be running for longer time.
+ It should still stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ self._timeskip()
+ # Pretend the component died some time later
+ restarted = component.failed(1)
+ # Should not be restarted
+ self.assertFalse(restarted)
+ # Check the component is still dead
+ self.__check_dead(component)
+
+ def test_start_fail_needed(self):
+ """
+ Start and then fail a needed component. As this happens really soon after
+ being started, it is considered failure to start and should bring down the
+ whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away.
+ restarted = component.failed(1)
+ # Should not have restarted
+ self.assertFalse(restarted)
+ self.__check_dead(component)
+
+ def test_start_fail_needed_later(self):
+ """
+ Start and then fail a needed component. But the failure is later on, so
+ we just restart it and will be happy.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self.__start_called = False
+ self._timeskip()
+ restarted = component.failed(1)
+ # Should have restarted
+ self.assertTrue(restarted)
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable(self):
+ """
+ Start and then fail a dispensable component. Should not get restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away
+ restarted = component.failed(1)
+ # Should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+
+ def test_start_fail_dispensable_later(self):
+ """
+ Start and then later on fail a dispensable component. Should just get
+ restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self._timeskip()
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable_restart_later(self):
+ """
+ Start and then fail a dispensable component, wait a bit and try to
+ restart. Should get restarted after the wait.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail immediately
+ restarted = component.failed(1)
+ # should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # try to restart again
+ restarted = component.restart()
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ def test_fail_core(self):
+ """
+ Failure to start a core component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_needed(self):
+ """
+ Failure to start a needed component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_dispensable(self):
+ """
+ Failure to start a dispensable component. The exception should get
+ through, but it should be restarted after a time skip.
+ """
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ # tell it to see if it must restart
+ restarted = component.restart()
+ # should not have restarted yet
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # tell it to see if it must restart and do so, with our vision of time
+ restarted = component.restart()
+ # should have restarted now
+ self.assertTrue(restarted)
+ self.__check_restarted(component)
+
+ def test_component_start_time(self):
+ """
+ Check that original start time is set initially, and remains the same
+ after a restart, while the internal __start_time does change
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ self.assertIsNone(component._original_start_time)
+ component.start()
+ self.__check_started(component)
+
+ self.assertIsNotNone(component._original_start_time)
+ self.assertIsNotNone(component._BaseComponent__start_time)
+ original_start_time = component._original_start_time
+ start_time = component._BaseComponent__start_time
+ # Not restarted yet, so they should be the same
+ self.assertEqual(original_start_time, start_time)
+
+ self._timeskip()
+ # Make it fail
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ # original start time should not have changed
+ self.assertEqual(original_start_time, component._original_start_time)
+ # but actual start time should
+ self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
+ def test_bad_kind(self):
+ """
+ Test the component rejects nonsensical kinds. This includes bad
+ capitalization.
+ """
+ for kind in ['Core', 'CORE', 'nonsense', 'need ed', 'required']:
+ self.assertRaises(ValueError, Component, 'No process', self, kind)
+
+ def test_pid_not_running(self):
+ """
+ Test that a component that is not yet started doesn't have a PID.
+ But it won't fail if asked for and return None.
+ """
+ for component_type in [Component,
+ isc.bind10.special_component.SockCreator,
+ isc.bind10.special_component.Msgq,
+ isc.bind10.special_component.CfgMgr,
+ isc.bind10.special_component.Auth,
+ isc.bind10.special_component.Resolver,
+ isc.bind10.special_component.CmdCtl,
+ isc.bind10.special_component.SetUID]:
+ component = component_type('none', self, 'needed')
+ self.assertIsNone(component.pid())
+
+ def test_kill_unstarted(self):
+ """
+ Try to kill the component if it's not started. Should not fail.
+
+ We do not try to kill a running component, as we should not start
+ it during unit tests.
+ """
+ component = Component('component', self, 'needed')
+ component.kill()
+ component.kill(True)
+
+ def register_process(self, pid, process):
+ """
+ Part of pretending to be a boss
+ """
+ self.__registered_processes[pid] = process
+
+ def test_component_attributes(self):
+ """
+ Test the default attributes of Component (not BaseComponent) and
+ some of the methods we might be allowed to call.
+ """
+ class TestProcInfo:
+ def __init__(self):
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ ['hello'], TestProcInfo)
+ self.assertEqual('component', component._process)
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._procinfo)
+ self.assertIsNone(component.pid())
+ self.assertEqual(['hello'], component._params)
+ self.assertEqual('Address', component._address)
+ self.assertFalse(component.running())
+ self.assertEqual({}, self.__registered_processes)
+ component.start()
+ self.assertTrue(component.running())
+ # Some versions of unittest miss assertIsInstance
+ self.assertTrue(isinstance(component._procinfo, TestProcInfo))
+ self.assertEqual(42, component.pid())
+ self.assertEqual(component, self.__registered_processes.get(42))
+
+ def stop_process(self, process, address):
+ """
+ Part of pretending to be boss.
+ """
+ self.__stop_process_params = (process, address)
+
+ def start_simple(self, process):
+ """
+ Part of pretending to be boss.
+ """
+ self.__start_simple_params = process
+
+ def test_component_start_stop_internal(self):
+ """
+ Test the behavior of _stop_internal and _start_internal.
+ """
+ component = Component('component', self, 'needed', 'Address')
+ component.start()
+ self.assertTrue(component.running())
+ self.assertEqual('component', self.__start_simple_params)
+ component.stop()
+ self.assertFalse(component.running())
+ self.assertEqual(('component', 'Address'), self.__stop_process_params)
+
+ def test_component_kill(self):
+ """
+ Check the kill is propagated. The case when component wasn't started
+ yet is already tested elsewhere.
+ """
+ class Process:
+ def __init__(self):
+ self.killed = False
+ self.terminated = False
+ def kill(self):
+ self.killed = True
+ def terminate(self):
+ self.terminated = True
+ process = Process()
+ class ProcInfo:
+ def __init__(self):
+ self.process = process
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ [], ProcInfo)
+ component.start()
+ self.assertTrue(component.running())
+ component.kill()
+ self.assertTrue(process.terminated)
+ self.assertFalse(process.killed)
+ process.terminated = False
+ component.kill(True)
+ self.assertTrue(process.killed)
+ self.assertFalse(process.terminated)
+
+ def setuid(self, uid):
+ self.__uid_set = uid
+
+ def test_setuid(self):
+ """
+ Some tests around the SetUID pseudo-component.
+ """
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ orig_setuid = isc.bind10.special_component.posix.setuid
+ isc.bind10.special_component.posix.setuid = self.setuid
+ component.start()
+ # No uid set in boss, nothing called.
+ self.assertIsNone(self.__uid_set)
+ # Doesn't do anything, but doesn't crash
+ component.stop()
+ component.kill()
+ component.kill(True)
+ self.uid = 42
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ component.start()
+ # This time, it gets called
+ self.assertEqual(42, self.__uid_set)
+
+class TestComponent(BaseComponent):
+ """
+ A test component. It does not start any processes or so, it just logs
+ information about what happens.
+ """
+ def __init__(self, owner, name, kind, address=None, params=None):
+ """
+ Initializes the component. The owner is the test that started the
+ component. The logging will happen into it.
+
+ The process is used as a name for the logging.
+ """
+ BaseComponent.__init__(self, owner, kind)
+ self.__owner = owner
+ self.__name = name
+ self.log('init')
+ self.log(kind)
+ self._address = address
+ self._params = params
+
+ def log(self, event):
+ """
+ Log an event into the owner. The owner can then check the correct
+ order of events that happened.
+ """
+ self.__owner.log.append((self.__name, event))
+
+ def _start_internal(self):
+ self.log('start')
+
+ def _stop_internal(self):
+ self.log('stop')
+
+ def _failed_internal(self):
+ self.log('failed')
+
+ def kill(self, forceful=False):
+ self.log('killed')
+
+class FailComponent(BaseComponent):
+ """
+ A mock component that fails whenever it is started.
+ """
+ def __init__(self, name, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+
+ def _start_internal(self):
+ raise TestError("test error")
+
+class ConfiguratorTest(BossUtils, unittest.TestCase):
+ """
+ Tests for the configurator.
+ """
+ def setUp(self):
+ """
+ Prepare some test data for the tests.
+ """
+ BossUtils.setUp(self)
+ self.log = []
+ # The core "hardcoded" configuration
+ self.__core = {
+ 'core1': {
+ 'priority': 5,
+ 'process': 'core1',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core2': {
+ 'process': 'core2',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core3': {
+ 'process': 'core3',
+ 'priority': 3,
+ 'special': 'test',
+ 'kind': 'core'
+ }
+ }
+ # How they should be started. They are created in the order they are
+ # found in the dict, but then they should be started by priority.
+ # This expects that the same dict returns its keys in the same order
+ # every time
+ self.__core_log_create = []
+ for core in self.__core.keys():
+ self.__core_log_create.append((core, 'init'))
+ self.__core_log_create.append((core, 'core'))
+ self.__core_log_start = [('core1', 'start'), ('core3', 'start'),
+ ('core2', 'start')]
+ self.__core_log = self.__core_log_create + self.__core_log_start
+ self.__specials = { 'test': self.__component_test }
+
+ def __component_test(self, process, boss, kind, address=None, params=None):
+ """
+ Create a test component. It will log events to us.
+ """
+ self.assertEqual(self, boss)
+ return TestComponent(self, process, kind, address, params)
+
+ def test_init(self):
+ """
+ Tests the configurator can be created and it does not create
+ any components yet, nor does it remember anything.
+ """
+ configurator = Configurator(self, self.__specials)
+ self.assertEqual([], self.log)
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+
+ def test_run_plan(self):
+ """
+ Test the internal function of running plans. Just see it can handle
+ the commands in the given order. We see that by the log.
+
+ Also includes one that raises, so we see it just stops there.
+ """
+ # Prepare the configurator and the plan
+ configurator = Configurator(self, self.__specials)
+ started = self.__component_test('second', self, 'dispensable')
+ started.start()
+ stopped = self.__component_test('first', self, 'core')
+ configurator._components = {'second': started}
+ plan = [
+ {
+ 'component': stopped,
+ 'command': 'start',
+ 'name': 'first',
+ 'config': {'a': 1}
+ },
+ {
+ 'component': started,
+ 'command': 'stop',
+ 'name': 'second',
+ 'config': {}
+ },
+ {
+ 'component': FailComponent('third', self, 'needed'),
+ 'command': 'start',
+ 'name': 'third',
+ 'config': {}
+ },
+ {
+ 'component': self.__component_test('fourth', self, 'core'),
+ 'command': 'start',
+ 'name': 'fourth',
+ 'config': {}
+ }
+ ]
+ # Don't include the preparation into the log
+ self.log = []
+ # The error from the third component is propagated
+ self.assertRaises(TestError, configurator._run_plan, plan)
+ # The first two were handled, the rest not, due to the exception
+ self.assertEqual([('first', 'start'), ('second', 'stop')], self.log)
+ self.assertEqual({'first': ({'a': 1}, stopped)},
+ configurator._components)
+
+ def __build_components(self, config):
+ """
+ Insert the components into the configuration to specify possible
+ Configurator._components.
+
+ Actually, the components are None, but we need something to be there.
+ """
+ result = {}
+ for name in config.keys():
+ result[name] = (config[name], None)
+ return result
+
+ def test_build_plan(self):
+ """
+ Test building the plan correctly. Not complete yet, this grows as we
+ add more ways of changing the plan.
+ """
+ configurator = Configurator(self, self.__specials)
+ plan = configurator._build_plan({}, self.__core)
+ # This should have created the components
+ self.assertEqual(self.__core_log_create, self.log)
+ self.assertEqual(3, len(plan))
+ for (task, name) in zip(plan, ['core1', 'core3', 'core2']):
+ self.assertTrue('component' in task)
+ self.assertEqual('start', task['command'])
+ self.assertEqual(name, task['name'])
+ component = task['component']
+ self.assertIsNone(component._address)
+ self.assertIsNone(component._params)
+
+ # A plan to go from older state to newer one containing more components
+ bigger = copy.copy(self.__core)
+ bigger['additional'] = {
+ 'priority': 6,
+ 'special': 'test',
+ 'process': 'additional',
+ 'kind': 'needed'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(self.__core),
+ bigger)
+ self.assertEqual([('additional', 'init'), ('additional', 'needed')],
+ self.log)
+ self.assertEqual(1, len(plan))
+ self.assertTrue('component' in plan[0])
+ component = plan[0]['component']
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+
+ # Now remove the one component again
+ # We run the plan so the component is wired into internal structures
+ configurator._run_plan(plan)
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ self.__core)
+ self.assertEqual([], self.log)
+ self.assertEqual([{
+ 'command': 'stop',
+ 'name': 'additional',
+ 'component': component
+ }], plan)
+
+ # We want to switch a component. So, prepare the configurator so it
+ # holds one
+ configurator._run_plan(configurator._build_plan(
+ self.__build_components(self.__core), bigger))
+ # Get a different configuration with a different component
+ different = copy.copy(self.__core)
+ different['another'] = {
+ 'special': 'test',
+ 'process': 'another',
+ 'kind': 'dispensable'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ different)
+ self.assertEqual([('another', 'init'), ('another', 'dispensable')],
+ self.log)
+ self.assertEqual(2, len(plan))
+ self.assertEqual('stop', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+ self.assertTrue('component' in plan[0])
+ self.assertEqual('start', plan[1]['command'])
+ self.assertEqual('another', plan[1]['name'])
+ self.assertTrue('component' in plan[1])
+
+ # Some slightly insane plans, like missing process, having parameters,
+ # no special, etc
+ plan = configurator._build_plan({}, {
+ 'component': {
+ 'kind': 'needed',
+ 'params': ["1", "2"],
+ 'address': 'address'
+ }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertEqual(["1", "2"], component._params)
+ self.assertEqual('address', component._address)
+ self.assertEqual('needed', component._kind)
+ # We don't use isinstance on purpose, it would allow a descendant
+ self.assertTrue(type(component) is Component)
+ plan = configurator._build_plan({}, {
+ 'component': { 'kind': 'dispensable' }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._params)
+ self.assertIsNone(component._address)
+ self.assertEqual('dispensable', component._kind)
+
+ def __do_switch(self, option, value):
+ """
+ Start it with some component and then switch the configuration of the
+ component. This will probably raise, as it is not yet supported.
+ """
+ configurator = Configurator(self, self.__specials)
+ compconfig = {
+ 'special': 'test',
+ 'process': 'process',
+ 'priority': 13,
+ 'kind': 'core'
+ }
+ modifiedconfig = copy.copy(compconfig)
+ modifiedconfig[option] = value
+ return configurator._build_plan({'comp': (compconfig, None)},
+ {'comp': modifiedconfig})
+
+ def test_change_config_plan(self):
+ """
+ Test changing a configuration of one component. This is not yet
+ implemented and should therefore throw.
+ """
+ self.assertRaises(NotImplementedError, self.__do_switch, 'kind',
+ 'dispensable')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'special',
+ 'not_a_test')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'process',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'address',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'params',
+ ['different'])
+ # This does not change anything on running component, so no need to
+ # raise
+ self.assertEqual([], self.__do_switch('priority', 5))
+ # Check against false positive, if the data are the same, but different
+ # instance
+ self.assertEqual([], self.__do_switch('special', 'test'))
+
+ def __check_shutdown_log(self):
+ """
+ Checks the log for shutting down from the core configuration.
+ """
+ # We know everything must be stopped, we know what it is.
+ # But we don't know the order, so we check everything is exactly
+ # once in the log
+ components = set(self.__core.keys())
+ for (name, command) in self.log:
+ self.assertEqual('stop', command)
+ self.assertTrue(name in components)
+ components.remove(name)
+ self.assertEqual(set([]), components, "Some component wasn't stopped")
+
+ def test_run(self):
+ """
+ Passes some configuration to the startup method and sees if
+ the components are started up. Then it reconfigures it with
+ empty configuration, the original configuration again and shuts
+ down.
+
+ It also checks the components are kept inside the configurator.
+ """
+ configurator = Configurator(self, self.__specials)
+ # Can't reconfigure nor stop yet
+ self.assertRaises(ValueError, configurator.reconfigure, self.__core)
+ self.assertRaises(ValueError, configurator.shutdown)
+ self.assertFalse(configurator.running())
+ # Start it
+ configurator.startup(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+ # It can't be started twice
+ self.assertRaises(ValueError, configurator.startup, self.__core)
+
+ self.log = []
+ # Reconfigure - stop everything
+ configurator.reconfigure({})
+ self.assertEqual({}, configurator._components)
+ self.assertTrue(configurator.running())
+ self.__check_shutdown_log()
+
+ # Start it again
+ self.log = []
+ configurator.reconfigure(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+
+ # Do a shutdown
+ self.log = []
+ configurator.shutdown()
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+ self.__check_shutdown_log()
+
+ # It can't be stopped twice
+ self.assertRaises(ValueError, configurator.shutdown)
+
+ def test_sort_no_prio(self):
+ """
+ There was a bug if there were two things with the same priority
+ (or without priority), it failed as it couldn't compare the dicts
+ there. This tests it doesn't crash.
+ """
+ configurator = Configurator(self, self.__specials)
+ configurator._build_plan({}, {
+ "c1": { 'kind': 'dispensable'},
+ "c2": { 'kind': 'dispensable'}
+ })
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
index 4453184..d97d21b 100644
--- a/src/lib/python/isc/bind10/tests/sockcreator_test.py
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -13,9 +13,6 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
"""
Tests for the bind10.sockcreator module.
"""
diff --git a/src/lib/python/isc/bind10/tests/socket_cache_test.py b/src/lib/python/isc/bind10/tests/socket_cache_test.py
new file mode 100644
index 0000000..bbbf776
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/socket_cache_test.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import isc.log
+import isc.bind10.socket_cache
+import isc.bind10.sockcreator
+from isc.net.addr import IPAddr
+import os
+
+class Test(unittest.TestCase):
+ """
+ Base for the tests here. It replaces the os.close method.
+ """
+ def setUp(self):
+ self._closes = []
+ isc.bind10.socket_cache.os.close = self.__close
+
+ def tearDown(self):
+ # This is not a very clean solution. But once the test ceases
+ # to exist, the method must no longer be used to destroy the
+ # object. And we can't restore the os.close here
+ # as we never work with real sockets here.
+ isc.bind10.socket_cache.os.close = lambda fd: None
+
+ def __close(self, fd):
+ """
+ Just log a close was called.
+ """
+ self._closes.append(fd)
+
+class SocketTest(Test):
+ """
+ Test for the Socket class.
+ """
+ def setUp(self):
+ """
+ Creates the socket to be tested.
+
+ It also creates other useful test variables.
+ """
+ Test.setUp(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+
+ def test_init(self):
+ """
+ Checks the internals of the cache just after the creation.
+ """
+ self.assertEqual('UDP', self.__socket.protocol)
+ self.assertEqual(self.__address, self.__socket.address)
+ self.assertEqual(1024, self.__socket.port)
+ self.assertEqual(42, self.__socket.fileno)
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+
+ def test_del(self):
+ """
+ Check it closes the socket when removed.
+ """
+ # This should make the refcount 0 and call the destructor
+ # right away
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+ def test_share_modes(self):
+ """
+ Test the share mode compatibility check function.
+ """
+ modes = ['NO', 'SAMEAPP', 'ANY']
+ # If there are no shares, it is compatible with everything.
+ for mode in modes:
+ self.assertTrue(self.__socket.share_compatible(mode, 'anything'))
+
+ # There's a NO already, so it is incompatible with everything.
+ self.__socket.shares = {'token': ('NO', 'anything')}
+ for mode in modes:
+ self.assertFalse(self.__socket.share_compatible(mode, 'anything'))
+
+ # If there's SAMEAPP, it is compatible with ANY and SAMEAPP with the
+ # same name.
+ self.__socket.shares = {'token': ('SAMEAPP', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+
+ # If there's ANY, then ANY and SAMEAPP with the same name is compatible
+ self.__socket.shares = {'token': ('ANY', 'app')}
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'something'))
+
+ # In case there are multiple already inside
+ self.__socket.shares = {
+ 'token': ('ANY', 'app'),
+ 'another': ('SAMEAPP', 'app')
+ }
+ self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+ self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+ 'something'))
+ self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+ self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+ self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+
+ # Invalid inputs are rejected
+ self.assertRaises(ValueError, self.__socket.share_compatible, 'bad',
+ 'bad')
+
+class SocketCacheTest(Test):
+ """
+ Some tests for the isc.bind10.socket_cache.Cache.
+
+ This class, as well as being the testcase, pretends to be the
+ socket creator so it can hijack all the requests for sockets.
+ """
+ def setUp(self):
+ """
+ Creates the cache for tests with us being the socket creator.
+
+ Also creates some more variables for testing.
+ """
+ Test.setUp(self)
+ self.__cache = isc.bind10.socket_cache.Cache(self)
+ self.__address = IPAddr("192.0.2.1")
+ self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+ 1024, 42)
+ self.__get_socket_called = False
+
+ def test_init(self):
+ """
+ Checks the internals of the cache just after the creation.
+ """
+ self.assertEqual(self, self.__cache._creator)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+
+ def get_socket(self, address, port, socktype):
+ """
+ Pretend to be a socket creator.
+
+ This expects to be called with the _address, port 1024 and 'UDP'.
+
+ Returns 42 and notes down it was called.
+ """
+ self.assertEqual(self.__address, address)
+ self.assertEqual(1024, port)
+ self.assertEqual('UDP', socktype)
+ self.__get_socket_called = True
+ return 42
+
+ def test_get_token_cached(self):
+ """
+ Check the behaviour of get_token when the requested socket is already
+ cached inside.
+ """
+ self.__cache._sockets = {
+ 'UDP': {'192.0.2.1': {1024: self.__socket}}
+ }
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # It didn't call get_socket
+ self.assertFalse(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ # If we request one more, with incompatible share, it is rejected
+ self.assertRaises(isc.bind10.socket_cache.ShareError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # The internals are not changed, so the same checks
+ self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+ self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+ def test_get_token_uncached(self):
+ """
+ Check a new socket is created when a corresponding one is missing.
+ """
+ token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+ 'test')
+ # The get_socket was called
+ self.assertTrue(self.__get_socket_called)
+ # It returned something
+ self.assertIsNotNone(token)
+ # Get the socket and check it looks OK
+ socket = self.__cache._waiting_tokens[token]
+ self.assertEqual(self.__address, socket.address)
+ self.assertEqual(1024, socket.port)
+ self.assertEqual(42, socket.fileno)
+ self.assertEqual('UDP', socket.protocol)
+ # The socket is properly cached
+ self.assertEqual({
+ 'UDP': {'192.0.2.1': {1024: socket}}
+ }, self.__cache._sockets)
+ # The token is both in the waiting sockets and the live tokens
+ self.assertEqual({token: socket}, self.__cache._waiting_tokens)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ # The token got the new share to block any relevant queries
+ self.assertEqual({token: ('ANY', 'test')}, socket.shares)
+ # The socket knows the token is waiting in it
+ self.assertEqual(set([token]), socket.waiting_tokens)
+
+ def test_get_token_excs(self):
+ """
+ Test that it is handled properly if the socket creator raises
+ some exceptions.
+ """
+ def raiseCreatorError(fatal):
+ raise isc.bind10.sockcreator.CreatorError('test error', fatal)
+ # First, fatal socket creator errors are passed through
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(True)
+ self.assertRaises(isc.bind10.sockcreator.CreatorError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+ # And nonfatal are converted to SocketError
+ self.get_socket = lambda addr, port, proto: raiseCreatorError(False)
+ self.assertRaises(isc.bind10.socket_cache.SocketError,
+ self.__cache.get_token, 'UDP', self.__address, 1024,
+ 'NO', 'test')
+
+ def test_get_socket(self):
+ """
+ Test that we can pick up a socket if we know a token.
+ """
+ token = "token"
+ app = 13
+ # No socket prepared there
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+ # Not changed
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(), self.__cache._live_tokens)
+ # Prepare a token there
+ self.__socket.waiting_tokens = set([token])
+ self.__socket.shares = {token: ('ANY', 'app')}
+ self.__cache._waiting_tokens = {token: self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set([token])
+ socket = self.__cache.get_socket(token, app)
+ # Received the fileno
+ self.assertEqual(42, socket)
+ # It moved from waiting to active ones
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({token: self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({13: set([token])}, self.__cache._active_apps)
+ self.assertEqual(set([token]), self.__cache._live_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({token: 13}, self.__socket.active_tokens)
+ # Trying to get it again fails
+ self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+
+ def test_drop_application(self):
+ """
+ Test that a drop_application calls drop_socket on all the sockets
+ held by the application.
+ """
+ sockets = set()
+ def drop_socket(token):
+ sockets.add(token)
+ # Mock the drop_socket so we know it is called
+ self.__cache.drop_socket = drop_socket
+ self.assertRaises(ValueError, self.__cache.drop_application,
+ 13)
+ self.assertEqual(set(), sockets)
+ # Put the tokens into active_apps. Nothing else should be touched
+ # by this call, so leave it alone.
+ self.__cache._active_apps = {
+ 1: set(['t1', 't2']),
+ 2: set(['t3'])
+ }
+ self.__cache.drop_application(1)
+ # We don't check the _active_apps, as it would be cleaned by
+ # drop_socket and we removed it.
+ self.assertEqual(set(['t1', 't2']), sockets)
+
+ def test_drop_socket(self):
+ """
+ Test the drop_socket call. It tests:
+ * That a socket that still has something to keep it alive is left alive
+ (both waiting and active).
+ * If not, it is deleted.
+ * All bookkeeping data around are properly removed.
+ * Of course the exception.
+ """
+ self.assertRaises(ValueError, self.__cache.drop_socket, "bad token")
+ self.__socket.active_tokens = {'t1': 1}
+ self.__socket.waiting_tokens = set(['t2'])
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {'t2': self.__socket}
+ self.__cache._active_tokens = {'t1': self.__socket}
+ self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+ self.__cache._live_tokens = set(['t1', 't2'])
+ self.__cache._active_apps = {1: set(['t1'])}
+ # We can't drop what wasn't picked up yet
+ self.assertRaises(ValueError, self.__cache.drop_socket, 't2')
+ self.assertEqual({'t1': 1}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')},
+ self.__socket.shares)
+ self.assertEqual({'t2': self.__socket}, self.__cache._waiting_tokens)
+ self.assertEqual({'t1': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t1', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t1'])}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # If we drop this, it survives because it waits for being picked up
+ self.__cache.drop_socket('t1')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t2']), self.__cache._live_tokens)
+ self.assertEqual({}, self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Fill it again, now two applications having the same socket
+ self.__socket.active_tokens = {'t1': 1, 't2': 2}
+ self.__socket.waiting_tokens = set()
+ self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+ self.__cache._waiting_tokens = {}
+ self.__cache._active_tokens = {
+ 't1': self.__socket,
+ 't2': self.__socket
+ }
+ self.__cache._live_tokens = set(['t1', 't2', 't3'])
+ self.assertEqual([], self._closes)
+ # We cheat here a little bit, the t3 doesn't exist anywhere else, but
+ # we need to check the app isn't removed too soon and it shouldn't
+ # matter anywhere else, so we just avoid the tiresome filling in
+ self.__cache._active_apps = {1: set(['t1', 't3']), 2: set(['t2'])}
+ # Drop it as t1. It should still live.
+ self.__cache.drop_socket('t1')
+ self.assertEqual({'t2': 2}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({'t2': self.__socket}, self.__cache._active_tokens)
+ self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+ self.__cache._sockets)
+ self.assertEqual(set(['t3', 't2']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3']), 2: set(['t2'])},
+ self.__cache._active_apps)
+ self.assertEqual([], self._closes)
+ # Drop it again, from the other application. It should get removed
+ # and closed.
+ self.__cache.drop_socket('t2')
+ self.assertEqual({}, self.__socket.active_tokens)
+ self.assertEqual(set(), self.__socket.waiting_tokens)
+ self.assertEqual({}, self.__socket.shares)
+ self.assertEqual({}, self.__cache._waiting_tokens)
+ self.assertEqual({}, self.__cache._active_tokens)
+ self.assertEqual({}, self.__cache._sockets)
+ self.assertEqual(set(['t3']), self.__cache._live_tokens)
+ self.assertEqual({1: set(['t3'])}, self.__cache._active_apps)
+ # The cache doesn't hold the socket. So when we remove it ourselves,
+ # it should get closed.
+ self.__socket = None
+ self.assertEqual([42], self._closes)
+
+if __name__ == '__main__':
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index a5b4ca3..47f3dbc 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -17,12 +17,14 @@ datasrc_la_SOURCES += client_python.cc client_python.h
datasrc_la_SOURCES += iterator_python.cc iterator_python.h
datasrc_la_SOURCES += finder_python.cc finder_python.h
datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
datasrc_la_LDFLAGS += -module
datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
datasrc_la_LIBADD += $(PYTHON_LIB)
@@ -30,6 +32,7 @@ EXTRA_DIST = client_inc.cc
EXTRA_DIST += finder_inc.cc
EXTRA_DIST += iterator_inc.cc
EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index 6465bf3..e0c0f06 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -89,7 +89,7 @@ None\n\
";
const char* const DataSourceClient_getIterator_doc = "\
-get_iterator(name, adjust_ttl=True) -> ZoneIterator\n\
+get_iterator(name, separate_rrs=False) -> ZoneIterator\n\
\n\
Returns an iterator to the given zone.\n\
\n\
@@ -111,17 +111,18 @@ anything else.\n\
Parameters:\n\
isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
nearest match as find_zone.\n\
- adjust_ttl If True, the iterator will treat RRs with the same\n\
- name and type but different TTL values to be of the\n\
- same RRset, and will adjust the TTL to the lowest\n\
- value found. If false, it will consider the RR to\n\
- belong to a different RRset.\n\
+ separate_rrs If true, the iterator will return each RR as a\n\
+ new RRset object. If false, the iterator will\n\
+ combine consecutive RRs with the same name and type\n\
+ into 1 RRset. The capitalization of the RRset will\n\
+ be that of the first RR read, and TTLs will be\n\
+ adjusted to the lowest one found.\n\
\n\
Return Value(s): Pointer to the iterator.\n\
";
const char* const DataSourceClient_getUpdater_doc = "\
-get_updater(name, replace) -> ZoneUpdater\n\
+get_updater(name, replace, journaling=False) -> ZoneUpdater\n\
\n\
Return an updater to make updates to a specific zone.\n\
\n\
@@ -162,6 +163,22 @@ A data source can be \"read only\" or can prohibit partial updates. In\n\
such cases this method will result in an isc.datasrc.NotImplemented exception\n\
unconditionally or when replace is false).\n\
\n\
+If journaling is True, the data source should store a journal of\n\
+changes. These can be used later on by, for example, IXFR-out.\n\
+However, the parameter is a hint only. It might be unable to store\n\
+them and they would be silently discarded. Or it might need to store\n\
+them no matter what (for example a git-based data source would store\n\
+journal implicitly). When the journaling is True, it requires that the\n\
+following update be formatted as IXFR transfer (SOA to be removed,\n\
+bunch of RRs to be removed, SOA to be added, bunch of RRs to be added,\n\
+and possibly repeated). However, it is not required that the updater\n\
+checks that. If it is False, it must not require so and must accept\n\
+any order of changes.\n\
+\n\
+We don't support erasing the whole zone (by replace being True) and\n\
+saving a journal at the same time. In such situation, isc.datasrc.Error\n\
+is thrown.\n\
+\n\
Exceptions:\n\
isc.datasrc. NotImplemented The underlying data source does not support\n\
updates.\n\
@@ -170,6 +187,63 @@ Exceptions:\n\
Parameters:\n\
name The zone name to be updated\n\
replace Whether to delete existing RRs before making updates\n\
+ journaling The zone updater should store a journal of the changes.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// pointer -> (removed)
+// Null -> None
+// exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+ (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify (if they exist) the corresponding difference between the specified\n\
+versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if specified range of difference for the\n\
+zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method for that type of data source can throw an exception of class\n\
+isc.datasrc.NotImplemented.\n\
\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The data source does not support differences.\n\
+ isc.datasrc.Error Other operational errors at the data source level.\n\
+ SystemError An unexpected error in the backend C++ code. Either a rare\n\
+ system error such as short memory or an implementation bug.\n\
+\n\
+Parameters:\n\
+ zone The name of the zone for which the difference should be\n\
+ retrieved.\n\
+ begin_serial The SOA serial of the beginning version of the\n\
+ differences.\n\
+ end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n\
";
} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 49235a6..bdf84a3 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -38,6 +38,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include "client_inc.cc"
using namespace std;
@@ -84,26 +85,26 @@ PyObject*
DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
PyObject* name_obj;
- PyObject* adjust_ttl_obj = NULL;
+ PyObject* separate_rrs_obj = NULL;
if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
- &adjust_ttl_obj)) {
+ &separate_rrs_obj)) {
try {
- bool adjust_ttl = true;
- if (adjust_ttl_obj != NULL) {
+ bool separate_rrs = false;
+ if (separate_rrs_obj != NULL) {
// store result in local var so we can explicitly check for
// -1 error return value
- int adjust_ttl_no = PyObject_Not(adjust_ttl_obj);
- if (adjust_ttl_no == 1) {
- adjust_ttl = false;
- } else if (adjust_ttl_no == -1) {
+ int separate_rrs_true = PyObject_IsTrue(separate_rrs_obj);
+ if (separate_rrs_true == 1) {
+ separate_rrs = true;
+ } else if (separate_rrs_true == -1) {
PyErr_SetString(getDataSourceException("Error"),
- "Error getting value of adjust_ttl");
+ "Error getting value of separate_rrs");
return (NULL);
}
}
return (createZoneIteratorObject(
self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
- adjust_ttl),
+ separate_rrs),
po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
@@ -129,14 +130,17 @@ PyObject*
DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
PyObject *name_obj;
- PyObject *replace_obj;
- if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
- PyBool_Check(replace_obj)) {
- bool replace = (replace_obj != Py_False);
+ PyObject *replace_obj = NULL;
+ PyObject *journaling_obj = Py_False;
+ if (PyArg_ParseTuple(args, "O!O|O", &name_type, &name_obj,
+ &replace_obj, &journaling_obj) &&
+ PyBool_Check(replace_obj) && PyBool_Check(journaling_obj)) {
+ const bool replace = (replace_obj != Py_False);
+ const bool journaling = (journaling_obj == Py_True);
try {
ZoneUpdaterPtr updater =
self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
- replace);
+ replace, journaling);
if (!updater) {
return (Py_None);
}
@@ -157,10 +161,56 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
return (NULL);
}
} else {
+ // PyBool_Check doesn't set the error, so we have to set it ourselves.
+ if (replace_obj != NULL && !PyBool_Check(replace_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'replace' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
+ if (!PyBool_Check(journaling_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'journaling' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
return (NULL);
}
}
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ unsigned long begin_obj, end_obj;
+
+ if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+ &begin_obj, &end_obj)) {
+ try {
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ self->cppobj->getInstance().getJournalReader(
+ PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+ static_cast<uint32_t>(end_obj));
+ PyObject* po_reader;
+ if (result.first == ZoneJournalReader::SUCCESS) {
+ po_reader = createZoneJournalReaderObject(result.second,
+ po_self);
+ } else {
+ po_reader = Py_None;
+ Py_INCREF(po_reader); // this will soon be released
+ }
+ PyObjectContainer container(po_reader);
+ return (Py_BuildValue("(iO)", result.first, container.get()));
+ } catch (const isc::NotImplemented& ex) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ex.what());
+ } catch (const DataSourceError& ex) {
+ PyErr_SetString(getDataSourceException("Error"), ex.what());
+ } catch (const std::exception& ex) {
+ PyErr_SetString(PyExc_SystemError, ex.what());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+ }
+ }
+ return (NULL);
+}
+
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -168,18 +218,21 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
// 3. Argument type
// 4. Documentation
PyMethodDef DataSourceClient_methods[] = {
- { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
- METH_VARARGS, DataSourceClient_findZone_doc },
+ { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+ DataSourceClient_findZone_doc },
{ "get_iterator",
- reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator, METH_VARARGS,
DataSourceClient_getIterator_doc },
- { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ { "get_updater", DataSourceClient_getUpdater,
METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { "get_journal_reader", DataSourceClient_getJournalReader,
+ METH_VARARGS, DataSourceClient_getJournalReader_doc },
{ NULL, NULL, 0, NULL }
};
int
-DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
char* ds_type_str;
char* ds_config_str;
try {
@@ -224,7 +277,8 @@ DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
}
void
-DataSourceClient_destroy(s_DataSourceClient* const self) {
+DataSourceClient_destroy(PyObject* po_self) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
delete self->cppobj;
self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
@@ -243,7 +297,7 @@ PyTypeObject datasourceclient_type = {
"datasrc.DataSourceClient",
sizeof(s_DataSourceClient), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ DataSourceClient_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -274,7 +328,7 @@ PyTypeObject datasourceclient_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ DataSourceClient_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 6ab29d8..1573b81 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -27,6 +27,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include <util/python/pycppwrapper_util.h>
#include <dns/python/pydnspp_common.h>
@@ -192,6 +193,41 @@ initModulePart_ZoneUpdater(PyObject* mod) {
return (true);
}
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+ if (PyType_Ready(&journal_reader_type) < 0) {
+ return (false);
+ }
+ void* p = &journal_reader_type;
+ if (PyModule_AddObject(mod, "ZoneJournalReader",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&journal_reader_type);
+
+ try {
+ installClassVariable(journal_reader_type, "SUCCESS",
+ Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+ installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_ZONE));
+ installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_VERSION));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in ZoneJournalReader initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in ZoneJournalReader initialization");
+ return (false);
+ }
+
+ return (true);
+}
PyObject* po_DataSourceError;
PyObject* po_NotImplemented;
@@ -239,6 +275,11 @@ PyInit_datasrc(void) {
return (NULL);
}
+ if (!initModulePart_ZoneJournalReader(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
try {
po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
NULL);
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index 4a00e78..82c5fdc 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -46,6 +46,7 @@ Return the RR class of the zone.\n\
// - Return type: use tuple instead of the dedicated FindResult type
// - NULL->None
// - exceptions
+// - description of the 'target' parameter (must be None for now)
const char* const ZoneFinder_find_doc = "\
find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
\n\
@@ -74,6 +75,7 @@ answer for the search key. Specifically,\n\
- If the target isn't None, all RRsets under the domain are inserted\n\
there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
 instead of normal processing. This is intended to handle ANY query.\n\
+ (Note: the Python version doesn't support this feature yet)\n\
\n\
Note: This behavior is controversial as we discussed in\n\
https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
@@ -105,8 +107,7 @@ internal error in the datasource.\n\
Parameters:\n\
name The domain name to be searched for.\n\
type The RR type to be searched for.\n\
- target If target is not None, insert all RRs under the domain\n\
- into it.\n\
+ target Must be None.\n\
options The search options.\n\
\n\
Return Value(s): A tuple of a result code (integer) and an RRset object\n\
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 6585049..7f74133 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -53,26 +53,29 @@ namespace isc_datasrc_internal {
PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
if (finder == NULL) {
PyErr_SetString(getDataSourceException("Error"),
- "Internal error in find() wrapper; finder object NULL");
+ "Internal error in find() wrapper; "
+ "finder object NULL");
return (NULL);
}
- PyObject *name;
- PyObject *rrtype;
- PyObject *target;
- int options_int;
- if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ PyObject* name;
+ PyObject* rrtype;
+ PyObject* target = Py_None;
+ unsigned int options_int = ZoneFinder::FIND_DEFAULT;
+ if (PyArg_ParseTuple(args, "O!O!|OI", &name_type, &name,
&rrtype_type, &rrtype,
&target, &options_int)) {
try {
+ if (target != Py_None) {
+ PyErr_SetString(PyExc_TypeError,
+ "find(): target must be None in this version");
+ return (NULL);
+ }
ZoneFinder::FindOptions options =
static_cast<ZoneFinder::FindOptions>(options_int);
- ZoneFinder::FindResult find_result(
- finder->find(PyName_ToName(name),
- PyRRType_ToRRType(rrtype),
- NULL,
- options
- ));
- ZoneFinder::Result r = find_result.code;
+ const ZoneFinder::FindResult find_result(
+ finder->find(PyName_ToName(name), PyRRType_ToRRType(rrtype),
+ NULL, options));
+ const ZoneFinder::Result r = find_result.code;
isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
if (rrsp) {
// Use N instead of O so the refcount isn't increased twice
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific version of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the entire sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// ConstRRsetPtr -> RRset
+// Null -> None
+// InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in straightforward realization of this concept. The mapping\n\
+between the conceptual difference and the actual implementation is\n\
+hidden in each derived class).\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the difference along with the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) to the last of the added RRs with\n\
+the ending SOA, and each call to this method returns one RR in the\n\
+form of an RRset that contains exactly one RDATA in the order of the\n\
+sequences.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in the form of IXFR-style. For\n\
+example, the first RR is supposed to be an SOA, and it should normally\n\
+be the case, but this interface does not necessarily require the\n\
+derived class implementation ensure this. Normally the differences are\n\
+expected to be stored using this API (via a ZoneUpdater object), and\n\
+as long as that is the case and the underlying implementation follows\n\
+the requirement of the API, the result of this method should be a\n\
+valid IXFR-style sequence. So this API does not mandate the almost\n\
+redundant check as part of the interface. If the application needs to\n\
+be 100% sure, it must check the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+ ValueError The method is called beyond the end of the\n\
+ difference sequences.\n\
+ isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+ created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+ s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+ ZoneJournalReaderPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we use INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneJournalReader cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+ s_ZoneJournalReader* const self =
+ static_cast<s_ZoneJournalReader*>(po_self) ;
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+ s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::InvalidOperation& ex) {
+ PyErr_SetString(PyExc_ValueError, ex.what());
+ return (NULL);
+ } catch (const isc::Exception& isce) {
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+ PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+ { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+ ZoneJournalReader_getNextDiff_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneJournalReader",
+ sizeof(s_ZoneJournalReader), // tp_basicsize
+ 0, // tp_itemsize
+ ZoneJournalReader_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneJournalReader_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneJournalReader_iter, // tp_iter
+ ZoneJournalReader_next, // tp_iternext
+ ZoneJournalReader_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ ZoneJournalReader_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+ PyObject* base_obj)
+{
+ s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+ journal_reader_type.tp_alloc(&journal_reader_type, 0));
+ if (po != NULL) {
+ po->cppobj = source;
+ po->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on
+/// Its refcount is increased, and will be decreased when
+/// this reader is destroyed, making sure that the
+/// base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+ isc::datasrc::ZoneJournalReaderPtr source,
+ PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index fd63741..daa12fc 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -72,6 +72,14 @@ def create(cur):
rdtype STRING NOT NULL COLLATE NOCASE,
rdata STRING NOT NULL)""")
cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+ cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ version INTEGER NOT NULL,
+ operation INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rrtype STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdata STRING NOT NULL)""")
row = [1]
cur.execute("COMMIT TRANSACTION")
return row
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 411b5cc..ab89b93 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,7 @@ EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
@@ -33,5 +34,6 @@ endif
PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
TESTDATA_WRITE_PATH=$(abs_builddir) \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 68e075a..3e4a1d7 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -15,9 +15,11 @@
import isc.log
import isc.datasrc
-from isc.datasrc import ZoneFinder
-import isc.dns
+from isc.datasrc import ZoneFinder, ZoneJournalReader
+from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
import unittest
+import sqlite3
import os
import shutil
import sys
@@ -39,19 +41,6 @@ def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
rrset_list.append(rrset_to_add)
-# helper function, we have no direct rrset comparison atm
-def rrsets_equal(a, b):
- # no accessor for sigs either (so this only checks name, class, type, ttl,
- # and rdata)
- # also, because of the fake data in rrsigs, if the type is rrsig, the
- # rdata is not checked
- return a.get_name() == b.get_name() and\
- a.get_class() == b.get_class() and\
- a.get_type() == b.get_type() and \
- a.get_ttl() == b.get_ttl() and\
- (a.get_type() == isc.dns.RRType.RRSIG() or
- sorted(a.get_rdata()) == sorted(b.get_rdata()))
-
# returns true if rrset is in expected_rrsets
# will remove the rrset from expected_rrsets if found
def check_for_rrset(expected_rrsets, rrset):
@@ -61,6 +50,13 @@ def check_for_rrset(expected_rrsets, rrset):
return True
return False
+def create_soa(serial):
+ soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ 'ns1.example.org. admin.example.org. ' +
+ str(serial) + ' 3600 1800 2419200 7200'))
+ return soa
+
class DataSrcClient(unittest.TestCase):
def test_(self):
@@ -82,13 +78,12 @@ class DataSrcClient(unittest.TestCase):
isc.datasrc.DataSourceClient, "memory",
"{ \"foo\": 1 }")
- @unittest.skip("This test may fail depending on sqlite3 library behavior")
def test_iterate(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
# for RRSIGS, the TTL's are currently modified. This test should
# start failing when we fix that.
- rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), False)
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), True)
# we do not know the order in which they are returned by the iterator
# but we do want to check them, so we put all records into one list
@@ -115,7 +110,11 @@ class DataSrcClient(unittest.TestCase):
"256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
"N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
"TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
- "5fs0dE/xLztL/CzZ",
+ "5fs0dE/xLztL/CzZ"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
"257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
"KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
"ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
@@ -127,8 +126,16 @@ class DataSrcClient(unittest.TestCase):
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
[
- "dns01.example.com.",
- "dns02.example.com.",
+ "dns01.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns02.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
"dns03.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
@@ -139,15 +146,19 @@ class DataSrcClient(unittest.TestCase):
# For RRSIGS, we can't add the fake data through the API, so we
# simply pass no rdata at all (which is skipped by the check later)
- # Since we passed adjust_ttl = False to get_iterator, we get several
+ # Since we passed separate_rrs = True to get_iterator, we get several
# sets of RRSIGs, one for each TTL
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
[
"master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
@@ -191,26 +202,26 @@ class DataSrcClient(unittest.TestCase):
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
- # Without the adjust_ttl argument, it should return 55 RRsets
+ # Without the separate_rrs argument, it should return 55 RRsets
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"))
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
self.assertEqual(55, len(list(rrets)))
- # same test, but now with explicit True argument for adjust_ttl
+ # same test, but now with explicit False argument for separate_rrs
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
- rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
self.assertEqual(55, len(list(rrets)))
# Count should be 71 if we request individual rrsets for differing ttls
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
- rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
- self.assertEqual(71, len(list(rrets)))
+ self.assertEqual(84, len(list(rrets)))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
@@ -274,6 +285,24 @@ class DataSrcClient(unittest.TestCase):
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
+ # Check the optional parameters are optional
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A())
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(), None)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # Invalid value for the "target"
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(), True)
+
result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
isc.dns.RRType.A(),
None,
@@ -374,6 +403,36 @@ class DataSrcUpdater(unittest.TestCase):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+ def test_update_finder(self):
+ # Check basic behavior of updater's finder
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # Omit optional parameters
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A())
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(), None)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # Invalid value for 'target'
+ self.assertRaises(TypeError, updater.find,
+ isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(), 1)
+
def test_update_delete_commit(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
@@ -565,6 +624,230 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual(None, iterator.get_soa())
self.assertEqual(None, iterator.get_next_rrset())
+class JournalWrite(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+
+ def tearDown(self):
+ self.dsc = None
+ self.updater = None
+
+ def check_journal(self, expected_list):
+ # This assumes sqlite3 DB and directly fetches stored data from
+ # the DB file. It should be generalized using ZoneJournalReader
+ # once it's supported.
+ conn = sqlite3.connect(WRITE_ZONE_DB_FILE)
+ cur = conn.cursor()
+ cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+ actual_list = cur.fetchall()
+ self.assertEqual(len(expected_list), len(actual_list))
+ for (expected, actual) in zip(expected_list, actual_list):
+ self.assertEqual(expected, actual)
+ conn.close()
+
+ def create_a(self, address):
+ a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+ RRTTL(3600))
+ a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return (a_rr)
+
+ def test_journal_write(self):
+ # This is a straightforward port of the C++ 'journal' test
+        # Note: we add/delete 'out of zone' data (example.org in the
+        # example.com zone) for convenience.
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.delete_rrset(self.create_a('192.0.2.2'))
+ self.updater.add_rrset(create_soa(1235))
+ self.updater.add_rrset(self.create_a('192.0.2.2'))
+ self.updater.commit()
+
+ expected = []
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1234 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1235 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ self.check_journal(expected)
+
+ def test_journal_write_multiple(self):
+ # This is a straightforward port of the C++ 'journalMultiple' test
+ expected = []
+ for i in range(1, 100):
+ self.updater.delete_rrset(create_soa(1234 + i - 1))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i - 1) + " 3600 1800 2419200 7200"))
+ self.updater.add_rrset(create_soa(1234 + i))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i) + " 3600 1800 2419200 7200"))
+ self.updater.commit()
+ self.check_journal(expected)
+
+ def test_journal_write_bad_sequence(self):
+ # This is a straightforward port of the C++ 'journalBadSequence' test
+
+ # Delete A before SOA
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ self.create_a('192.0.2.1'))
+ # Add before delete
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1234))
+ # Add A before SOA
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ self.create_a('192.0.2.1'))
+ # Commit before add
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.commit)
+ # Delete two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ create_soa(1235))
+ # Add two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.add_rrset(create_soa(1235))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1236))
+
+ def test_journal_write_onerase(self):
+ self.updater = None
+ self.assertRaises(isc.datasrc.Error, self.dsc.get_updater,
+ Name("example.com"), True, True)
+
+ def test_journal_write_badparam(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ self.assertRaises(TypeError, dsc.get_updater, 0, False, True)
+ self.assertRaises(TypeError, dsc.get_updater, Name('example.com'),
+ False, 0)
+ self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
+ 1, True)
+
+class JournalRead(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ self.zname = Name('example.com')
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.reader = None
+
+ def tearDown(self):
+ # Some tests leave the reader in the middle of sequence, holding
+ # the lock. Since the unittest framework keeps each test object
+ # until the end of the entire tests, we need to make sure the reader
+ # is released at the end of each test. The client shouldn't do harm
+ # but we clean it up, too, just in case.
+ self.dsc = None
+ self.reader = None
+
+ def make_simple_diff(self, begin_soa):
+ updater = self.dsc.get_updater(self.zname, False, True)
+ updater.delete_rrset(begin_soa)
+ updater.add_rrset(create_soa(1235))
+ updater.commit()
+
+ def test_journal_reader(self):
+ # This is a straightforward port of the C++ 'journalReader' test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+ 1235)
+ self.assertEqual(ZoneJournalReader.SUCCESS, result)
+ self.assertNotEqual(None, self.reader)
+ rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+ rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+ self.assertEqual(None, self.reader.get_next_diff())
+ self.assertRaises(ValueError, self.reader.get_next_diff)
+
+ def test_journal_reader_with_large_serial(self):
+ # similar to the previous one, but use a very large serial to check
+ # if the python wrapper code has unexpected integer overflow
+ self.make_simple_diff(create_soa(4294967295))
+ result, self.reader = self.dsc.get_journal_reader(self.zname,
+ 4294967295, 1235)
+ self.assertNotEqual(None, self.reader)
+ # dump to text and compare them in case create_soa happens to have
+ # an overflow bug
+ self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+ 'admin.example.org. 4294967295 3600 1800 ' + \
+ '2419200 7200\n',
+ self.reader.get_next_diff().to_text())
+
+ def test_journal_reader_large_journal(self):
+ # This is a straightforward port of the C++ 'readLargeJournal' test.
+ # In this test we use the ZoneJournalReader object as a Python
+ # iterator.
+ updater = self.dsc.get_updater(self.zname, False, True)
+ expected = []
+ for i in range(0, 100):
+ rrset = create_soa(1234 + i)
+ updater.delete_rrset(rrset)
+ expected.append(rrset)
+
+ rrset = create_soa(1234 + i + 1)
+ updater.add_rrset(rrset)
+ expected.append(rrset)
+
+ updater.commit()
+ _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+ self.assertNotEqual(None, self.reader)
+ i = 0
+ for rr in self.reader:
+ self.assertNotEqual(len(expected), i)
+ rrsets_equal(expected[i], rr)
+ i += 1
+ self.assertEqual(len(expected), i)
+
+ def test_journal_reader_no_range(self):
+ # This is a straightforward port of the C++ 'readJournalForNoRange'
+ # test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+ 1235)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_no_zone(self):
+ # This is a straightforward port of the C++ 'journalReaderForNXZone'
+ # test
+ result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+ 0, 1)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_bad_params(self):
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ 'example.com.', 0, 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 'must be int', 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 0, 'must be int')
+
+ def test_journal_reader_direct_construct(self):
+ # ZoneJournalReader can only be constructed via a factory
+ self.assertRaises(TypeError, ZoneJournalReader)
+
+ def test_journal_reader_old_schema(self):
+ # The database doesn't have a "diffs" table.
+ dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
+ client = isc.datasrc.DataSourceClient("sqlite3",
+ "{ \"database_file\": \"" + \
+ dbfile + "\" }")
+ self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
+ self.zname, 0, 1)
+
if __name__ == "__main__":
isc.log.init("bind10")
isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index c7112b3..2e4a28f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -303,7 +303,8 @@ public:
extern PyTypeObject logger_type;
int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* name;
if (!PyArg_ParseTuple(args, "s", &name)) {
return (-1);
@@ -323,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
}
void
-Logger_destroy(LoggerWrapper* const self) {
+//Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
delete self->logger_;
self->logger_ = NULL;
Py_TYPE(self)->tp_free(self);
@@ -351,7 +354,8 @@ severityToText(const Severity& severity) {
}
PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("s",
severityToText(
@@ -368,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
}
@@ -383,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* severity;
int dbgLevel = 0;
if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -425,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
}
PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
}
PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
}
PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
}
PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
}
PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
int level = MIN_DEBUG_LEVEL;
if (!PyArg_ParseTuple(args, "|i", &level)) {
return (NULL);
@@ -470,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
string
objectToStr(PyObject* object, bool convert) {
- PyObject* cleanup(NULL);
+ PyObjectContainer objstr_container;
if (convert) {
- object = cleanup = PyObject_Str(object);
- if (object == NULL) {
+ PyObject* text_obj = PyObject_Str(object);
+ if (text_obj == NULL) {
+ // PyObject_Str could fail for various reasons, including because
+ // the object cannot be converted to a string. We exit with
+ // InternalError to preserve the PyErr set in PyObject_Str.
throw InternalError();
}
- }
- const char* value;
- PyObject* tuple(Py_BuildValue("(O)", object));
- if (tuple == NULL) {
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- throw InternalError();
+ objstr_container.reset(text_obj);
+ object = objstr_container.get();
}
- if (!PyArg_ParseTuple(tuple, "s", &value)) {
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
+ PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+ const char* value;
+ if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
throw InternalError();
}
- string result(value);
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- return (result);
+ return (string(value));
}
// Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
PyObject*
Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
try {
- Py_ssize_t number(PyObject_Length(args));
+ const Py_ssize_t number(PyObject_Length(args));
if (number < 0) {
return (NULL);
}
// Which argument is the first to format?
- size_t start(1);
- if (dbgLevel) {
- start ++;
- }
-
+ const size_t start = dbgLevel ? 2 : 1;
if (number < start) {
return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
"logging call, at least %zu needed and %zd "
@@ -524,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
}
// Extract the fixed arguments
- PyObject *midO(PySequence_GetItem(args, start - 1));
- if (midO == NULL) {
- return (NULL);
- }
- string mid(objectToStr(midO, false));
long dbg(0);
if (dbgLevel) {
- PyObject *dbgO(PySequence_GetItem(args, 0));
- if (dbgO == NULL) {
- return (NULL);
- }
- dbg = PyLong_AsLong(dbgO);
+ PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+ dbg = PyLong_AsLong(dbg_container.get());
if (PyErr_Occurred()) {
return (NULL);
}
@@ -544,16 +533,16 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// We create the logging message right now. If we fail to convert a
// parameter to string, at least the part that we already did will
// be output
+ PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+ const string mid(objectToStr(msgid_container.get(), false));
Logger::Formatter formatter(function(dbg, mid.c_str()));
// Now process the rest of parameters, convert each to string and put
// into the formatter. It will print itself in the end.
for (size_t i(start); i < number; ++ i) {
- PyObject* param(PySequence_GetItem(args, i));
- if (param == NULL) {
- return (NULL);
- }
- formatter = formatter.arg(objectToStr(param, true));
+ PyObjectContainer param_container(PySequence_GetItem(args, i));
+ formatter = formatter.arg(objectToStr(param_container.get(),
+ true));
}
Py_RETURN_NONE;
}
@@ -573,72 +562,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// Now map the functions into the performOutput. I wish C++ could do
// functional programming.
PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
args, true));
}
PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
args, false));
}
PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
args, false));
}
PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
args, false));
}
PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
args, false));
}
PyMethodDef loggerMethods[] = {
- { "get_effective_severity",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
- METH_NOARGS, "Returns the effective logging severity as string" },
- { "get_effective_debug_level",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
- METH_NOARGS, "Returns the current debug level." },
- { "set_severity",
- reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+ { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+ "Returns the effective logging severity as string" },
+ { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+ "Returns the current debug level." },
+ { "set_severity", Logger_setSeverity, METH_VARARGS,
"Sets the severity of a logger. The parameters are severity as a "
"string and, optionally, a debug level (integer in range 0-99). "
"The severity may be NULL, in which case an inherited value is taken."
},
- { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
- METH_VARARGS, "Returns if the logger would log debug message now. "
+ { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+ "Returns if the logger would log debug message now. "
"You can provide a desired debug level." },
- { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
- METH_NOARGS, "Returns if the logger would log info message now." },
- { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
- METH_NOARGS, "Returns if the logger would log warn message now." },
- { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
- METH_NOARGS, "Returns if the logger would log error message now." },
- { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
- METH_NOARGS, "Returns if the logger would log fatal message now." },
- { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+ { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+ "Returns if the logger would log info message now." },
+ { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+ "Returns if the logger would log warn message now." },
+ { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+ "Returns if the logger would log error message now." },
+ { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+ "Returns if the logger would log fatal message now." },
+ { "debug", Logger_debug, METH_VARARGS,
"Logs a debug-severity message. It takes the debug level, message ID "
"and any number of stringifiable arguments to the message." },
- { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+ { "info", Logger_info, METH_VARARGS,
"Logs an info-severity message. It takes the message ID and any "
"number of stringifiable arguments to the message." },
- { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+ { "warn", Logger_warn, METH_VARARGS,
"Logs a warn-severity message. It takes the message ID and any "
"number of stringifiable arguments to the message." },
- { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+ { "error", Logger_error, METH_VARARGS,
"Logs an error-severity message. It takes the message ID and any "
"number of stringifiable arguments to the message." },
- { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+ { "fatal", Logger_fatal, METH_VARARGS,
"Logs a fatal-severity message. It takes the message ID and any "
"number of stringifiable arguments to the message." },
{ NULL, NULL, 0, NULL }
@@ -649,7 +640,7 @@ PyTypeObject logger_type = {
"isc.log.Logger",
sizeof(LoggerWrapper), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(Logger_destroy), // tp_dealloc
+ Logger_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -681,7 +672,7 @@ PyTypeObject logger_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(Logger_init), // tp_init
+ Logger_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -718,21 +709,21 @@ PyInit_log(void) {
return (NULL);
}
- if (PyType_Ready(&logger_type) < 0) {
- return (NULL);
- }
-
- if (PyModule_AddObject(mod, "Logger",
- static_cast<PyObject*>(static_cast<void*>(
- &logger_type))) < 0) {
- return (NULL);
- }
-
- // Add in the definitions of the standard debug levels. These can then
- // be referred to in Python through the constants log.DBGLVL_XXX.
+ // Finalize logger class and add in the definitions of the standard debug
+ // levels. These can then be referred to in Python through the constants
+ // log.DBGLVL_XXX.
// N.B. These should be kept in sync with the constants defined in
// log_dbglevels.h.
try {
+ if (PyType_Ready(&logger_type) < 0) {
+ throw InternalError();
+ }
+ void* p = &logger_type;
+ if (PyModule_AddObject(mod, "Logger",
+ static_cast<PyObject*>(p)) < 0) {
+ throw InternalError();
+ }
+
installClassVariable(logger_type, "DBGLVL_START_SHUT",
Py_BuildValue("I", DBGLVL_START_SHUT));
installClassVariable(logger_type, "DBGLVL_COMMAND",
@@ -747,15 +738,20 @@ PyInit_log(void) {
Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+ } catch (const InternalError&) {
+ Py_DECREF(mod);
+ return (NULL);
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in Log initialization: " +
std::string(ex.what());
PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ Py_DECREF(mod);
return (NULL);
} catch (...) {
PyErr_SetString(PyExc_SystemError,
"Unexpected failure in Log initialization");
+ Py_DECREF(mod);
return (NULL);
}
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 8deaeae..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
import isc.log
import unittest
import json
+import sys
import bind10_config
from isc.config.ccsession import path_search
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
def setUp(self):
isc.log.init("root", "DEBUG", 50)
self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+ self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
# Checks defaults of the logger
def defaults(self, logger):
@@ -169,5 +171,34 @@ class Logger(unittest.TestCase):
logger = isc.log.Logger("child")
self.assertEqual(logger.DBGLVL_COMMAND, 10)
+ def test_param_reference(self):
+ """
+        Check that passing a parameter to a logger does not leak a reference.
+ """
+ class LogParam:
+ def __str__(self):
+ return 'LogParam'
+ logger = isc.log.Logger("child")
+ param = LogParam()
+ orig_msgrefcnt = sys.getrefcount(param)
+ orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+ logger.info(self.TEST_MSG, param);
+ self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ # intentionally pass an invalid type for debug level. It will
+ # result in TypeError. The passed object still shouldn't leak a
+ # reference.
+ self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ def test_bad_parameter(self):
+ # a log parameter cannot be converted to a string object.
+ class LogParam:
+ def __str__(self):
+ raise ValueError("LogParam can't be converted to string")
+ logger = isc.log.Logger("child")
+ self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 6b91c87..64a4b3e 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,6 +21,7 @@ import threading
import time
import errno
from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
from isc.net import addr
import isc
from isc.log_messages.notify_out_messages import *
@@ -31,7 +32,7 @@ logger = isc.log.Logger("notify_out")
# we can't import we should not start anyway, and logging an error
# is a bad idea since the logging system is most likely not
# initialized yet. see trac ticket #1103
-from pydnspp import *
+from isc.dns import *
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
_MAX_NOTIFY_NUM = 30
@@ -51,6 +52,24 @@ _BAD_REPLY_PACKET = 5
SOCK_DATA = b's'
+# borrowed from xfrin.py @ #1298. We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text() + '/' + str(zone_class)
+
+class NotifyOutDataSourceError(Exception):
+    """An exception raised when a data source error happens within notify_out.
+
+ This exception is expected to be caught within the notify_out module.
+
+ """
+ pass
+
class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
@@ -123,16 +142,20 @@ class NotifyOut:
self._nonblock_event = threading.Event()
def _init_notify_out(self, datasrc_file):
- '''Get all the zones name and its notify target's address
+        '''Get all the zones' names and their notify targets' addresses.
+
TODO, currently the zones are got by going through the zone
table in database. There should be a better way to get them
and also the setting 'also_notify', and there should be one
- mechanism to cover the changed datasrc.'''
+ mechanism to cover the changed datasrc.
+
+ '''
self._db_file = datasrc_file
for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
zone_id = (zone_name, zone_class)
self._notify_infos[zone_id] = ZoneNotifyInfo(zone_name, zone_class)
- slaves = self._get_notify_slaves_from_ns(zone_name)
+ slaves = self._get_notify_slaves_from_ns(Name(zone_name),
+ RRClass(zone_class))
for item in slaves:
self._notify_infos[zone_id].notify_slaves.append((item, 53))
@@ -234,7 +257,7 @@ class NotifyOut:
def _get_rdata_data(self, rr):
return rr[7].strip()
- def _get_notify_slaves_from_ns(self, zone_name):
+ def _get_notify_slaves_from_ns(self, zone_name, zone_class):
'''Get all NS records, then remove the primary master from ns rrset,
then use the name in NS record rdata part to get the a/aaaa records
in the same zone. the targets listed in a/aaaa record rdata are treated
@@ -242,28 +265,52 @@ class NotifyOut:
Note: this is the simplest way to get the address of slaves,
but not correct, it can't handle the delegation slaves, or the CNAME
and DNAME logic.
- TODO. the function should be provided by one library.'''
- ns_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'NS', self._db_file)
- soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- ns_rr_name = []
- for ns in ns_rrset:
- ns_rr_name.append(self._get_rdata_data(ns))
-
- if len(soa_rrset) > 0:
- sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
- if sname in ns_rr_name:
- ns_rr_name.remove(sname)
-
- addr_list = []
- for rr_name in ns_rr_name:
- a_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'A', self._db_file)
- aaaa_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'AAAA', self._db_file)
- for rr in a_rrset:
- addr_list.append(self._get_rdata_data(rr))
- for rr in aaaa_rrset:
- addr_list.append(self._get_rdata_data(rr))
-
- return addr_list
+ TODO. the function should be provided by one library.
+
+ '''
+ # Prepare data source client. This should eventually be moved to
+ # an earlier stage of initialization and also support multiple
+ # data sources.
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ try:
+ result, finder = DataSourceClient('sqlite3',
+ datasrc_config).find_zone(
+ zone_name)
+ except isc.datasrc.Error as ex:
+ logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
+ return []
+ if result is not DataSourceClient.SUCCESS:
+ logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
+ format_zone_str(zone_name, zone_class))
+ return []
+
+ result, ns_rrset = finder.find(zone_name, RRType.NS())
+ if result is not finder.SUCCESS or ns_rrset is None:
+ logger.warn(NOTIFY_OUT_ZONE_NO_NS,
+ format_zone_str(zone_name, zone_class))
+ return []
+ result, soa_rrset = finder.find(zone_name, RRType.SOA())
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
+ format_zone_str(zone_name, zone_class))
+ return [] # broken zone anyway, stop here.
+ soa_mname = Name(soa_rrset.get_rdata()[0].to_text().split(' ')[0])
+
+ addrs = []
+ for ns_rdata in ns_rrset.get_rdata():
+ ns_name = Name(ns_rdata.to_text())
+ if soa_mname == ns_name:
+ continue
+ result, rrset = finder.find(ns_name, RRType.A())
+ if result is finder.SUCCESS and rrset is not None:
+ addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+ result, rrset = finder.find(ns_name, RRType.AAAA())
+ if result is finder.SUCCESS and rrset is not None:
+ addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
+
+ return addrs
def _prepare_select_info(self):
'''
@@ -404,8 +451,9 @@ class NotifyOut:
self._nonblock_event.set()
def _send_notify_message_udp(self, zone_notify_info, addrinfo):
- msg, qid = self._create_notify_message(zone_notify_info.zone_name,
- zone_notify_info.zone_class)
+ msg, qid = self._create_notify_message(
+ Name(zone_notify_info.zone_name),
+ RRClass(zone_notify_info.zone_class))
render = MessageRenderer()
render.set_length_limit(512)
msg.to_wire(render)
@@ -426,17 +474,6 @@ class NotifyOut:
return True
- def _create_rrset_from_db_record(self, record, zone_class):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
- This function should be updated first. TODO, the function is copied from xfrout, there
- should be library for creating one rrset. '''
- rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
- rdata_ = Rdata(rrtype_, RRClass(zone_class), " ".join(record[sqlite3_ds.RR_RDATA_INDEX:]))
- rrset_ = RRset(Name(record[sqlite3_ds.RR_NAME_INDEX]), RRClass(zone_class), \
- rrtype_, RRTTL( int(record[sqlite3_ds.RR_TTL_INDEX])))
- rrset_.add_rdata(rdata_)
- return rrset_
-
def _create_notify_message(self, zone_name, zone_class):
msg = Message(Message.RENDER)
qid = random.randint(0, 0xFFFF)
@@ -444,14 +481,35 @@ class NotifyOut:
msg.set_opcode(Opcode.NOTIFY())
msg.set_rcode(Rcode.NOERROR())
msg.set_header_flag(Message.HEADERFLAG_AA)
- question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
- msg.add_question(question)
- # Add soa record to answer section
- soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+ msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
+ zone_class))
return msg, qid
+ def _get_zone_soa(self, zone_name, zone_class):
+ # We create (and soon drop) the data source client here because
+ # clients should be thread specific. We could let the main thread
+ # loop (_dispatcher) create and retain the client in order to avoid
+ # the overhead when we generalize the interface (and we may also
+ # revisit the design of notify_out more substantially anyway).
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ result, finder = DataSourceClient('sqlite3',
+ datasrc_config).find_zone(zone_name)
+ if result is not DataSourceClient.SUCCESS:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() + ' not found')
+
+ result, soa_rrset = finder.find(zone_name, RRType.SOA())
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() +
+ ' is broken: no valid SOA found')
+
+ return soa_rrset
+
def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
'''Parse the notify reply message.
rcode will not checked here, If we get the response
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index 570f51e..b77a60c 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -81,3 +81,24 @@ programming error, since all exceptions should have been caught
explicitly. Please file a bug report. Since there was a response,
no more notifies will be sent to this server for this notification
event.
+
+% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message. This can be either a
+configuration error or an installation setup failure.
+
+% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found
+notify_out attempted to get the slave information of a zone, but the
+zone wasn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process. So this indicates either a critical inconsistency in the
+data source or a software bug.
+
+% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. No notify message will be sent for such a zone.
+
+% NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. No notify message
+will be sent for such a zone.
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 00c2eee..3af5991 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -1,12 +1,20 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = notify_out_test.py
EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST += testdata/test.sqlite3 testdata/brokentest.sqlite3
+# The rest of the files are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/example.net
+EXTRA_DIST += testdata/nons.example testdata/nosoa.example
+EXTRA_DIST += testdata/multisoa.example
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,5 +28,7 @@ endif
echo Running test: $$pytest ; \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 83f6d1a..d64c203 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -19,9 +19,11 @@ import os
import tempfile
import time
import socket
-from isc.datasrc import sqlite3_ds
from isc.notify import notify_out, SOCK_DATA
import isc.log
+from isc.dns import *
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
# our fake socket, where we can read and insert messages
class MockSocket():
@@ -92,10 +94,8 @@ class TestZoneNotifyInfo(unittest.TestCase):
class TestNotifyOut(unittest.TestCase):
def setUp(self):
- self._db_file = tempfile.NamedTemporaryFile(delete=False)
- sqlite3_ds.load(self._db_file.name, 'example.net.', self._example_net_data_reader)
- sqlite3_ds.load(self._db_file.name, 'example.com.', self._example_com_data_reader)
- self._notify = notify_out.NotifyOut(self._db_file.name)
+ self._db_file = TESTDATA_SRCDIR + '/test.sqlite3'
+ self._notify = notify_out.NotifyOut(self._db_file)
self._notify._notify_infos[('example.com.', 'IN')] = MockZoneNotifyInfo('example.com.', 'IN')
self._notify._notify_infos[('example.com.', 'CH')] = MockZoneNotifyInfo('example.com.', 'CH')
self._notify._notify_infos[('example.net.', 'IN')] = MockZoneNotifyInfo('example.net.', 'IN')
@@ -110,10 +110,6 @@ class TestNotifyOut(unittest.TestCase):
com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
- def tearDown(self):
- self._db_file.close()
- os.unlink(self._db_file.name)
-
def test_send_notify(self):
notify_out._MAX_NOTIFY_NUM = 2
@@ -309,39 +305,9 @@ class TestNotifyOut(unittest.TestCase):
self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
self.assertNotEqual(cur_tgt, example_net_info._notify_current)
-
- def _example_net_data_reader(self):
- zone_data = [
- ('example.net.', '1000', 'IN', 'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
- ('example.net.', '1000', 'IN', 'NS', 'a.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'b.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'c.dns.example.net.'),
- ('a.dns.example.net.', '1000', 'IN', 'A', '1.1.1.1'),
- ('a.dns.example.net.', '1000', 'IN', 'AAAA', '2:2::2:2'),
- ('b.dns.example.net.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '5:5::5:5'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '6.6.6.6'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '7.7.7.7'),
- ('c.dns.example.net.', '1000', 'IN', 'AAAA', '8:8::8:8')]
- for item in zone_data:
- yield item
-
- def _example_com_data_reader(self):
- zone_data = [
- ('example.com.', '1000', 'IN', 'SOA', 'a.dns.example.com. mail.example.com. 1 1 1 1 1'),
- ('example.com.', '1000', 'IN', 'NS', 'a.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'b.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'c.dns.example.com.'),
- ('a.dns.example.com.', '1000', 'IN', 'A', '1.1.1.1'),
- ('b.dns.example.com.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '5:5::5:5')]
- for item in zone_data:
- yield item
-
def test_get_notify_slaves_from_ns(self):
- records = self._notify._get_notify_slaves_from_ns('example.net.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
+ RRClass.IN())
self.assertEqual(6, len(records))
self.assertEqual('8:8::8:8', records[5])
self.assertEqual('7.7.7.7', records[4])
@@ -350,14 +316,32 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
- records = self._notify._get_notify_slaves_from_ns('example.com.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
+ RRClass.IN())
self.assertEqual(3, len(records))
self.assertEqual('5:5::5:5', records[2])
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
+ def test_get_notify_slaves_from_ns_unusual(self):
+ self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nons.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosoa.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('multisoa.example'), RRClass.IN()))
+
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosuchzone.example'), RRClass.IN()))
+
+ # This will cause failure in getting access to the data source.
+ self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('example.com'), RRClass.IN()))
+
def test_init_notify_out(self):
- self._notify._init_notify_out(self._db_file.name)
+ self._notify._init_notify_out(self._db_file)
self.assertListEqual([('3.3.3.3', 53), ('4:4::4:4', 53), ('5:5::5:5', 53)],
self._notify._notify_infos[('example.com.', 'IN')].notify_slaves)
@@ -417,6 +401,5 @@ class TestNotifyOut(unittest.TestCase):
if __name__== "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
-
-
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
new file mode 100644
index 0000000..61e766c
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/example.com b/src/lib/python/isc/notify/tests/testdata/example.com
new file mode 100644
index 0000000..5d59819
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.com
@@ -0,0 +1,10 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com. 1000 IN NS a.dns.example.com.
+example.com. 1000 IN NS b.dns.example.com.
+example.com. 1000 IN NS c.dns.example.com.
+a.dns.example.com. 1000 IN A 1.1.1.1
+b.dns.example.com. 1000 IN A 3.3.3.3
+b.dns.example.com. 1000 IN AAAA 4:4::4:4
+b.dns.example.com. 1000 IN AAAA 5:5::5:5
diff --git a/src/lib/python/isc/notify/tests/testdata/example.net b/src/lib/python/isc/notify/tests/testdata/example.net
new file mode 100644
index 0000000..001d2d9
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.net
@@ -0,0 +1,14 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.net. 1000 IN SOA a.dns.example.net. mail.example.net. 1 1 1 1 1
+example.net. 1000 IN NS a.dns.example.net.
+example.net. 1000 IN NS b.dns.example.net.
+example.net. 1000 IN NS c.dns.example.net.
+a.dns.example.net. 1000 IN A 1.1.1.1
+a.dns.example.net. 1000 IN AAAA 2:2::2:2
+b.dns.example.net. 1000 IN A 3.3.3.3
+b.dns.example.net. 1000 IN AAAA 4:4::4:4
+b.dns.example.net. 1000 IN AAAA 5:5::5:5
+c.dns.example.net. 1000 IN A 6.6.6.6
+c.dns.example.net. 1000 IN A 7.7.7.7
+c.dns.example.net. 1000 IN AAAA 8:8::8:8
diff --git a/src/lib/python/isc/notify/tests/testdata/multisoa.example b/src/lib/python/isc/notify/tests/testdata/multisoa.example
new file mode 100644
index 0000000..eca2fbd
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/multisoa.example
@@ -0,0 +1,5 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's
+;; provided for reference purposes only.
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 1 1 1 1 1
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 2 2 2 2 2
+multisoa.example. 1000 IN NS a.dns.multisoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/nons.example b/src/lib/python/isc/notify/tests/testdata/nons.example
new file mode 100644
index 0000000..c1fc1b8
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nons.example
@@ -0,0 +1,3 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's
+;; provided for reference purposes only.
+nons.example. 1000 IN SOA a.dns.nons.example. mail.nons.example. 1 1 1 1 1
diff --git a/src/lib/python/isc/notify/tests/testdata/nosoa.example b/src/lib/python/isc/notify/tests/testdata/nosoa.example
new file mode 100644
index 0000000..18e87e1
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nosoa.example
@@ -0,0 +1,7 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's
+;; provided for reference purposes only.
+;; (SOA has been removed)
+nosoa.example. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+nosoa.example. 1000 IN NS a.dns.nosoa.example.
+nosoa.example. 1000 IN NS b.dns.nosoa.example.
+nosoa.example. 1000 IN NS c.dns.nosoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..e3cadb0
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..5479d83 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py rrset_utils.py
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..7eac772
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+ '''Compare two RRsets, return True if equal, otherwise False
+
+ We provide this function as part of test utils as we have no direct rrset
+ comparison atm. There's no accessor for sigs either (so this only checks
+ name, class, type, ttl, and rdata).
+ Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+ '''
+ return a.get_name() == b.get_name() and \
+ a.get_class() == b.get_class() and \
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and \
+ (a.get_type() == RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are short cut utilities to create an RRset of a specific
+# RR type with one RDATA. Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return rrset
+
+def create_aaaa(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+ return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+ rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+ return rrset
+
+def create_cname(target='target.example.com', name=Name('example.com'),
+ ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+ return rrset
+
+def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
+ '''Create an RR of a general type with an arbitrary length of RDATA
+
+ If the RR type isn't specified, type of 65300 will be used, which is
+ arbitrarily chosen from the IANA "Reserved for Private Usage" range.
+ The RDATA will be filled with specified length of all-0 data.
+
+ '''
+ rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
+ rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+ str(rdlen) + ' ' + '00' * rdlen))
+ return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+
+ rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+ rdata_str = 'master.example.com. admin.example.com. ' + \
+ str(serial) + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+ return rrset
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index a2d9a7d..38b7f39 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -59,7 +59,7 @@ class Diff:
the changes to underlying data source right away, but keeps them for
a while.
"""
- def __init__(self, ds_client, zone, replace=False):
+ def __init__(self, ds_client, zone, replace=False, journaling=False):
"""
Initializes the diff to a ready state. It checks the zone exists
in the datasource and if not, NoSuchZone is raised. This also creates
@@ -67,13 +67,25 @@ class Diff:
The ds_client is the datasource client containing the zone. Zone is
isc.dns.Name object representing the name of the zone (its apex).
- If replace is true, the content of the whole zone is wiped out before
+ If replace is True, the content of the whole zone is wiped out before
applying the diff.
+        If journaling is True, the history of subsequent updates will be
+        recorded as well as the updates themselves, as long as the underlying
+        data source supports journaling. If the data source allows
+        incoming updates but does not support journaling, the Diff object
+        will still continue applying the diffs, with journaling disabled.
+
You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
exceptions.
"""
- self.__updater = ds_client.get_updater(zone, replace)
+ try:
+ self.__updater = ds_client.get_updater(zone, replace, journaling)
+ except isc.datasrc.NotImplemented as ex:
+ if not journaling:
+ raise ex
+ self.__updater = ds_client.get_updater(zone, replace, False)
+ logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
if self.__updater is None:
# The no such zone case
raise NoSuchZone("Zone " + str(zone) +
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
index be943c8..203e31f 100644
--- a/src/lib/python/isc/xfrin/libxfrin_messages.mes
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -19,3 +19,13 @@
The xfrin module received an update containing multiple rdata changes for the
same RRset. But the TTLs of these don't match each other. As we combine them
together, the later one get's overwritten to the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 9fab890..9944404 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,6 +15,7 @@
import isc.log
import unittest
+import isc.datasrc
from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
from isc.xfrin.diff import Diff, NoSuchZone
@@ -127,7 +128,7 @@ class DiffTest(unittest.TestCase):
"""
return self.__rrclass
- def get_updater(self, zone_name, replace):
+ def get_updater(self, zone_name, replace, journaling=False):
"""
This one pretends this is the data source client and serves
getting an updater.
@@ -138,11 +139,20 @@ class DiffTest(unittest.TestCase):
# The diff should not delete the old data.
self.assertEqual(self.__should_replace, replace)
self.__updater_requested = True
- # Pretend this zone doesn't exist
if zone_name == Name('none.example.org.'):
+ # Pretend this zone doesn't exist
return None
+
+ # If journaling is enabled, record the fact; for a special zone
+ # pretend that we don't support journaling.
+ if journaling:
+ if zone_name == Name('nodiff.example.org'):
+ raise isc.datasrc.NotImplemented('journaling not supported')
+ self.__journaling_enabled = True
else:
- return self
+ self.__journaling_enabled = False
+
+ return self
def test_create(self):
"""
@@ -152,6 +162,8 @@ class DiffTest(unittest.TestCase):
diff = Diff(self, Name('example.org.'))
self.assertTrue(self.__updater_requested)
self.assertEqual([], diff.get_buffer())
+ # By default journaling is disabled
+ self.assertFalse(self.__journaling_enabled)
def test_create_nonexist(self):
"""
@@ -161,6 +173,14 @@ class DiffTest(unittest.TestCase):
self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
self.assertTrue(self.__updater_requested)
+ def test_create_withjournal(self):
+ Diff(self, Name('example.org'), False, True)
+ self.assertTrue(self.__journaling_enabled)
+
+ def test_create_nojournal(self):
+ Diff(self, Name('nodiff.example.org'), False, True)
+ self.assertFalse(self.__journaling_enabled)
+
def __data_common(self, diff, method, operation):
"""
Common part of test for test_add and test_delte.
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 0d3fb4c..2bd3ae8 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -28,9 +28,9 @@
#include <dns/opcode.h>
#include <dns/exceptions.h>
#include <dns/rdataclass.h>
-
#include <resolve/resolve.h>
#include <resolve/resolve_log.h>
+#include <resolve/resolve_messages.h>
#include <cache/resolver_cache.h>
#include <nsas/address_request_callback.h>
#include <nsas/nameserver_address.h>
@@ -39,6 +39,7 @@
#include <asiodns/dns_service.h>
#include <asiodns/io_fetch.h>
#include <asiolink/io_service.h>
+#include <resolve/response_classifier.h>
#include <resolve/recursive_query.h>
using namespace isc::dns;
@@ -228,6 +229,9 @@ private:
// case of a TCP packet being returned with the TC bit set.
IOFetch::Protocol protocol_;
+ // EDNS flag
+ bool edns_;
+
// To prevent both unreasonably long cname chains and cname loops,
// we simply keep a counter of the number of CNAMEs we have
// followed so far (and error if it exceeds RESOLVER_MAX_CNAME_CHAIN
@@ -357,17 +361,19 @@ private:
IOFetch query(protocol_, io_, question_,
current_ns_address.getAddress(),
53, buffer_, this,
- query_timeout_);
+ query_timeout_, edns_);
io_.get_io_service().post(query);
}
}
// 'general' send, ask the NSAS to give us an address.
- void send(IOFetch::Protocol protocol = IOFetch::UDP) {
+ void send(IOFetch::Protocol protocol = IOFetch::UDP, bool edns = true) {
protocol_ = protocol; // Store protocol being used for this
+ edns_ = edns;
if (test_server_.second != 0) {
// Send query to test server
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_TEST_UPSTREAM)
+ LOG_DEBUG(isc::resolve::logger,
+ RESLIB_DBG_TRACE, RESLIB_TEST_UPSTREAM)
.arg(questionText(question_)).arg(test_server_.first);
gettimeofday(¤t_ns_qsent_time, NULL);
++outstanding_events_;
@@ -380,8 +386,9 @@ private:
} else {
// Ask the NSAS for an address for the current zone,
// the callback will call the actual sendTo()
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_NSAS_LOOKUP)
- .arg(cur_zone_);
+ LOG_DEBUG(isc::resolve::logger,
+ RESLIB_DBG_TRACE, RESLIB_NSAS_LOOKUP)
+ .arg(cur_zone_);
// Can we have multiple calls to nsas_out? Let's assume not
// for now
@@ -430,7 +437,7 @@ private:
.arg(questionText(question_));
isc::resolve::copyResponseMessage(incoming, answer_message_);
cache_.update(*answer_message_);
- return true;
+ return (true);
break;
case isc::resolve::ResponseClassifier::CNAME:
@@ -444,7 +451,7 @@ private:
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_LONG_CHAIN)
.arg(questionText(question_));
makeSERVFAIL();
- return true;
+ return (true);
}
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_CNAME)
@@ -460,7 +467,7 @@ private:
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_FOLLOW_CNAME)
.arg(questionText(question_));
doLookup();
- return false;
+ return (false);
break;
case isc::resolve::ResponseClassifier::NXDOMAIN:
@@ -471,7 +478,7 @@ private:
isc::resolve::copyResponseMessage(incoming, answer_message_);
// no negcache yet
//cache_.update(*answer_message_);
- return true;
+ return (true);
break;
case isc::resolve::ResponseClassifier::REFERRAL:
@@ -520,7 +527,7 @@ private:
nsas_callback_out_ = true;
nsas_.lookup(cur_zone_, question_.getClass(),
nsas_callback_, ANY_OK, glue_hints);
- return false;
+ return (false);
} else {
// Referral was received but did not contain an NS RRset.
LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_NO_NS_RRSET)
@@ -528,48 +535,142 @@ private:
// TODO this will result in answering with the delegation. oh well
isc::resolve::copyResponseMessage(incoming, answer_message_);
- return true;
+ return (true);
}
break;
+
case isc::resolve::ResponseClassifier::TRUNCATED:
// Truncated packet. If the protocol we used for the last one is
// UDP, re-query using TCP. Otherwise regard it as an error.
if (protocol_ == IOFetch::UDP) {
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TRUNCATED)
- .arg(questionText(question_));
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS,
+ RESLIB_TRUNCATED).arg(questionText(question_));
send(IOFetch::TCP);
- return false;
+ return (false);
}
- // Was a TCP query so we have received a packet over TCP with the TC
- // bit set: drop through to common error processing.
- // TODO: Can we use what we have received instead of discarding it?
-
- case isc::resolve::ResponseClassifier::EMPTY:
- case isc::resolve::ResponseClassifier::EXTRADATA:
- case isc::resolve::ResponseClassifier::INVNAMCLASS:
- case isc::resolve::ResponseClassifier::INVTYPE:
- case isc::resolve::ResponseClassifier::MISMATQUEST:
- case isc::resolve::ResponseClassifier::MULTICLASS:
- case isc::resolve::ResponseClassifier::NOTONEQUEST:
- case isc::resolve::ResponseClassifier::NOTRESPONSE:
- case isc::resolve::ResponseClassifier::NOTSINGLE:
- case isc::resolve::ResponseClassifier::OPCODE:
+
+ // Was a TCP query so we have received a packet over TCP with the
+ // TC bit set: report an error by going to the common
+ // error code.
+ goto SERVFAIL;
+
case isc::resolve::ResponseClassifier::RCODE:
- LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERR)
- .arg(questionText(question_));
- // Should we try a different server rather than SERVFAIL?
+ // see if it's a FORMERR and a potential EDNS problem
+ if (incoming.getRcode() == Rcode::FORMERR()) {
+ if (protocol_ == IOFetch::UDP && edns_) {
+ // try EDNS over TCP
+ send(IOFetch::TCP, true);
+ return (false);
+ } else if (protocol_ == IOFetch::TCP && edns_) {
+ // try UDP, no EDNS
+ send(IOFetch::UDP, false);
+ return (false);
+ }
+
+ // TC should take care of non-EDNS over UDP, fall through to
+ // SERVFAIL if we get FORMERR instead
+ }
+ goto SERVFAIL;
+
+ default:
+SERVFAIL:
+ // Some error in the received packet: report it and return SERVFAIL
+ // to the caller.
+ if (logger.isDebugEnabled()) {
+ reportResponseClassifierError(category, incoming.getRcode());
+ }
makeSERVFAIL();
- return true;
- break;
+ return (true);
}
- // Since we do not have a default in the switch above,
- // the compiler should have errored on any missing case
- // statements.
+ // If we get here, there is some serious logic error (or a missing
+ // "return").
assert(false);
- return true;
+ return (true); // To keep the compiler happy
}
-
+
+ /// \brief Report classification-detected error
+ ///
+ /// When the response classifier has detected an error in the response from
+ /// an upstream query, this method is called to log a debug message giving
+ /// information about the problem.
+ ///
+ /// \param category Classification code for the packet
+ /// \param rcode RCODE value in the packet
+ void reportResponseClassifierError(ResponseClassifier::Category category,
+ const Rcode& rcode)
+ {
+ // We could set up a table of response classifications to message
+ // IDs here and index into that table. But given that (a) C++ does
+ // not have C's named initializers, (b) the codes for the
+ // response classifier are in another module and (c) not all messages
+ // have the same number of arguments, the setup of the table would be
+ // almost as long as the code here: it would need to include a number
+ // of assertions to ensure that any change to the response
+ // classifier codes was detected, and the checking logic would need to
+ // check that the numeric value of the code lay within the defined
+ // limits of the table.
+
+ if (category == ResponseClassifier::RCODE) {
+
+ // Special case as this message takes two arguments.
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERROR).
+ arg(questionText(question_)).arg(rcode);
+
+ } else {
+
+ isc::log::MessageID message_id;
+ switch (category) {
+ case ResponseClassifier::TRUNCATED:
+ message_id = RESLIB_TCP_TRUNCATED;
+ break;
+
+ case ResponseClassifier::EMPTY:
+ message_id = RESLIB_EMPTY_RESPONSE;
+ break;
+
+ case ResponseClassifier::EXTRADATA:
+ message_id = RESLIB_EXTRADATA_RESPONSE;
+ break;
+
+ case ResponseClassifier::INVNAMCLASS:
+ message_id = RESLIB_INVALID_NAMECLASS_RESPONSE;
+ break;
+
+ case ResponseClassifier::INVTYPE:
+ message_id = RESLIB_INVALID_TYPE_RESPONSE;
+ break;
+
+ case ResponseClassifier::MISMATQUEST:
+ message_id = RESLIB_INVALID_QNAME_RESPONSE;
+ break;
+
+ case ResponseClassifier::MULTICLASS:
+ message_id = RESLIB_MULTIPLE_CLASS_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTONEQUEST:
+ message_id = RESLIB_NOT_ONE_QNAME_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTRESPONSE:
+ message_id = RESLIB_NOT_RESPONSE;
+ break;
+
+ case ResponseClassifier::NOTSINGLE:
+ message_id = RESLIB_NOTSINGLE_RESPONSE;
+ break; // was missing: fell through to default and logged the wrong ID
+
+ case ResponseClassifier::OPCODE:
+ message_id = RESLIB_OPCODE_RESPONSE;
+ break; // was missing: fell through to default and logged the wrong ID
+
+ default:
+ message_id = RESLIB_ERROR_RESPONSE;
+ break;
+ }
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, message_id).
+ arg(questionText(question_));
+ }
+ }
+
public:
RunningQuery(IOService& io,
const Question& question,
@@ -734,12 +835,7 @@ public:
incoming.fromWire(ibuf);
buffer_->clear();
- if (incoming.getRcode() == Rcode::NOERROR()) {
- done_ = handleRecursiveAnswer(incoming);
- } else {
- isc::resolve::copyResponseMessage(incoming, answer_message_);
- done_ = true;
- }
+ done_ = handleRecursiveAnswer(incoming);
if (done_) {
callCallback(true);
stop();
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index f702d9b..b59fd8c 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -15,22 +15,61 @@
$NAMESPACE isc::resolve
% RESLIB_ANSWER answer received in response to query for <%1>
-A debug message recording that an answer has been received to an upstream
-query for the specified question. Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message reporting that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have
+indicated the server to which the question was sent.
% RESLIB_CNAME CNAME received in response to query for <%1>
-A debug message recording that CNAME response has been received to an upstream
-query for the specified question. Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message recording that CNAME response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
% RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2
-A debug message, a cache lookup did not find the specified <name, class,
-type> tuple in the cache; instead, the deepest delegation found is indicated.
+A debug message, a cache lookup did not find the specified <name,
+class, type> tuple in the cache; instead, the deepest delegation found
+is indicated.
+
+% RESLIB_EMPTY_RESPONSE empty response received to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver did not contain anything in the answer or authority sections,
+although in all other respects it was a valid response. A SERVFAIL will
+be returned to the system making the original query.
+
+% RESLIB_ERROR_RESPONSE unspecified error received in response to query for <%1>
+A debug message, the response to the specified query to an upstream
+nameserver indicated that the response was classified as an erroneous
+response, but that the nature of the error cannot be identified.
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_EXTRADATA_RESPONSE extra data in response to query for <%1>
+A debug message indicating that the response to the specified query
+from an upstream nameserver contained too much data. This can happen if
+an ANY query was sent and the answer section in the response contained
+multiple RRs with different names. A SERVFAIL will be returned to the
+system making the original query.
% RESLIB_FOLLOW_CNAME following CNAME chain to <%1>
-A debug message, a CNAME response was received and another query is being issued
-for the <name, class, type> tuple.
+A debug message, a CNAME response was received and another query is
+being issued for the <name, class, type> tuple.
+
+% RESLIB_INVALID_NAMECLASS_RESPONSE invalid name or class in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained either
+an answer not matching the query name or an answer having a different
+class to that queried for. A SERVFAIL will be returned to the system
+making the original query.
+
+% RESLIB_INVALID_QNAME_RESPONSE invalid question name in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained a name
+in the question section that did not match that of the query. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_INVALID_TYPE_RESPONSE invalid type in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained an
+invalid type field. A SERVFAIL will be returned to the system making
+the original query.
% RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded
A debug message recording that a CNAME response has been received to an upstream
@@ -39,16 +78,47 @@ the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
+% RESLIB_MULTIPLE_CLASS_RESPONSE response to query for <%1> contained multiple RRsets with different classes
+A debug message reporting that the response to an upstream query for
+the specified name contained multiple RRsets in the answer and not all
+were of the same class. This is a violation of the standard and so a
+SERVFAIL will be returned.
+
% RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1>
A debug message, this indicates that a response was received for the specified
query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
+% RESLIB_NOT_ONE_QNAME_RESPONSE not one question in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not contain
+one name in the question section as required by the standard. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_NOT_RESPONSE response to query for <%1> was not a response
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not have the QR
+bit set (thus indicating that the packet was a query, not a response).
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_NOTSINGLE_RESPONSE CNAME response to query for <%1> was not a single RR
+A debug message, the response to the specified query from an upstream
+nameserver was a CNAME that had multiple RRs in the RRset. This is
+an invalid response according to the standards so a SERVFAIL will be
+returned to the system making the original query.
+
% RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
+% RESLIB_OPCODE_RESPONSE response to query for <%1> did not have query opcode
+A debug message, the response to the specified query from an upstream
+nameserver was a response that did not have the opcode set to that of
+a query. According to the standards, this is an invalid response to
+the query that was made, so a SERVFAIL will be returned to the system
+making the original query.
+
% RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -63,7 +133,7 @@ A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-% RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1>
+% RESLIB_RCODE_ERROR response to query for <%1> returns RCODE of %2
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
@@ -122,6 +192,11 @@ A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
+% RESLIB_TCP_TRUNCATED TCP response to query for %1 was truncated
+This is a debug message logged when a response to the specified query to an
+upstream nameserver returned a response with the TC (truncation) bit set. This
+is treated as an error by the code.
+
% RESLIB_TEST_SERVER setting test server to %1(%2)
This is a warning message only generated in unit tests. It indicates
that all upstream queries from the resolver are being routed to the
diff --git a/src/lib/resolve/response_classifier.h b/src/lib/resolve/response_classifier.h
index 3821560..a027bd0 100644
--- a/src/lib/resolve/response_classifier.h
+++ b/src/lib/resolve/response_classifier.h
@@ -151,7 +151,7 @@ private:
size_t size);
};
-#endif // __RESPONSE_CLASSIFIER_H
-
} // namespace resolve
} // namespace isc
+
+#endif // __RESPONSE_CLASSIFIER_H
diff --git a/src/lib/xfr/Makefile.am b/src/lib/xfr/Makefile.am
index d714990..3d7f60f 100644
--- a/src/lib/xfr/Makefile.am
+++ b/src/lib/xfr/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = . tests
+
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
AM_CPPFLAGS += $(BOOST_INCLUDES)
diff --git a/src/lib/xfr/tests/Makefile.am b/src/lib/xfr/tests/Makefile.am
new file mode 100644
index 0000000..4abb456
--- /dev/null
+++ b/src/lib/xfr/tests/Makefile.am
@@ -0,0 +1,25 @@
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc client_test.cc
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/xfr/tests/client_test.cc b/src/lib/xfr/tests/client_test.cc
new file mode 100644
index 0000000..6c9f4ad
--- /dev/null
+++ b/src/lib/xfr/tests/client_test.cc
@@ -0,0 +1,37 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <sys/un.h>
+#include <string>
+
+#include <xfr/xfrout_client.h>
+
+using namespace std;
+using namespace isc::xfr;
+
+namespace {
+
+// Checks that XfroutClient::connect() throws XfroutError for bad socket paths.
+// (Test name fixed: was misspelled "connetFile".)
+TEST(ClientTest, connectFile) {
+ // File path is too long
+ struct sockaddr_un s; // can't be const; some compiler complains
+ EXPECT_THROW(XfroutClient(string(sizeof(s.sun_path), 'x')).connect(),
+ XfroutError);
+
+ // File doesn't exist (we assume the file "no_such_file" doesn't exist)
+ EXPECT_THROW(XfroutClient("no_such_file").connect(), XfroutError);
+}
+
+}
diff --git a/src/lib/xfr/tests/run_unittests.cc b/src/lib/xfr/tests/run_unittests.cc
new file mode 100644
index 0000000..8dc59a2
--- /dev/null
+++ b/src/lib/xfr/tests/run_unittests.cc
@@ -0,0 +1,24 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/xfr/xfrout_client.cc b/src/lib/xfr/xfrout_client.cc
index 6ab905b..227ffc4 100644
--- a/src/lib/xfr/xfrout_client.cc
+++ b/src/lib/xfr/xfrout_client.cc
@@ -52,10 +52,11 @@ XfroutClient::~XfroutClient() {
void
XfroutClient::connect() {
- asio::error_code err;
- impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_), err);
- if (err) {
- isc_throw(XfroutError, "socket connect failed: " << err.message());
+ try {
+ impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_));
+ } catch (const asio::system_error& err) {
+ isc_throw(XfroutError, "socket connect failed for " <<
+ impl_->file_path_ << ": " << err.what());
}
}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master.conf b/tests/lettuce/configurations/xfrin/retransfer_master.conf
new file mode 100644
index 0000000..95cd88e
--- /dev/null
+++ b/tests/lettuce/configurations/xfrin/retransfer_master.conf
@@ -0,0 +1,22 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47807,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "Xfrout": {
+ "zone_config": [ {
+ "origin": "example.org"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave.conf b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
new file mode 100644
index 0000000..51622cd
--- /dev/null
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
@@ -0,0 +1,17 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/test_nonexistent_db.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index e104a81..5248316 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -18,8 +18,10 @@ import subprocess
import re
@step('start bind10(?: with configuration (\S+))?' +\
- '(?: with cmdctl port (\d+))?(?: as (\S+))?')
-def start_bind10(step, config_file, cmdctl_port, process_name):
+ '(?: with cmdctl port (\d+))?' +\
+ '(?: with msgq socket file (\S+))?' +\
+ '(?: as (\S+))?')
+def start_bind10(step, config_file, cmdctl_port, msgq_sockfile, process_name):
"""
Start BIND 10 with the given optional config file, cmdctl port, and
store the running process in world with the given process name.
@@ -29,6 +31,8 @@ def start_bind10(step, config_file, cmdctl_port, process_name):
directory.
cmdctl_port ('with cmdctl port <portnr>', optional): The port on which
b10-cmdctl listens for bindctl commands. Defaults to 47805.
+ msgq_sockfile ('with msgq socket file', optional): The msgq socket file
+ that will be used for internal communication
process_name ('as <name>', optional). This is the name that can be used
in the following steps of the scenario to refer to this
BIND 10 instance. Defaults to 'bind10'.
@@ -57,10 +61,10 @@ def start_bind10(step, config_file, cmdctl_port, process_name):
world.processes.add_process(step, process_name, args)
# check output to know when startup has been completed
- message = world.processes.wait_for_stderr_str(process_name,
- ["BIND10_STARTUP_COMPLETE",
- "BIND10_STARTUP_ERROR"])
- assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(message)
+ (message, line) = world.processes.wait_for_stderr_str(process_name,
+ ["BIND10_STARTUP_COMPLETE",
+ "BIND10_STARTUP_ERROR"])
+ assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(line)
@step('wait for bind10 auth (?:of (\w+) )?to start')
def wait_for_auth(step, process_name):
@@ -75,15 +79,24 @@ def wait_for_auth(step, process_name):
world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
False)
- at step('have bind10 running(?: with configuration ([\w.]+))?')
-def have_bind10_running(step, config_file):
+ at step('have bind10 running(?: with configuration ([\S]+))?' +\
+ '(?: with cmdctl port (\d+))?' +\
+ '(?: as ([\S]+))?')
+def have_bind10_running(step, config_file, cmdctl_port, process_name):
"""
Compound convenience step for running bind10, which consists of
start_bind10 and wait_for_auth.
Currently only supports the 'with configuration' option.
"""
- step.given('start bind10 with configuration ' + config_file)
- step.given('wait for bind10 auth to start')
+ start_step = 'start bind10 with configuration ' + config_file
+ wait_step = 'wait for bind10 auth to start'
+ if cmdctl_port is not None:
+ start_step += ' with cmdctl port ' + str(cmdctl_port)
+ if process_name is not None:
+ start_step += ' as ' + process_name
+ wait_step = 'wait for bind10 auth of ' + process_name + ' to start'
+ step.given(start_step)
+ step.given(wait_step)
@step('set bind10 configuration (\S+) to (.*)(?: with cmdctl port (\d+))?')
def set_config_command(step, name, value, cmdctl_port):
@@ -106,3 +119,26 @@ def set_config_command(step, name, value, cmdctl_port):
bindctl.stdin.write("quit\n")
result = bindctl.wait()
assert result == 0, "bindctl exit code: " + str(result)
+
+ at step('send bind10 the command (.+)(?: with cmdctl port (\d+))?')
+def send_command(step, command, cmdctl_port):
+ """
+ Run bindctl, send the given command, and exit bindctl.
+ Parameters:
+ command ('the command <command>'): The command to send.
+ cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+ the command to. Defaults to 47805.
+ Fails if cmdctl does not exit with status code 0.
+ """
+ if cmdctl_port is None:
+ cmdctl_port = '47805'
+ args = ['bindctl', '-p', cmdctl_port]
+ bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ subprocess.PIPE, None)
+ bindctl.stdin.write(command + "\n")
+ bindctl.stdin.write("quit\n")
+ (stdout, stderr) = bindctl.communicate()
+ result = bindctl.returncode
+ assert result == 0, "bindctl exit code: " + str(result) +\
+ "\nstdout:\n" + str(stdout) +\
+ "stderr:\n" + str(stderr)
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
index 4050940..4b199d6 100644
--- a/tests/lettuce/features/terrain/steps.py
+++ b/tests/lettuce/features/terrain/steps.py
@@ -30,8 +30,8 @@ def stop_a_named_process(step, process_name):
"""
world.processes.stop_process(process_name)
- at step('wait for (new )?(\w+) stderr message (\w+)')
-def wait_for_message(step, new, process_name, message):
+ at step('wait for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
+def wait_for_message(step, new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stderr
output.
@@ -40,12 +40,18 @@ def wait_for_message(step, new, process_name, message):
this step was used for this process.
process_name ('<name> stderr'): Name of the process to check the output of.
message ('message <message>'): Output (part) to wait for.
+ not_message ('not <message>'): Output (part) to wait for, and fail
Fails if the message is not found after 10 seconds.
"""
- world.processes.wait_for_stderr_str(process_name, [message], new)
+ strings = [message]
+ if not_message is not None:
+ strings.append(not_message)
+ (found, line) = world.processes.wait_for_stderr_str(process_name, strings, new)
+ if not_message is not None:
+ assert found != not_message, line
- at step('wait for (new )?(\w+) stdout message (\w+)')
-def wait_for_message(step, process_name, message):
+ at step('wait for (new )?(\w+) stdout message (\w+)(?: not (\w+))?')
+def wait_for_message(step, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stdout
output.
@@ -53,10 +59,16 @@ def wait_for_message(step, process_name, message):
new: (' new', optional): Only check the output printed since last time
this step was used for this process.
process_name ('<name> stderr'): Name of the process to check the output of.
- message ('message <message>'): Output (part) to wait for.
+ message ('message <message>'): Output (part) to wait for, and succeed.
+ not_message ('not <message>'): Output (part) to wait for, and fail
Fails if the message is not found after 10 seconds.
"""
- world.processes.wait_for_stdout_str(process_name, [message], new)
+ strings = [message]
+ if not_message is not None:
+ strings.append(not_message)
+ (found, line) = world.processes.wait_for_stdout_str(process_name, strings, new)
+ if not_message is not None:
+ assert found != not_message, line
@step('the file (\S+) should (not )?exist')
def check_existence(step, file_name, should_not_exist):
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
index 634d2fb..d2ac03f 100644
--- a/tests/lettuce/features/terrain/terrain.py
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -173,7 +173,8 @@ class RunningProcess:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
- Returns the matched string.
+ Returns a tuple containing the matched string, and the complete line
+ it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
@@ -183,7 +184,7 @@ class RunningProcess:
for string in strings:
if line.find(string) != -1:
full_file.close()
- return string
+ return (string, line)
wait_count = 0
while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
where = running_file.tell()
@@ -191,7 +192,7 @@ class RunningProcess:
if line:
for string in strings:
if line.find(string) != -1:
- return string
+ return (string, line)
else:
wait_count += 1
time.sleep(OUTPUT_WAIT_INTERVAL)
@@ -205,7 +206,8 @@ class RunningProcess:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
- Returns the matched string.
+ Returns a tuple containing the matched string, and the complete line
+ it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
@@ -219,7 +221,8 @@ class RunningProcess:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
- Returns the matched string.
+ Returns a tuple containing the matched string, and the complete line
+ it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
@@ -249,7 +252,7 @@ class RunningProcesses:
Fails if a process with the given name is already running.
"""
assert process_name not in self.processes,\
- "Process " + name + " already running"
+ "Process " + process_name + " already running"
self.processes[process_name] = RunningProcess(step, process_name, args)
def get_process(self, process_name):
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
new file mode 100644
index 0000000..70c3571
--- /dev/null
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -0,0 +1,11 @@
+Feature: Xfrin
+ Tests for Xfrin, specific for BIND 10 behaviour.
+
+ Scenario: Retransfer command
+ Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master
+ And I have bind10 running with configuration xfrin/retransfer_slave.conf
+ A query for www.example.org should have rcode REFUSED
+ Wait for bind10 stderr message CMDCTL_STARTED
+ When I send bind10 the command Xfrin retransfer example.org IN 127.0.0.1 47807
+ Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
+ A query for www.example.org should have rcode NOERROR
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 49ef0f1..565b306 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -50,7 +50,7 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth false
+echo 'config remove Boss/components b10-auth
config commit
quit
' | $RUN_BINDCTL \
@@ -61,7 +61,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth true
+echo 'config add Boss/components b10-auth
+config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
diff --git a/tests/tools/badpacket/badpacket.cc b/tests/tools/badpacket/badpacket.cc
index 86bbc47..be393d5 100644
--- a/tests/tools/badpacket/badpacket.cc
+++ b/tests/tools/badpacket/badpacket.cc
@@ -18,6 +18,7 @@
#include <config.h>
#include <exceptions/exceptions.h>
+#include <log/logger_support.h>
#include "command_options.h"
#include "scan.h"
@@ -44,6 +45,7 @@ using namespace isc::badpacket;
/// \brief Main Program
int main(int argc, char* argv[]) {
+ isc::log::initLogger("badpacket");
try {
// Parse command
diff --git a/tools/reorder_message_file.py b/tools/reorder_message_file.py
new file mode 100644
index 0000000..31f4941
--- /dev/null
+++ b/tools/reorder_message_file.py
@@ -0,0 +1,196 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Reorder Message File
+#
+# Reads a message file into memory, then outputs it with the messages and
+# associated descriptions in alphabetical order.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python reorder_message_file.py message_file
+#
+# Output is written to stdout.
+
+import sys
+
def remove_empty_leading_trailing(lines):
    """
    Strip leading and trailing runs of empty strings from a list.

    Empty entries embedded between non-empty ones are preserved; only
    the contiguous blocks of empty strings at either end of the list
    are dropped.

    Parameters:
        lines - list of strings, some of which may be empty.

    Return:
        New list with the leading and trailing empty-line runs removed
        (an empty list if every entry was empty, or the list was empty).
    """
    first = 0
    last = len(lines)

    # Advance past the leading run of empty strings.
    while first < last and not lines[first]:
        first += 1

    # Back off over the trailing run of empty strings.
    while last > first and not lines[last - 1]:
        last -= 1

    return lines[first:last]
+
+
def canonicalise_message_line(line):
    """
    Canonicalise a message-definition line.

    Given a line known to start with the '%' character (i.e. a line
    introducing a message), ensure the result is of the form
    '%<single-space>MESSAGE_IDENTIFIER<single-space>text'.

    Parameters:
        line - input line.  Known to start with a '%' and to have had
               leading and trailing spaces removed.

    Return:
        Canonicalised line.
    """
    # Cope with the degenerate case of a single "%".
    if len(line) == 1:
        return line

    # Discard the leading "%" and any whitespace that follows it.
    line = line[1:].lstrip()

    # Robustness: if only whitespace followed the "%" (e.g. "%\t"),
    # there is no message ID to extract; the original code would raise
    # an IndexError on words[0] here.  Return the bare marker instead.
    if not line:
        return "%"

    # The first word is the message ID...
    words = line.split()
    message_line = "% " + words[0]

    # ... and anything beyond it is the message text.
    if len(line) > len(words[0]):
        message_line = message_line + " " + line[len(words[0]):].lstrip()

    return message_line
+
+
def make_dict(lines):
    """
    Split the lines into message blocks and place them in a dictionary.

    Each block starts with a line beginning with '%' (the message
    definition) and runs up to, but not including, the next such line.
    Leading and trailing blank lines are trimmed from each stored block.

    Parameters:
        lines - list of lines containing the text of the message file
                (less the header).  The first line, if any, is expected
                to start with '%'.

    Returns:
        dictionary - map of the messages, keyed by the canonicalised
                line that holds the message ID.  Empty input yields an
                empty dictionary.  NOTE(review): a message ID appearing
                more than once keeps only its last block.
    """
    dictionary = {}

    # Guard against empty input: the original indexed lines[0]
    # unconditionally and raised IndexError on a file with no messages.
    if not lines:
        return dictionary

    message_key = canonicalise_message_line(lines[0])
    message_lines = [message_key]
    for line in lines[1:]:
        if line.startswith("%"):
            # Start of a new message: store the completed block.
            dictionary[message_key] = remove_empty_leading_trailing(message_lines)
            message_key = canonicalise_message_line(line)
            message_lines = [message_key]
        else:
            message_lines.append(line)

    # Store the final block.
    dictionary[message_key] = remove_empty_leading_trailing(message_lines)

    return dictionary
+
+
def print_dict(dictionary):
    """
    Print the message dictionary in sorted key order.

    Entries are written to stdout in ascending order of their keys,
    separated by a single blank line.  Each line of an entry is printed
    with leading and trailing whitespace removed.

    Parameters:
        dictionary - map holding the message dictionary.
    """
    for position, key in enumerate(sorted(dictionary)):

        # Blank separator line before every entry except the first.
        if position > 0:
            print("")

        # ... and then the entry itself, one stripped line at a time.
        for entry_line in dictionary[key]:
            print(entry_line.strip())
+
+
def process_file(filename):
    """
    Read a message file and print it with its messages sorted.

    The file header - everything up to the first line starting with the
    '%' sign - is copied to the output with leading and trailing spaces
    removed.  The remaining message blocks are then printed in
    alphabetical order of message ID.

    Parameters:
        filename - name of the message file to process.
    """
    # Read the file with a context manager so the handle is closed
    # promptly; the original left the file object unclosed.
    with open(filename) as message_file:
        lines = message_file.read().splitlines()

    # Copy the header (everything before the first '%' line) to the
    # output, stripped of leading and trailing spaces.
    index = 0
    while index < len(lines):
        if lines[index].startswith("%"):
            break
        print(lines[index].strip())
        index += 1

    # Put the remaining lines into the message dictionary and print it.
    print_dict(make_dict(lines[index:]))
+
+
# Main program
if __name__ == "__main__":

    # Check usage: exactly one argument, the message file, is expected.
    # NOTE: the original used a Python 2 print statement here, a
    # SyntaxError under Python 3 and inconsistent with the print()
    # calls used everywhere else in this file; the usage text also
    # named the script "reorder.py" rather than its actual file name.
    if len(sys.argv) != 2:
        print("Usage: python reorder_message_file.py message_file")
    else:
        process_file(sys.argv[1])
More information about the bind10-changes
mailing list