BIND 10 trac1310, updated. eb06cb8dfea727c5d9366583581ca674d23c4c2e Merge branch 'master' into trac1310
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Nov 29 01:55:44 UTC 2011
The branch, trac1310 has been updated
via eb06cb8dfea727c5d9366583581ca674d23c4c2e (commit)
via 5de824f59cd2ba7e8cbb3cc58c4cd42c585c09c3 (commit)
via 8bab1dfcb1f33d58bf64f4c86ca7ba860b57cc76 (commit)
via 5e07929601d0799df76eaaf3ac5165b634efc556 (commit)
via 81f62344db074bc5eea3aaf3682122fdec6451ad (commit)
via 1825a24fe6fcda419cf2cdcd05180aa1b18ca526 (commit)
via fa7208a015545459cf56b03001fa1e6681e52d3a (commit)
via 1a81569fb7c422d499f5a8eeef2d70d20e3284c6 (commit)
via c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca (commit)
via 64c2d16fff1dd9e903378a55087843ad058791f5 (commit)
via 7ab1afe9a76986c4f175c338fdd6a8076a9d6dc9 (commit)
via e99a54597a5bb6dde1a0240ab74ac010b5029afb (commit)
via f02b9adf8e899f9358a26e087cfb43a5d4657b07 (commit)
via 3d5f2c3c14bcbf9cb7441f61ac8f84bceb8e6594 (commit)
via c1171699a2b501321ab54207ad26e5da2b092d63 (commit)
via 8f5429e41cb99c615abc1de0ee549702ca638217 (commit)
via 52fa244c12eb6d708b0260b5554396df5d00b079 (commit)
via ce54ff203a48042d3fa085037a23b315ccc2ecca (commit)
via 8fe1dbdadf8ce7aa288ae08320f315ab56433cb6 (commit)
via f76fdc8db2c0c9eba1603af7fa0272e7955e20d8 (commit)
via 6247db1cab96103bc06a6a281963227084cfb68d (commit)
via a9dc55c6cc18e2ed28f79cfbbdf7408a64a04ca4 (commit)
via 1dddec95f5e398269b28473a094dd6ad00ce648b (commit)
via 9150be5e5d0d6c0a46e44a0bbcdbd235c74bd6a7 (commit)
via f11f46b276646364fc115783ccc3d706510a2ee8 (commit)
via 151a4b05228e38b2031d095e438b63ae75dc0b76 (commit)
via 3924f73fed9f8158918713b09672175f09a973e4 (commit)
via 411a806a41666b522ed35552588789d114cc1390 (commit)
via ab3f90da16d31fc6833d869686e07729d9b8c135 (commit)
via e12070c73b529d348f64f8f6e24d75ce710a8a12 (commit)
via 710e8207090f894b14eaa9834a9c6cd551ea950d (commit)
via 80c131f5b0763753d199b0fb9b51f10990bcd92b (commit)
via a01eb512f67a14855fc9be9fff561c3c86634e0b (commit)
via 635662711c673bbcfc8fac95c96cfdc33702ca94 (commit)
via 15e23bca2cf7f266d32c6bb30a142a80ee543227 (commit)
via ec1cc2b4be6e19519644534889865a3ee2c81a8a (commit)
via 277b80e0671586d8ace205cb53465b1f6f414466 (commit)
via a435f3ac50667bcb76dca44b7b5d152f45432b57 (commit)
via 6dd270220a4bac70fa4cd6a898e331b658fe0af2 (commit)
via 1bb5168b7014a83690d1bb363dbcc0fa6d8fd7f1 (commit)
via ddb6d109c0947f203eaa6265a22d2fb3b166db0b (commit)
via 2eb9f486619e27aee0684f840c85d152b3ddfe0f (commit)
via 71378c1048bb610c748788dabfd04e421f6b4ac0 (commit)
via de43982b90d0fafd6b4e1857e366a6cd983cfab7 (commit)
via 77d69c99f2b3cc8ee627d8f73174ead9f03da412 (commit)
via 3ff33cfedcca0cd1acb80a5cf2651f89403a82a9 (commit)
via cf297878998c80018ba9523a53ae4947fc7e6a5e (commit)
via 52f4e6faf56afb5c0432f88d5b1528090530c62e (commit)
via 13f108b25fbccc56b731bd5bcc505cdf48e91e91 (commit)
via 4d3aef6a83965e26781d6b74f0ff913926845c7c (commit)
via fb33c8d379f9e75b82edafff45d4dc13fda62630 (commit)
via 4f02b45248227dd98904b61bbcd2e6cff36b5fd6 (commit)
via 54d9d7c1597df3bcdf47d07db040f63f7008c6a7 (commit)
via 48c07943ac1dd24922f46cf970c214b5cf24813f (commit)
via bea7b0e3fde35a335bb9e6cf170b0fc240650275 (commit)
via 9b1c64b7d164b6b27d126e55391b2bbafeaf8c00 (commit)
via 96bf3ab5271347542e13b52e2c37b9c8810a6fad (commit)
via c59bb2dcd90a5d580a7f3c9e42a54a080f763add (commit)
via 319bc2d65301606aa938363dcb30a8519755886e (commit)
via d953caeeaf821743ed27ef4a47a45bef66615dc9 (commit)
via 5d382b4295b8455fae844a5ca94886788f6cb19b (commit)
via d08c42ad20f2c91bf64ef47ed893fa2aac4ff037 (commit)
via 08915b387e64f3cf9d9a86a5a21c4492db3a488c (commit)
via 1d4541dfd067cd2f0c9e155049c2b7f9d70fa896 (commit)
via ecf6a71b5845c6710119dd97b500c7edeb3f44c2 (commit)
via a24c6579ab039afd67ecb50a71b9fc8eabf9b6c7 (commit)
via 3647e8ff9c194c1c0a576558f4f49ba4ff2614e7 (commit)
via c3d71baca757b39e13968369e0afb39dd4472eb8 (commit)
via a9040d4aba8e3c01a77236c81f07e2b06b300918 (commit)
via 35556de064c193779c3cd5e5b0fde583f4a8d598 (commit)
via c4f22c20ee19e1ffba43914671c059a434f4518c (commit)
via 12b72af07f5e06cf172b115b0acba3fbe3554467 (commit)
via ecd9c5fc4b3cf747e2b5a221504feac3adeb236e (commit)
via fc0a31681f7a8e4198068be0038eb9a4f8a74ec7 (commit)
via d3db538710b6547cc2e04127fb5fc9d2d5a181f9 (commit)
via 2ab2fd55d4a12d1469060a3657893121114e2e2f (commit)
via 2dd7ee33a13a07a00e22fbc81ecb8b19b57efa8f (commit)
via 5cea4cfbee9770f4299f5a701af89f7cbf977ef4 (commit)
via 1af57091dc0c38cff538de2470275f25caeb2eab (commit)
via 256c0a08483ac2bf396dfa8424b4c02f0681a0f4 (commit)
via 8f74718cc2012ca68a44d9ed9996f479c6834101 (commit)
via 5c92f567d93977bd56a7ed2898c7bee098b552ab (commit)
via 956a0a589db0a8250ec94ece377657783ac15caf (commit)
via 39def1d39c9543fc485eceaa5d390062edb97676 (commit)
via bcb432839cacdf10172d49dec94292871aee3526 (commit)
via 164d651a0e4c1059c71f56b52ea87ac72b7f6c77 (commit)
via 09f6d6281a4203a91dcfb6c56e240c06f11935b6 (commit)
via 76fb414ea5257b639ba58ee336fae9a68998b30d (commit)
via e5f37058b67c641b8eb024bd48ca269ae9e41163 (commit)
via 934a07b6d0ebec8bab258398894905de32878a8b (commit)
via 40f6dd2b378f31f4ec561eeeac534874a02a8ae8 (commit)
via 84fa061af28d72e51939039bfcbb04e1febc3cb1 (commit)
via b54f1b460285db4d6ae89dd716098a88363b1511 (commit)
via c1138d13b2692fa3a4f2ae1454052c866d24e654 (commit)
via 35b1914ce6ab5481ce40f584729d0949746c2219 (commit)
via 4df29b3303dbce85b8143d8d74935b3c9283fb31 (commit)
via 33a956b09f22597d91929b22542913412757e279 (commit)
via ed91f985331705fc345bec838697c9bda4b6b7e4 (commit)
via 1219d81b49e51adece77dc57b5902fa1c6be1407 (commit)
via 8380ccceca1b8412fbc6742cb37dbd7de843ac50 (commit)
via 38d84c59fbc097e57d03ac10d6a83edc63c4cffa (commit)
via c0cc183880fc5e1949bcc97585c20ac2ab21e281 (commit)
via 2d85e22f10321fbc5b9cd12f70e90907cb01830f (commit)
via 1c9f121360e6e612d02d365d70bd0843f8f93457 (commit)
via d142274062ed21d53f9e0b2a85531c935580013c (commit)
via 5de9e8a440b6b4ed8c6bbce156d75b740ec4c1b5 (commit)
via 631c5c2d24ba8be2b12930cc8267b2298414d563 (commit)
via 1b3e21e08311d84d649a2780471e9a8b46143dca (commit)
via ddf219d781a40764999bd8b19c80f607c2783b57 (commit)
via 24c2111ed800e95bc62901cd3b2970692a205578 (commit)
via f9224368908dd7ba16875b0d36329cf1161193f0 (commit)
via 4a68215905542025570f06fcc703fa44d6b37cfd (commit)
via b8f67d200e64a2a9931b6d664781caf835f2ecd4 (commit)
via 315f4999df039dbb2baa77ee12afa0dfbe01dc25 (commit)
via 7344d2788cd06e54ca7ca3e3a3f69010dac80670 (commit)
via 46bd9a8e6e3a543f97af6213bc7e43d619064aa7 (commit)
via ce546dddcbbf7efc4778c1d0d4210ca139ed5bf9 (commit)
via fa89a0798d166574e089b38d7bd43a701eda5467 (commit)
via 12b1a920f219e627bb5860f0a0217cc5c86749e5 (commit)
via cd342dae58399be6cdfad55a466a76ee385ccb08 (commit)
via f9e81512329b71d6b5d94bafa789c63e763b2a72 (commit)
via 226dc3ee718e1320079d6c6d8642e0f0dda1bdef (commit)
via 962a91763b9ef79e887e52e22fa23462ff7d680e (commit)
via 170936d47b2e9ad3d5c3ceabf86026fca9795150 (commit)
via dbf32272f3b76b90678add39038fb6978c03ab3e (commit)
via 3e19362bc1ba7dc67a87768e2b172c48b32417f5 (commit)
via 295732d42d2b0a9641edfa352087033d8eff2794 (commit)
via 758ab4461e8792e563ce1e0ad069d53b5e15d8dd (commit)
via b449ad20a4f58eb96aec8cd7dd7bb857bdb5d14b (commit)
via 9f89f07adcc9ccdde454016f037076e04eb791c1 (commit)
via fdefb47da0a5d7203496738ba03d4e1737e8149e (commit)
via 93a5d45d9c1aa90249494608b8c2829059cc3b28 (commit)
via c1f5fb059e9c272dedc27a3f14fa8ed2fec71b95 (commit)
via fd1ae8e05771b151877ae3c082a7b3e3b32a20c7 (commit)
via 21887dffc4cd692ce23bfff1685fba0e2c1e55b0 (commit)
via c41c32c11999a34a46d2e20155358438d769f767 (commit)
via 181926059b0162e09c30b4b967b09294d713918e (commit)
via 466a968426ed9062d86239560492edf7dc72ee02 (commit)
via a59f28758abdb92721e010956bd421148643377b (commit)
via e09910d37b783b182ae2dc83f6cb272bff68cbb6 (commit)
via da3e9e54f1374d581d78f1d874ddafd427a622ab (commit)
via b34bf286c064d44746ec0b79e38a6177d01e6956 (commit)
via 648a187c5d7181019dc19531a1057bc3e6f70e96 (commit)
via 16b7feca0339f67acae30eb67d913bd3ef0298be (commit)
via 120946aa30b22c36995135b7d5bfcade4c26e192 (commit)
via 78770f52c7f1e7268d99e8bfa8c61e889813bb33 (commit)
via ff5154291678973eaa0483518302b74a62f0acba (commit)
via 498677a8877e4894fad598f9ec99974c414ef58c (commit)
via c4c93896137dd936066cd1a714569468bf248451 (commit)
via 713160c9bed3d991a00b2ea5e7e3e7714d79625d (commit)
via 9bab697bc984a6565a6f0dfe8a981f4809edc91c (commit)
via ab406229e29b7cfc470142ee0166086bf70790a3 (commit)
via e24f557e8208f43a8ade0855395c87b175bc351c (commit)
via 3f93372ba9416c9d759ea0c6d8981837c036448e (commit)
via b79e0ef1ad1ac5c64c8a131ea8e125ca6df066eb (commit)
via 3d3592d4b1e7d3b0b3164067e57c1343db691851 (commit)
via 84290dae3201ee83c8e4aad6f7e2b181d708811e (commit)
via 9b6f54409617896742151c6aab9f5f318b7f53c5 (commit)
via 36a5cd751a12ccbd31284ea19d0b10e8a5836b70 (commit)
via f1cb067ea86ab38810007ec6743e7c1f91042e99 (commit)
via 6ddab5f4ea56162d0834e22a68605a1a427cc8c2 (commit)
via cd4fd339a084dbfb1e2d35d5c008260de9d48572 (commit)
via e4b99333e4c9946741148b6c95ed070653bec0fe (commit)
via b0cb2b651ec620418e891db0d21791beadb81906 (commit)
via e9e0f96594eec741393fa197c1d91362c96109e1 (commit)
via 96e0aa96b5a2fd31833e9afe64bb8e4cc34e23c5 (commit)
via 48ee64bfbde99ce88eb305d2a751283b42c826ad (commit)
via cfecb1acb98f45a12864b7730ea58afbeb674c7b (commit)
via 9ab6902f20b57452eaecf8f737d37f8dedcd623a (commit)
via d9be597335af84bc93c9559bbd76fa85ef0f49c4 (commit)
via 8c57956e16dd09b528cd11dbf4c2fa51e48da359 (commit)
via e84f2aa5e9e493aa7dadfbd3b31753b5837d9069 (commit)
via dabf62d5444fe3a1e55e72aa393e0dddf188df7b (commit)
via ca3d2d1badee8e5e6d3c1f73fb29afdcc7692fa6 (commit)
via 94ec743d73153258d8a231e2e5126749ea00e3c8 (commit)
via dca136175cf0dde67a63f40953187ca60f90caad (commit)
via 625aea76304c024102cb5065f910e5121b1641f7 (commit)
via a4c51111cc0fc28c6517a11f8ae88682ab8e6996 (commit)
via 8a5b3e3b460e7f741b1560f73423c8d688db9d85 (commit)
via 275d091229e914a848408b785f0143541abed6d5 (commit)
via b5553ef764f6c8cb0acea25e14b6e7a6a3a4cd47 (commit)
via bdde86c05963e9d491015e906c1b899609417887 (commit)
via eb53cae4b35f858436cc20bf28ad06cbdb2211ab (commit)
via 11a4fe2def28da2ae83c94647a11fbb2114ec467 (commit)
via c2213ce7281be2aed47023a6f052bbec868a6028 (commit)
via 045c30f0dffebb30ad8862986be435748ed0efb6 (commit)
via a6fd03e989a1fd5ae9514774bb3b3bb2a6668765 (commit)
via 8c07f46adfdd748ee33b3b5e9d33a78a64dded10 (commit)
via 235ff5af7733a7d464b172c4424f8facf284fed6 (commit)
via 8f3f3e9da36c5a0cbbfa4e2a5ddc598be7fece4a (commit)
via fe04c9377836fcd387f79447906e7ec83911b5b2 (commit)
via 43de15e4b0bd0094910ecc4f4365744cb6c1eeab (commit)
via 5e2238494ec665b558a6bf3b6a2c7351c1e022ba (commit)
via 755cd222be64019ea3b8db62e6d2643e6b6374c7 (commit)
via 5720f2262f0a1e4b8b2dcb1b66b94431e0dc6ff2 (commit)
via 8780f998204e96767785b29cd5b0e58cbeb10e1f (commit)
via 0d94cca23a4f22d1bb953d62d38358a8b0e49f01 (commit)
via 4215dabae27f7b9b089ff8fafef2ba5425062fc5 (commit)
via 219879a5c8d6cb361d6d6f91d88c199930560994 (commit)
via 7003eecf6f7792d140e74bac444fb00eb7b8415b (commit)
via 0878c77ba4bcbaeb509f2bb7c2d52ee62864dadc (commit)
via efeb506e624945c6f21755621897a088715045b7 (commit)
via fda514f6b5ff65648709273dc62f960d85f4e066 (commit)
via 2afbc7d3564b16d49043d48fe5ed9dd343311861 (commit)
via ce28b51d36567d63b5258648f7fbe406baaa5677 (commit)
via 9753568c850855beecaabf500aea33483369d64f (commit)
via 7c6c725225eb89d9911b28aff0c6d80152e26aaf (commit)
via 0ad9b8c8482a134af7c47b64b412f642d08ce642 (commit)
via 132e0b02edf9a0cebccd64a183eb56839f42606f (commit)
via 2aac7b891f4ee43fa29bbd41ee3bd48c4a849010 (commit)
via bccc91bbd2496b87b408ebff3cd9c6880f952b1c (commit)
via 88147da513fdb22eb4e430390746f36c96304c7e (commit)
via 3a330862f1357a4e0edd570de5896785029f4530 (commit)
via 567f822d4758d13b84161d67118ac1bce08b4c47 (commit)
via f94f5bc089b09a77b34138bbf19ea71921a7950d (commit)
via e3406364189d62ba54d85c3d23b40cefd02af584 (commit)
via 6da32eaece41f360a87388c44528dca979c10ab0 (commit)
via 3dcdc74a5e0f8cb7fd0c6a3f6dee480e30199f03 (commit)
via 7fb9faf4602b6b4feff4c940942c12be838a8153 (commit)
via d60907a85ba3f762b81189588d1b7317b95e0521 (commit)
via b88b05b2a779554a0e3c345933104d42046fffaa (commit)
via 489a53541118413b38865c8a3cf84b24b8b7dfe2 (commit)
via 63f04832f2604868133a23d110ce6df5a9707993 (commit)
via de07e6a0ab66de4d3c7720dc93bc7d9198c9d26b (commit)
via 4ca71b858671d112fade23b449f2a59f14d1d300 (commit)
via 2139076757c1a14ecce96eafd1388f978732f8aa (commit)
via ab47b771999bd12171e65a8a3fb2ee512b709c4b (commit)
via ebe4e57805eda25ca347e0a9db8adad11fb3d4b5 (commit)
via d85912df5ef89ff95c3653403503f61d120a0761 (commit)
via 0f76bcddad8050baf811b0eaa5a117cc61dcbba1 (commit)
via f01fb1d89b20b23c0a680b1a97dc83e5a174e2e6 (commit)
via d2e805bb39d06f0ed47c49879909f35b5d341530 (commit)
via 92794c72752a77005c2f9c7683fd2c65d7d802e9 (commit)
via 046729c74341bb2ed1e6f60f81470cf6a6883000 (commit)
via 36db2f897ac139ca9b71ccee07a7b1ba1e3aee7b (commit)
via 3000256b60ee6a2c19a7188be4d17eca833ce869 (commit)
via edf044e9e2f1572b618ec2438cea1cad46432276 (commit)
via 573abf93bec24753aebb5a6c70d8f50def521879 (commit)
via d287d9c92ecfb59d2c9f525cf79c7bb5167984f6 (commit)
via 50e96053742a30584f91a6bdb4b788977cd166bf (commit)
via 06d6be693064252ed2535fc8685ca4e7b8db0989 (commit)
via f1e08d75cabc45454a9bde86158dc8c7348d7f9d (commit)
via cc48074a9fec60ef9ba69991549f9e167e620225 (commit)
via 7a5903389ed505f6c7ca4c87adf705216d11d1af (commit)
via 8e8607c6faa34d9493a831054ecb64281f1f06c7 (commit)
via d99d546be040419fd49ad3be179eb2206f5023de (commit)
via 4ab7d17edc10ce4f7b834709aa009aba4db9d877 (commit)
via df02b63fe1176c572a7eee996921f211ca970953 (commit)
via f8a64959bc5f3ddf68ba4d01bee092bf4f1f9558 (commit)
via 7e96227163334ecd54e506bd2cedb58d3f6cf91d (commit)
via ca42fb6438b70ef569d00dc07b1bb23c0f6124f2 (commit)
via bcb37a2f6b11128620bb34a0c2d3dbf7334c0ab7 (commit)
via d17ae6dc7160a471abdd05f22aacc359df54b4e4 (commit)
via d9319841c509648c1ac18fec2c3d2b2c08313eb9 (commit)
via 6d5f34008d7e793546fd990cad11e40268c0ff04 (commit)
via 89b3af8226cb89bcc59ceff5e9547dbfc5b30665 (commit)
via d0a7cf4a98daf0ec8759640a91a12059cece6c6d (commit)
via 5dc6be6febd523e202771cd11624efc29854349c (commit)
via f230c7d18b68d5c03131089a4f5c9739af7f9d83 (commit)
via e1682a42d23d36a3647878e13681dcd659622818 (commit)
via e45fa27d90ab3ea7b1081ca7d9513f63f5083b8d (commit)
via 1e9bc2c16ef78f35ec35e340c696b4bdc10b47b2 (commit)
via 85a2ce538c6f939ca539347676e5587228a29895 (commit)
via d1773b2ef6f98c26493ae76783158fc2ae6fbe52 (commit)
via 2f51afcbc57c6d58e7d90f37962f3b93bc768e1b (commit)
via 0b9c1b299f2078ab1a7bf08759a463eb179f0365 (commit)
via 918c35143eb61d6e0ac96e98f2a95b12d55fdc0c (commit)
via 480da1fe075da66aa8a144d37c23bac2fcfa1e2c (commit)
via 81b1ba0e9cf67bc5e8ee6040b28436d4c64b72cc (commit)
via fc17063223655ab14b4db33bd63dd33fdc5ed5ac (commit)
via 61feac8366f972b60410b925e36a9267338b3e9a (commit)
via 2085b2255a79c0e5a04fe457bbb228d2fa24953b (commit)
via 2d20ee347d82f840328c2bddd014cdf232962843 (commit)
via 1ff0be2456cfaf9279970ae9a30a48d6267b96cf (commit)
via 80447181a64656b97afa9ab71440907017e873f4 (commit)
via 3878aa43817deaee33b21956d3066baef77a24ce (commit)
via cb1c34cd2ffb876819441b4869a66a4cb500a8ba (commit)
via 01b4b95b5fb7aa99765f29ffc61f5131173148eb (commit)
via a3a4e317a91c075f0d16de7d16cc652e508df101 (commit)
via 96086ea69576acae7d59e1d7665f622bd526c7c1 (commit)
via 7c229ebaca82e06899126f9b364fe524ec6d4b56 (commit)
via 6b600cb1816705b04470ba2d0aca64dfdf8f55d2 (commit)
via c383ebc71434baa5cb314b3de3f3e18f39ebd0c7 (commit)
via d5ec22cc344998038cf68b6fdf309ad2a12b2b5e (commit)
via 2024a1554624868e5f0a4d09b75c3ddf39dd342d (commit)
via 10b6bc17b7c264f41dcdba64fc3a79904c06164a (commit)
via a48e7fa14f2ef90bce27ff3e7aa4a93165e08d37 (commit)
via 62809f71c563128cb3cc467d867c621c61dbb926 (commit)
via 08d090b4685220d3f286e1506e1a3c884146122f (commit)
via 7b667cbd3bd3baeaceb60b987ab9770684ff5038 (commit)
via ab48bd8228405d50b149c502d7f73b5eb1a57608 (commit)
via ecf9f48f4b4c3beaf97ae0e83c11f4547f024734 (commit)
via 4d39e13d7f5ae5c30277f602f669f0421e2bf05c (commit)
via 3bf84d5c678f9f86df6382cf30e694404e2f77cb (commit)
via 12a6217c59bf48ead2e11aaaedb774af7a618701 (commit)
via cb57c9fcaa897752dd7599dcc15d647fb880285f (commit)
via 1294219279910a89d4a99e6292cea8e13a4c301e (commit)
via 61dd61b8f259b0938646fa2539fe928608a0fbad (commit)
via f1306bd835659173f3fffcfbe1bf8971dc62efd9 (commit)
via 7cc8a7d6c32472021b34e43d43a288cfa263f007 (commit)
via 599ec7f889bba386c838ec85735b203514905d9d (commit)
from e35e9b8a1cef995079ef15b0321aa7b420139226 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit eb06cb8dfea727c5d9366583581ca674d23c4c2e
Merge: e35e9b8a1cef995079ef15b0321aa7b420139226 5de824f59cd2ba7e8cbb3cc58c4cd42c585c09c3
Author: Xie Jiagui <xiejiagui at cnnic.cn>
Date: Tue Nov 29 09:54:47 2011 +0800
Merge branch 'master' into trac1310
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 116 +++-
Makefile.am | 2 +-
compatcheck/Makefile.am | 8 +
compatcheck/README | 5 +
compatcheck/sqlite3-difftbl-check.py.in | 60 ++
configure.ac | 5 +-
doc/guide/bind10-guide.html | 241 +++++--
doc/guide/bind10-guide.txt | 205 +++++-
doc/guide/bind10-guide.xml | 245 ++++++-
doc/guide/bind10-messages.html | 482 +++++++++---
doc/guide/bind10-messages.xml | 549 +++++++++++---
src/bin/auth/auth_srv.cc | 24 +-
src/bin/auth/tests/auth_srv_unittest.cc | 155 +++-
src/bin/bind10/bind10.8 | 220 +++++-
src/bin/bind10/bind10.xml | 215 +++++-
src/bin/bind10/bind10_src.py.in | 171 ++---
src/bin/bind10/bob.spec | 2 +-
src/bin/bind10/tests/bind10_test.py.in | 62 +--
src/bin/stats/stats-httpd-xml.tpl | 23 +-
src/bin/stats/stats-httpd-xsd.tpl | 38 +-
src/bin/stats/stats-httpd-xsl.tpl | 27 +-
src/bin/stats/stats.py.in | 8 +-
src/bin/stats/stats_httpd.py.in | 511 ++++++++++---
src/bin/stats/stats_httpd_messages.mes | 6 +
src/bin/stats/tests/Makefile.am | 2 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 823 +++++++++++++++++---
src/bin/stats/tests/b10-stats_test.py | 194 ++++-
src/bin/stats/tests/test_utils.py | 59 ++-
src/bin/xfrin/tests/Makefile.am | 2 +-
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 11264 -> 12288 bytes
src/bin/xfrin/tests/xfrin_test.py | 22 +-
src/bin/xfrin/xfrin.py.in | 5 +-
src/bin/xfrout/b10-xfrout.8 | 13 +
src/bin/xfrout/b10-xfrout.xml | 25 +
src/bin/xfrout/tests/Makefile.am | 8 +
src/bin/xfrout/tests/testdata/creatediff.py | 58 ++
src/bin/xfrout/tests/testdata/example.com | 6 +
.../tests/testdata/test.sqlite3} | Bin 11264 -> 12288 bytes
src/bin/xfrout/tests/xfrout_test.py.in | 727 ++++++++++++-----
src/bin/xfrout/xfrout.py.in | 483 ++++++++----
src/bin/xfrout/xfrout_messages.mes | 120 +++-
src/lib/datasrc/client.h | 88 ++-
src/lib/datasrc/data_source.h | 12 +
src/lib/datasrc/database.cc | 293 ++++++--
src/lib/datasrc/database.h | 83 ++-
src/lib/datasrc/datasrc_messages.mes | 28 +
src/lib/datasrc/memory_datasrc.cc | 64 ++-
src/lib/datasrc/memory_datasrc.h | 9 +-
src/lib/datasrc/sqlite3_accessor.cc | 326 +++++++-
src/lib/datasrc/sqlite3_accessor.h | 60 ++-
src/lib/datasrc/tests/Makefile.am | 1 +
src/lib/datasrc/tests/client_unittest.cc | 11 +-
src/lib/datasrc/tests/database_unittest.cc | 560 +++++++++++++-
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 48 ++
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 203 ++++--
src/lib/datasrc/tests/testdata/Makefile.am | 5 -
src/lib/datasrc/tests/testdata/brokendb.sqlite3 | Bin 2048 -> 4096 bytes
.../lib/datasrc/tests/testdata/diffs.sqlite3 | Bin 14336 -> 16384 bytes
src/lib/datasrc/tests/testdata/diffs_table.sql | 123 +++
src/lib/datasrc/tests/testdata/example.org.sqlite3 | Bin 14336 -> 14336 bytes
.../datasrc/tests/testdata/example2.com.sqlite3 | Bin 11264 -> 14336 bytes
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 11264 -> 13312 bytes
src/lib/datasrc/tests/testdata/test-root.sqlite3 | Bin 14336 -> 17408 bytes
src/lib/datasrc/zone.h | 106 +++
src/lib/dns/rdata/generic/soa_6.cc | 6 +
src/lib/dns/rdata/generic/soa_6.h | 2 +
src/lib/dns/tests/rdata_soa_unittest.cc | 5 +
src/lib/exceptions/exceptions.h | 11 +
src/lib/python/isc/bind10/component.py | 66 ++-
src/lib/python/isc/bind10/special_component.py | 12 +-
src/lib/python/isc/bind10/tests/component_test.py | 121 +++-
src/lib/python/isc/datasrc/Makefile.am | 2 +
src/lib/python/isc/datasrc/client_inc.cc | 88 ++-
src/lib/python/isc/datasrc/client_python.cc | 100 ++-
src/lib/python/isc/datasrc/datasrc.cc | 41 +
src/lib/python/isc/datasrc/journal_reader_inc.cc | 80 ++
...iterator_python.cc => journal_reader_python.cc} | 120 +--
.../{iterator_python.h => journal_reader_python.h} | 27 +-
src/lib/python/isc/datasrc/sqlite3_ds.py | 8 +
src/lib/python/isc/datasrc/tests/Makefile.am | 1 +
src/lib/python/isc/datasrc/tests/datasrc_test.py | 289 +++++++-
.../isc/datasrc/tests/testdata/example.com.sqlite3 | Bin 43008 -> 44032 bytes
.../datasrc/tests/testdata/test.sqlite3.nodiffs | Bin 43008 -> 43008 bytes
src/lib/python/isc/log/log.cc | 188 +++---
src/lib/python/isc/log/tests/log_test.py | 31 +
src/lib/python/isc/notify/notify_out.py | 155 +++--
src/lib/python/isc/notify/notify_out_messages.mes | 21 +
src/lib/python/isc/notify/tests/Makefile.am | 9 +
src/lib/python/isc/notify/tests/notify_out_test.py | 73 +--
.../isc/notify/tests/testdata/brokentest.sqlite3} | Bin 11264 -> 11264 bytes
.../python/isc/notify/tests/testdata/example.com | 10 +
.../python/isc/notify/tests/testdata/example.net | 14 +
.../isc/notify/tests/testdata/multisoa.example | 5 +
.../python/isc/notify/tests/testdata/nons.example | 3 +
.../python/isc/notify/tests/testdata/nosoa.example | 7 +
.../python/isc/notify/tests/testdata/test.sqlite3} | Bin 11264 -> 13312 bytes
src/lib/python/isc/testutils/Makefile.am | 2 +-
src/lib/python/isc/testutils/rrset_utils.py | 63 ++
src/lib/python/isc/xfrin/diff.py | 18 +-
src/lib/python/isc/xfrin/libxfrin_messages.mes | 10 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 26 +-
tests/tools/badpacket/badpacket.cc | 2 +
102 files changed, 7761 insertions(+), 1802 deletions(-)
create mode 100644 compatcheck/Makefile.am
create mode 100644 compatcheck/README
create mode 100755 compatcheck/sqlite3-difftbl-check.py.in
create mode 100755 src/bin/xfrout/tests/testdata/creatediff.py
create mode 100644 src/bin/xfrout/tests/testdata/example.com
copy src/bin/{xfrin/tests/testdata/example.com.sqlite3 => xfrout/tests/testdata/test.sqlite3} (77%)
copy tests/lettuce/data/example.org.sqlite3 => src/lib/datasrc/tests/testdata/diffs.sqlite3 (68%)
create mode 100644 src/lib/datasrc/tests/testdata/diffs_table.sql
create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
copy src/lib/python/isc/datasrc/{iterator_python.cc => journal_reader_python.cc} (61%)
copy src/lib/python/isc/datasrc/{iterator_python.h => journal_reader_python.h} (61%)
copy src/lib/{ => python/isc}/datasrc/tests/testdata/test.sqlite3.nodiffs (100%)
copy src/{bin/xfrin/tests/testdata/example.com.sqlite3 => lib/python/isc/notify/tests/testdata/brokentest.sqlite3} (77%)
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.com
create mode 100644 src/lib/python/isc/notify/tests/testdata/example.net
create mode 100644 src/lib/python/isc/notify/tests/testdata/multisoa.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nons.example
create mode 100644 src/lib/python/isc/notify/tests/testdata/nosoa.example
copy src/{bin/xfrin/tests/testdata/example.com.sqlite3 => lib/python/isc/notify/tests/testdata/test.sqlite3} (69%)
create mode 100644 src/lib/python/isc/testutils/rrset_utils.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 4a72bbc..eaec9e9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,6 +1,114 @@
+bind10-devel-20111128 released on November 28, 2011
+
+331. [bug] shane
+	Fixed a bug in the data source library where a zone with more labels
+ than an out-of-bailiwick name server would cause an exception to
+ be raised.
+ (Trac #1430, git 81f62344db074bc5eea3aaf3682122fdec6451ad)
+
+330. [bug] jelte
+ Fixed a bug in b10-auth where it would sometimes fail because it
+ tried to check for queued msgq messages before the session was
+ fully running.
+ (git c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca)
+
+329. [doc] vorner, jreed
+ Document the bind10 run control configuration in guide and
+ manual page.
+ (Trac #1341, git c1171699a2b501321ab54207ad26e5da2b092d63)
+
+328. [func] jelte
+ b10-auth now passes IXFR requests on to b10-xfrout, and no longer
+ responds to them with NOTIMPL.
+ (Trac #1390, git ab3f90da16d31fc6833d869686e07729d9b8c135)
+
+327. [func] jinmei
+ b10-xfrout now supports IXFR. (Right now there is no user
+ configurable parameter about this feature; b10-xfrout will
+ always respond to IXFR requests according to RFC1995).
+ (Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
+
+326. [build]* jinmei
+ Added a check script for the SQLite3 schema version. It will be
+ run at the beginning of 'make install', and if it detects an old
+	version of the schema, installation will stop. You'll then need to
+ upgrade the database file by following the error message.
+ (Trac #1404, git a435f3ac50667bcb76dca44b7b5d152f45432b57)
+
+325. [func] jinmei
+ Python isc.datasrc: added interfaces for difference management:
+ DataSourceClient.get_updater() now has the 'journaling' parameter
+ to enable storing diffs to the data source, and a new class
+ ZoneJournalReader was introduced to retrieve them, which can be
+ created by the new DataSourceClient.get_journal_reader() method.
+ (Trac #1333, git 3e19362bc1ba7dc67a87768e2b172c48b32417f5,
+ git 39def1d39c9543fc485eceaa5d390062edb97676)
+
+324. [bug] jinmei
+	Fixed a reference leak in the isc.log Python module. Almost all
+	BIND 10 Python programs had a memory leak (even if the pace of
+	the leak may have been slow) due to this bug.
+ (Trac #1359, git 164d651a0e4c1059c71f56b52ea87ac72b7f6c77)
+
+323. [bug] jinmei
+ b10-xfrout incorrectly skipped adding TSIG RRs to some
+ intermediate responses (when TSIG is to be used for the
+	responses). While RFC2845 optionally allows skipping intermediate
+ TSIGs (as long as the digest for the skipped part was included
+ in a later TSIG), the underlying TSIG API doesn't support this
+ mode of signing.
+ (Trac #1370, git 76fb414ea5257b639ba58ee336fae9a68998b30d)
+
+322. [func] jinmei
+	datasrc: Added a C++ API for retrieving the difference between two
+	versions of a zone. A new ZoneJournalReader class was introduced for this
+ purpose, and a corresponding factory method was added to
+ DataSourceClient.
+ (Trac #1332, git c1138d13b2692fa3a4f2ae1454052c866d24e654)
+
+321. [func]* jinmei
+ b10-xfrin now installs IXFR differences into the underlying data
+ source (if it supports journaling) so that the stored differences
+ can be used for subsequent IXFR-out transactions.
+	Note: this is a backward-incompatible change for older sqlite3
+ database files. They need to be upgraded to have a "diffs" table.
+ (Trac #1376, git 1219d81b49e51adece77dc57b5902fa1c6be1407)
+
+320. [func]* vorner
+ The --brittle switch was removed from the bind10 executable.
+ It didn't work after change #316 (Trac #213) and the same
+ effect can be accomplished by declaring all components as core.
+ (Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
+
+319. [func] naokikambe
+	b10-stats-httpd was updated. In addition to access to all
+	statistics items of all modules, a specific item or the items
+	of a specific module can now be accessed. For example, such
+	requests use URIs like "/bind10/statistics/xml/Auth" or
+	"/bind10/statistics/xml/Auth/queries.tcp". The list of all
+	possible module names and item names is shown in the root
+	document, whose URI is "/bind10/statistics/xml". This change
+	applies not only to the XML documents but also to the XSD and
+	XSL documents.
+ (Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
+
+318. [func] stephen
+ Add C++ API for accessing zone difference information in
+ database-based data sources.
+ (Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
+
+317. [func] vorner
+ datasrc: the getUpdater method of DataSourceClient supports an
+	optional 'journaling' parameter to indicate that the generated
+	updater should store diffs. The database-based derived class
+	implements this extension.
+ (Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
+
316. [func]* vorner
- The configuration of what parts of the system run is more flexible now.
- Everything that should run must have an entry in Boss/components.
+ The configuration of what parts of the system run is more
+ flexible now. Everything that should run must have an
+ entry in Boss/components.
(Trac #213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
315. [func] tomek
@@ -70,7 +178,7 @@
automatically.
(Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
-306. [bug] Stephen
+306. [bug] stephen
Boss process now waits for the configuration manager to initialize
itself before continuing with startup. This fixes a race condition
whereby the Boss could start the configuration manager and then
@@ -484,7 +592,7 @@ bind10-devel-20110705 released on July 05, 2011
(Trac #542, git 1aa773d84cd6431aa1483eb34a7f4204949a610f)
243. [func]* feng
- Add optional hmac algorithm SHA224/384/812.
+ Add optional hmac algorithm SHA224/384/512.
(Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
bind10-devel-20110519 released on May 19, 2011
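
To make the journaling interfaces described in ChangeLog entry 325 above more
concrete, here is a minimal Python sketch. Only get_updater()'s 'journaling'
parameter and the get_journal_reader() factory are taken from the entry; the
constructor arguments, the positional argument order, the result constant and
the get_next_diff() call are assumptions for illustration, not statements
about the committed API.

    # Minimal sketch of the isc.datasrc journaling interfaces from entry 325.
    # Everything marked "assumed" below is illustrative only.
    import isc.datasrc
    import isc.dns                      # assumed module providing Name

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "/tmp/example.org.sqlite3"}')  # assumed config form
    zone = isc.dns.Name("example.org")

    # Apply an update while recording diffs (third argument: journaling).
    updater = client.get_updater(zone, False, True)
    # ... updater.add_rrset()/delete_rrset() calls, then updater.commit() ...

    # Later, retrieve the stored differences between two SOA serials.
    result, reader = client.get_journal_reader(zone, 1, 2)   # assumed return pair
    if result == isc.datasrc.ZoneJournalReader.SUCCESS:      # assumed constant
        rrset = reader.get_next_diff()    # assumed to mirror the C++ getNextDiff()
        while rrset is not None:
            print(rrset.to_text())
            rrset = reader.get_next_diff()

The sqlite3-difftbl-check.py script added later in this commit provides the
"diffs" table that this journaling relies on (see entries 321 and 326).
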
diff --git a/Makefile.am b/Makefile.am
index 50aa6b9..cc91a56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = doc src tests
+SUBDIRS = compatcheck doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
new file mode 100644
index 0000000..029578d
--- /dev/null
+++ b/compatcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_SCRIPTS = sqlite3-difftbl-check.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a near future version.
+install-data-local:
+ $(PYTHON) sqlite3-difftbl-check.py \
+ $(localstatedir)/$(PACKAGE)/zone.sqlite3
diff --git a/compatcheck/README b/compatcheck/README
new file mode 100644
index 0000000..8381e60
--- /dev/null
+++ b/compatcheck/README
@@ -0,0 +1,5 @@
+This directory is a collection of compatibility checker programs.
+They will be run before any other installation attempts on 'make install'
+to see if the installation causes any substantial compatibility problems
+with existing configurations. If any checker program finds an issue,
+'make install' will stop at that point.
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
new file mode 100755
index 0000000..e3b7b91
--- /dev/null
+++ b/compatcheck/sqlite3-difftbl-check.py.in
@@ -0,0 +1,60 @@
+#!@PYTHON@
+
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, sqlite3, sys
+from optparse import OptionParser
+
+usage = 'usage: %prog [options] db_file'
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--upgrade", action="store_true",
+ dest="upgrade", default=False,
+ help="Upgrade the database file [default: %default]")
+(options, args) = parser.parse_args()
+if len(args) == 0:
+ parser.error('missing argument')
+
+db_file = args[0]
+
+# If the file doesn't exist, there's nothing to do
+if not os.path.exists(db_file):
+ sys.exit(0)
+
+conn = sqlite3.connect(db_file)
+cur = conn.cursor()
+try:
+ # This can be anything that works iff the "diffs" table exists
+ cur.execute('SELECT name FROM diffs DESC LIMIT 1')
+except sqlite3.OperationalError as ex:
+ # If it fails with 'no such table', create a new one or fail with
+ # warning depending on the --upgrade command line option.
+ if str(ex) == 'no such table: diffs':
+ if options.upgrade:
+ cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
+ 'zone_id INTEGER NOT NULL, ' +
+ 'version INTEGER NOT NULL, ' +
+ 'operation INTEGER NOT NULL, ' +
+ 'name STRING NOT NULL COLLATE NOCASE, ' +
+ 'rrtype STRING NOT NULL COLLATE NOCASE, ' +
+ 'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
+ else:
+ sys.stdout.write('Found an older version of SQLite3 DB file: ' +
+ db_file + '\n' + "Perform '" + os.getcwd() +
+ "/sqlite3-difftbl-check.py --upgrade " +
+ db_file + "'\n" +
+ 'before continuing install.\n')
+ sys.exit(1)
+conn.close()
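
As a companion to the check script above, the following sketch shows how the
"diffs" table created by its --upgrade option can be inspected with the
standard sqlite3 module alone. The column list comes from the CREATE TABLE
statement above; the database file name is a placeholder, and no meaning
beyond the schema is assumed for the stored rows.

    # Sketch: inspect the "diffs" table that sqlite3-difftbl-check.py --upgrade
    # creates.  Column names come from the CREATE TABLE statement above; the
    # database file name is a placeholder.
    import sqlite3

    conn = sqlite3.connect('zone.sqlite3')
    cur = conn.cursor()

    # Check for the table explicitly instead of relying on a failing query.
    cur.execute("SELECT name FROM sqlite_master "
                "WHERE type = 'table' AND name = 'diffs'")
    if cur.fetchone() is None:
        print("no diffs table; run sqlite3-difftbl-check.py --upgrade first")
    else:
        cur.execute("SELECT zone_id, version, operation, name, rrtype, ttl, rdata "
                    "FROM diffs ORDER BY id")
        for row in cur.fetchall():
            print(row)
    conn.close()

On an un-upgraded file this reports the missing table the same way the check
script does, just without relying on a failing query.
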
diff --git a/configure.ac b/configure.ac
index 2692ddb..26c1e34 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20111021, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20111129, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
AM_INIT_AUTOMAKE
AC_CONFIG_HEADERS([config.h])
@@ -816,6 +816,7 @@ AM_CONDITIONAL(INSTALL_CONFIGURATIONS, test x$install_configurations = xyes || t
AC_CONFIG_FILES([Makefile
doc/Makefile
doc/guide/Makefile
+ compatcheck/Makefile
src/Makefile
src/bin/Makefile
src/bin/bind10/Makefile
@@ -937,6 +938,7 @@ AC_CONFIG_FILES([Makefile
tests/tools/badpacket/tests/Makefile
])
AC_OUTPUT([doc/version.ent
+ compatcheck/sqlite3-difftbl-check.py
src/bin/cfgmgr/b10-cfgmgr.py
src/bin/cfgmgr/tests/b10-cfgmgr_test.py
src/bin/cmdctl/cmdctl.py
@@ -1016,6 +1018,7 @@ AC_OUTPUT([doc/version.ent
tests/system/ixfr/in-3/setup.sh
tests/system/ixfr/in-4/setup.sh
], [
+ chmod +x compatcheck/sqlite3-difftbl-check.py
chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
chmod +x src/bin/xfrin/run_b10-xfrin.sh
chmod +x src/bin/xfrout/run_b10-xfrout.sh
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 97ffb84..2972cdf 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,21 +1,21 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
- 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20111021. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
+ 20111021.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110809.
+ This is the reference guide for BIND 10 version 20111021.
The most up-to-date version of this document (in PDF, HTML,
and plain text formats), along with other documents for
BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
- </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b
10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfe
rs</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></dd></dl><
/div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a><
/span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Ma
nually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><s
pan class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="list-of-tables"><p><b>List of Tables</b></p><dl><dt>3.1. <a href="#id1168229437338"></a></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110809.
+ BIND 10 version 20111021.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
@@ -315,11 +315,11 @@
<code class="filename">var/bind10-devel/</code> —
data source and configuration databases.
</li></ul></div><p>
- </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
+ </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></div><p>
BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
- will also restart processes that exit unexpectedly.
+ will also restart some processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
@@ -327,17 +327,20 @@
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
- The <span class="command"><strong>b10-msgq</strong></span> and <span class="command"><strong>b10-cfgmgr</strong></span>
+ The <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span> and
+ <span class="command"><strong>b10-cfgmgr</strong></span>
services make up the core. The <span class="command"><strong>b10-msgq</strong></span> daemon
provides the communication channel between every part of the system.
The <span class="command"><strong>b10-cfgmgr</strong></span> daemon is always needed by every
module, if only to send information about themselves somewhere,
but more importantly to ask about their own settings, and
- about other modules.
- The <span class="command"><strong>bind10</strong></span> master process will also start up
+ about other modules. The <span class="command"><strong>b10-sockcreator</strong></span> will
+ allocate sockets for the rest of the system.
+ </p><p>
+ In its default configuration, the <span class="command"><strong>bind10</strong></span>
+ master process will also start up
<span class="command"><strong>b10-cmdctl</strong></span> for admins to communicate with the
- system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service or
- <span class="command"><strong>b10-resolver</strong></span> for recursive name service,
+ system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service,
<span class="command"><strong>b10-stats</strong></span> for statistics collection,
<span class="command"><strong>b10-xfrin</strong></span> for inbound DNS zone transfers,
<span class="command"><strong>b10-xfrout</strong></span> for outbound DNS zone transfers,
@@ -351,7 +354,107 @@
the process names for the Python-based daemons will be renamed
to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
This is not needed on some operating systems.
- </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Configuration of started processes"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="bind10.config"></a>Configuration of started processes</h2></div></div></div><p>
+ The processes to be started can be configured, with the exception
+ of the <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span>
+ and <span class="command"><strong>b10-cfgmgr</strong></span>.
+ </p><p>
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process
+ (currently there's also one component which doesn't represent
+ a process). If you didn't want to transfer out at all (your server
+ is a slave only), you would just remove the corresponding component
+      from the set, like this, and the process would be stopped immediately
+ (and not started on the next startup):
+ </p><pre class="screen">> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+ </p><p>
+      To add a process to the set, let's say the resolver (which is not started
+ by default), you would do this:
+ </p><pre class="screen">> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+      Now, what this means: we add an entry called b10-resolver. It is both
+      the name used to reference this component in the configuration and the
+      name of the process to start. Then we set some parameters for how to
+      start it.
+ </p><p>
+ The special one is for components that need some kind of special care
+ during startup or shutdown. Unless specified, the component is started
+      in the usual way. This is the list of components that need to be started
+ in a special way, with the value of special used for them:
+ </p><div class="table"><a name="id1168229437338"></a><p class="title"><b>Table 3.1. </b></p><div class="table-contents"><table border="1"><colgroup><col align="left"><col align="left"><col align="left"></colgroup><thead><tr><th align="left">Component</th><th align="left">Special</th><th align="left">Description</th></tr></thead><tbody><tr><td align="left">b10-auth</td><td align="left">auth</td><td align="left">Authoritative server</td></tr><tr><td align="left">b10-resolver</td><td align="left">resolver</td><td align="left">The resolver</td></tr><tr><td align="left">b10-cmdctl</td><td align="left">cmdctl</td><td align="left">The command control (remote control interface)</td></tr><tr><td align="left">setuid</td><td align="left">setuid</td><td align="left">Virtual component, see below</td></tr></tbody></table></div></div><p><br class="table-break">
+ </p><p>
+ The kind specifies how a failure of the component should
+ be handled. If it is set to <span class="quote">“<span class="quote">dispensable</span>”</span>
+ (the default unless you set something else), it will get
+ started again if it fails. If it is set to <span class="quote">“<span class="quote">needed</span>”</span>
+ and it fails at startup, the whole <span class="command"><strong>bind10</strong></span>
+      shuts down and exits with an error exit code. But if it fails
+      some time later, it is just started again. If you set it
+      to <span class="quote">“<span class="quote">core</span>”</span>, you indicate that the system is
+      not usable without the component and if such a component
+ fails, the system shuts down no matter when the failure
+ happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any
+ other components as core as well if you wish (but you can
+ turn these off, they just can't fail).
+ </p><p>
+      The priority defines the order in which the components should start.
+      The ones with a higher number are started sooner than the ones with
+      a lower one. If you don't set it, 0 (zero) is used as the priority.
+ </p><p>
+ There are other parameters we didn't use in our example.
+ One of them is <span class="quote">“<span class="quote">address</span>”</span>. It is the address
+ used by the component on the <span class="command"><strong>b10-msgq</strong></span>
+ message bus. The special components already know their
+ address, but the usual ones don't. The address is by
+ convention the thing after <span class="emphasis"><em>b10-</em></span>, with
+      the first letter capitalized (e.g. <span class="command"><strong>b10-stats</strong></span>
+ would have <span class="quote">“<span class="quote">Stats</span>”</span> as its address).
+
+ </p><p>
+ The last one is process. It is the name of the process to be started.
+ It defaults to the name of the component if not set, but you can use
+ this to override it.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ This system allows you to start the same component multiple times
+ (by including it in the configuration with different names, but the
+ same process setting). However, the rest of the system doesn't expect
+ such a situation, so it would probably not do what you want. Such
+ support is yet to be implemented.
+ </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ The configuration is quite powerful, but that includes
+ a lot of space for mistakes. You could turn off the
+ <span class="command"><strong>b10-cmdctl</strong></span>, but then you couldn't
+ change it back the usual way, as it would require it to
+ be running (you would have to find and edit the configuration
+ directly). Also, some modules might have dependencies
+ -- <span class="command"><strong>b10-stats-httpd</strong></span> needs
+ <span class="command"><strong>b10-stats</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>
+ needs <span class="command"><strong>b10-auth</strong></span> to be running, etc.
+
+
+
+ </p><p>
+ In short, you should think twice before disabling something here.
+ </p></div><p>
+ Now, to the mysterious setuid virtual component. If you
+ use the <span class="command"><strong>-u</strong></span> option to start
+ <span class="command"><strong>bind10</strong></span> as root, but change the user
+ later, we need to start <span class="command"><strong>b10-auth</strong></span> or
+ <span class="command"><strong>b10-resolver</strong></span> as root (until the socket
+ creator is finished). So we need to specify
+ the time when the switch from root to the given user happens,
+ and that's what the setuid component is for. The switch is
+ done at the time the setuid component would be started, if
+ it were a process. The default configuration contains the
+ setuid component with priority 5, <span class="command"><strong>b10-auth</strong></span>
+ has priority 10 so it is started before the switch, and everything
+ else has no priority, so it is started after the switch.
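+ If you are unsure what the component list currently contains, you can
+ inspect it with (the output is not shown here):
+ </p><pre class="screen">> <strong class="userinput"><code>config show Boss/components</code></strong></pre><p>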
+ </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
@@ -507,12 +610,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437660"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438007"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -532,7 +635,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437725"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438072"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -546,7 +649,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437755"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438171"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -575,7 +678,7 @@ This may be a temporary setting until then.
If you reload a zone already existing in the database,
all records from that prior zone disappear and a whole new set
appears.
- </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
process which is started by <span class="command"><strong>bind10</strong></span>.
When received, the zone is stored in the corresponding BIND 10
@@ -593,7 +696,7 @@ This may be a temporary setting until then.
In the current development release of BIND 10, incoming zone
transfers are only available for SQLite3-based data sources,
that is, they don't work for an in-memory data source.
- </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437989"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+ </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438302"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
In practice, you need to specify a list of secondary zones to
enable incoming zone transfers for these zones (you can still
trigger a zone transfer manually, without a prior configuration
@@ -609,7 +712,7 @@ This may be a temporary setting until then.
> <strong class="userinput"><code>config commit</code></strong></pre><p>
(We assume there has been no zone configuration before).
- </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438027"></a>Enabling IXFR</h2></div></div></div><p>
+ </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438340"></a>Enabling IXFR</h2></div></div></div><p>
As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
zone transfers by default. To enable IXFR for zone transfers
for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
@@ -631,7 +734,7 @@ This may be a temporary setting until then.
make this selection automatically.
These features will be implemented in a near future
version, at which point we will enable IXFR by default.
- </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438069"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438382"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
To manually trigger a zone transfer to retrieve a remote zone,
you may use the <span class="command"><strong>bindctl</strong></span> utility.
For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
@@ -641,16 +744,53 @@ This may be a temporary setting until then.
The <span class="command"><strong>b10-xfrout</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
- receives an AXFR request, <span class="command"><strong>b10-xfrout</strong></span>
- sends the zone.
- This is used to provide master DNS service to share zones
+ receives an AXFR or IXFR request, <span class="command"><strong>b10-auth</strong></span>
+ internally forwards the request to <span class="command"><strong>b10-xfrout</strong></span>,
+ which handles the rest of request processing.
+ This is used to provide primary DNS service to share zones
to secondary name servers.
The <span class="command"><strong>b10-xfrout</strong></span> is also used to send
- NOTIFY messages to slaves.
+ NOTIFY messages to secondary servers.
+ </p><p>
+ A global or per zone <code class="option">transfer_acl</code> configuration
+ can be used to control accessibility of the outbound zone
+ transfer service.
+ By default, <span class="command"><strong>b10-xfrout</strong></span> allows any client to
+ perform zone transfers for any zone:
+ </p><pre class="screen">> <strong class="userinput"><code>config show Xfrout/transfer_acl</code></strong>
+Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)</pre><p>
+ You can change this, for example, to reject all transfer
+ requests by default while allowing requests for the transfer
+ of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+ </p><pre class="screen">> <strong class="userinput"><code>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</code></strong>
+> <strong class="userinput"><code>config add Xfrout/zone_config</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/origin "example.com"</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</code></strong>
+<strong class="userinput"><code> {"action": "ACCEPT", "from": "2001:db8::1"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ In the above example the lines
+ for <code class="option">transfer_acl</code> were divided for
+ readability. In the actual input they must be entered as a single line.
+ </p></div><p>
+ If you want to require TSIG in access control, a separate TSIG
+ "key ring" must be configured specifically
+ for <span class="command"><strong>b10-xfrout</strong></span> as well as a system wide
+ key ring, both containing a consistent set of keys.
+ For example, to change the previous example to allow requests
+ from 192.0.2.1 signed by a TSIG with a key name of
+ "key.example", you'll need to do this:
+ </p><pre class="screen">> <strong class="userinput"><code>config set tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+ The first line of configuration defines a system wide key ring.
+ This is necessary because the <span class="command"><strong>b10-auth</strong></span> server
+ also checks TSIGs and it uses the system wide configuration.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
- Access control is not yet provided.
+ In a future version, <span class="command"><strong>b10-xfrout</strong></span> will also
+ use the system wide TSIG configuration.
+ The way to specify zone specific configuration (ACLs, etc) is
+ likely to be changed, too.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -665,7 +805,7 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
@@ -678,8 +818,13 @@ This may be a temporary setting until then.
You may change this using <span class="command"><strong>bindctl</strong></span>, for example:
</p><pre class="screen">
-> <strong class="userinput"><code>config set Boss/start_auth false</code></strong>
-> <strong class="userinput"><code>config set Boss/start_resolver true</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrin</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-auth</code></strong>
+> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
@@ -699,7 +844,7 @@ This may be a temporary setting until then.
</pre><p>
</p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
- Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438327"></a>Access Control</h2></div></div></div><p>
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438673"></a>Access Control</h2></div></div></div><p>
By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
DNS queries from the localhost (127.0.0.1 and ::1).
The <code class="option">Resolver/query_acl</code> configuration may
@@ -732,7 +877,7 @@ This may be a temporary setting until then.
</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
- syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438512"></a>Forwarding</h2></div></div></div><p>
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438891"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -786,7 +931,7 @@ This may be a temporary setting until then.
}
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438628"></a>Logging configuration</h2></div></div></div><p>
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439042"></a>Logging configuration</h2></div></div></div><p>
The logging system in BIND 10 is configured through the
Logging module. All BIND 10 modules will look at the
@@ -795,7 +940,7 @@ This may be a temporary setting until then.
- </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229438638"></a>Loggers</h3></div></div></div><p>
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439052"></a>Loggers</h3></div></div></div><p>
Within BIND 10, a message is logged through a component
called a "logger". Different parts of BIND 10 log messages
@@ -816,7 +961,7 @@ This may be a temporary setting until then.
(what to log), and the <code class="option">output_options</code>
(where to log).
- </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229438663"></a>name (string)</h4></div></div></div><p>
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439077"></a>name (string)</h4></div></div></div><p>
Each logger in the system has a name, the name being that
of the component using it to log messages. For instance,
if you want to configure logging for the resolver module,
@@ -889,7 +1034,7 @@ This may be a temporary setting until then.
<span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
- </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439035"></a>severity (string)</h4></div></div></div><p>
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439176"></a>severity (string)</h4></div></div></div><p>
This specifies the category of messages logged.
Each message is logged with an associated severity which
@@ -905,7 +1050,7 @@ This may be a temporary setting until then.
- </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439086"></a>output_options (list)</h4></div></div></div><p>
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439227"></a>output_options (list)</h4></div></div></div><p>
Each logger can have zero or more
<code class="option">output_options</code>. These specify where log
@@ -915,7 +1060,7 @@ This may be a temporary setting until then.
The other options for a logger are:
- </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439102"></a>debuglevel (integer)</h4></div></div></div><p>
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439243"></a>debuglevel (integer)</h4></div></div></div><p>
When a logger's severity is set to DEBUG, this value
specifies what debug messages should be printed. It ranges
@@ -924,7 +1069,7 @@ This may be a temporary setting until then.
If severity for the logger is not DEBUG, this value is ignored.
- </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439117"></a>additive (true or false)</h4></div></div></div><p>
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439258"></a>additive (true or false)</h4></div></div></div><p>
If this is true, the <code class="option">output_options</code> from
the parent will be used. For example, if there are two
@@ -938,18 +1083,18 @@ This may be a temporary setting until then.
- </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439154"></a>Output Options</h3></div></div></div><p>
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439294"></a>Output Options</h3></div></div></div><p>
The main settings for an output option are the
<code class="option">destination</code> and a value called
<code class="option">output</code>, the meaning of which depends on
the destination that is set.
- </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439169"></a>destination (string)</h4></div></div></div><p>
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439309"></a>destination (string)</h4></div></div></div><p>
The destination is the type of output. It can be one of:
- </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439201"></a>output (string)</h4></div></div></div><p>
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439341"></a>output (string)</h4></div></div></div><p>
Depending on what is set as the output destination, this
value is interpreted as follows:
@@ -971,12 +1116,12 @@ This may be a temporary setting until then.
The other options for <code class="option">output_options</code> are:
- </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439286"></a>flush (true of false)</h5></div></div></div><p>
+ </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439427"></a>flush (true of false)</h5></div></div></div><p>
Flush buffers after each log message. Doing this will
reduce performance but will ensure that if the program
terminates abnormally, all messages up to the point of
termination are output.
- </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439296"></a>maxsize (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439436"></a>maxsize (integer)</h5></div></div></div><p>
Only relevant when destination is file, this is maximum
file size of output files in bytes. When the maximum
size is reached, the file is renamed and a new file opened.
@@ -985,11 +1130,11 @@ This may be a temporary setting until then.
etc.)
</p><p>
If this is 0, no maximum file size is used.
- </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439308"></a>maxver (integer)</h5></div></div></div><p>
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439449"></a>maxver (integer)</h5></div></div></div><p>
Maximum number of old log files to keep around when
rolling the output file. Only relevant when
<code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
- </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439328"></a>Example session</h3></div></div></div><p>
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439468"></a>Example session</h3></div></div></div><p>
In this example we want to set the global logging to
write to the file <code class="filename">/var/log/my_bind10.log</code>,
@@ -1150,7 +1295,7 @@ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
And every module will now be using the values from the
logger named <span class="quote">“<span class="quote">*</span>”</span>.
- </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439609"></a>Logging Message Format</h2></div></div></div><p>
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229440023"></a>Logging Message Format</h2></div></div></div><p>
Each message written by BIND 10 to the configured logging
destinations comprises a number of components that identify
the origin of the message and, if the message indicates
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
index 619d56f..9c8ffbe 100644
--- a/doc/guide/bind10-guide.txt
+++ b/doc/guide/bind10-guide.txt
@@ -2,7 +2,7 @@
Administrator Reference for BIND 10
- This is the reference guide for BIND 10 version 20110809.
+ This is the reference guide for BIND 10 version 20111021.
Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
@@ -12,7 +12,7 @@ Administrator Reference for BIND 10
Consortium (ISC). It includes DNS libraries and modular components for
controlling authoritative and recursive DNS servers.
- This is the reference guide for BIND 10 version 20110809. The most
+ This is the reference guide for BIND 10 version 20111021. The most
up-to-date version of this document (in PDF, HTML, and plain text
formats), along with other documents for BIND 10, can be found at
http://bind10.isc.org/docs.
@@ -55,6 +55,8 @@ Administrator Reference for BIND 10
Starting BIND 10
+ Configuration of started processes
+
4. Command channel
5. Configuration manager
@@ -105,6 +107,10 @@ Administrator Reference for BIND 10
Logging Message Format
+ List of Tables
+
+ 3.1.
+
Chapter 1. Introduction
Table of Contents
@@ -124,7 +130,7 @@ Chapter 1. Introduction
Note
- This guide covers the experimental prototype of BIND 10 version 20110809.
+ This guide covers the experimental prototype of BIND 10 version 20111021.
Note
@@ -427,24 +433,28 @@ Chapter 3. Starting BIND10 with bind10
Starting BIND 10
+ Configuration of started processes
+
BIND 10 provides the bind10 command which starts up the required
- processes. bind10 will also restart processes that exit unexpectedly. This
- is the only command needed to start the BIND 10 system.
+ processes. bind10 will also restart some processes that exit unexpectedly.
+ This is the only command needed to start the BIND 10 system.
After starting the b10-msgq communications channel, bind10 connects to it,
runs the configuration manager, and reads its own configuration. Then it
starts the other modules.
- The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
- provides the communication channel between every part of the system. The
- b10-cfgmgr daemon is always needed by every module, if only to send
- information about themselves somewhere, but more importantly to ask about
- their own settings, and about other modules. The bind10 master process
- will also start up b10-cmdctl for admins to communicate with the system,
- b10-auth for authoritative DNS service or b10-resolver for recursive name
- service, b10-stats for statistics collection, b10-xfrin for inbound DNS
- zone transfers, b10-xfrout for outbound DNS zone transfers, and
- b10-zonemgr for secondary service.
+ The b10-sockcreator, b10-msgq and b10-cfgmgr services make up the core.
+ The b10-msgq daemon provides the communication channel between every part
+ of the system. The b10-cfgmgr daemon is always needed by every module, if
+ only to send information about themselves somewhere, but more importantly
+ to ask about their own settings, and about other modules. The
+ b10-sockcreator will allocate sockets for the rest of the system.
+
+ In its default configuration, the bind10 master process will also start up
+ b10-cmdctl for admins to communicate with the system, b10-auth for
+ authoritative DNS service, b10-stats for statistics collection, b10-xfrin
+ for inbound DNS zone transfers, b10-xfrout for outbound DNS zone
+ transfers, and b10-zonemgr for secondary service.
Starting BIND 10
@@ -457,6 +467,110 @@ Starting BIND 10
names for the Python-based daemons will be renamed to better identify them
instead of just "python". This is not needed on some operating systems.
+Configuration of started processes
+
+ The processes to be started can be configured, with the exception of the
+ b10-sockcreator, b10-msgq and b10-cfgmgr.
+
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process (currently
+ there's also one component which doesn't represent a process). If you
+ didn't want to transfer out at all (your server is a slave only), you
+ would just remove the corresponding component from the set, like this and
+ the process would be stopped immediately (and not started on the next
+ startup):
+
+ > config remove Boss/components b10-xfrout
+ > config commit
+
+ To add a process to the set, let's say the resolver (which is not started
+ by default), you would do this:
+
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+ Now, what it means. We add an entry called b10-resolver. It is both a name
+ used to reference this component in the configuration and the name of the
+ process to start. Then we set some parameters on how to start it.
+
+ The special parameter is for components that need some kind of special
+ care during startup or shutdown. Unless specified, the component is
+ started in the usual way. This is the list of components that need to be
+ started in a special way, with the value of special used for them:
+
+ Table 3.1.
+
+ +------------------------------------------------------------------------+
+ | Component | Special | Description |
+ |--------------+----------+----------------------------------------------|
+ | b10-auth | auth | Authoritative server |
+ |--------------+----------+----------------------------------------------|
+ | b10-resolver | resolver | The resolver |
+ |--------------+----------+----------------------------------------------|
+ | b10-cmdctl | cmdctl | The command control (remote control |
+ | | | interface) |
+ |--------------+----------+----------------------------------------------|
+ | setuid | setuid | Virtual component, see below |
+ +------------------------------------------------------------------------+
+
+ The kind specifies how a failure of the component should be handled. If it
+ is set to "dispensable" (the default unless you set something else), it
+ will get started again if it fails. If it is set to "needed" and it fails
+ at startup, the whole bind10 shuts down and exits with an error exit code.
+ But if it fails some time later, it is just started again. If you set it
+ to "core", you indicate that the system is not usable without the
+ component and if such a component fails, the system shuts down no matter
+ when the failure happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any other components as
+ core as well if you wish (but you can turn these off, they just can't
+ fail).
+
+ The priority defines the order in which the components should start. The
+ ones with a higher number are started sooner than those with a lower one.
+ If you don't set it, 0 (zero) is used as the priority.
+
+ There are other parameters we didn't use in our example. One of them is
+ "address". It is the address used by the component on the b10-msgq message
+ bus. The special components already know their address, but the usual ones
+ don't. The address is by convention the part after b10-, with the first
+ letter capitalized (e.g. b10-stats would have "Stats" as its address).
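+
+ For example, a component that is not one of the special ones could have
+ its address set explicitly (an illustration only; the value simply
+ follows the convention above):
+
+ > config set Boss/components/b10-stats/address Stats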
+
+ The last one is process. It is the name of the process to be started. It
+ defaults to the name of the component if not set, but you can use this to
+ override it.
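+
+ For example, if the binary were installed under a different name, you
+ could keep the component entry and point it at the actual process (the
+ process name below is purely hypothetical):
+
+ > config set Boss/components/b10-resolver/process b10-resolver-devel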
+
+ Note
+
+ This system allows you to start the same component multiple times (by
+ including it in the configuration with different names, but the same
+ process setting). However, the rest of the system doesn't expect such a
+ situation, so it would probably not do what you want. Such support is yet
+ to be implemented.
+
+ Note
+
+ The configuration is quite powerful, but that includes a lot of space for
+ mistakes. You could turn off the b10-cmdctl, but then you couldn't change
+ it back the usual way, as it would require it to be running (you would
+ have to find and edit the configuration directly). Also, some modules
+ might have dependencies -- b10-stats-httpd needs b10-stats, b10-xfrout
+ needs b10-auth to be running, etc.
+
+ In short, you should think twice before disabling something here.
+
+ Now, to the mysterious setuid virtual component. If you use the -u option
+ to start bind10 as root, but change the user later, we need to start
+ b10-auth or b10-resolver as root (until the socket creator is finished).
+ So we need to specify the time when the switch from root to the given
+ user happens, and that's what the setuid component is for. The switch is
+ done at the time the setuid component would be started, if it were a
+ process. The default configuration contains the setuid component with
+ priority 5, b10-auth has priority 10 so it is started before the switch,
+ and everything else has no priority, so it is started after the switch.
+
Chapter 4. Command channel
The BIND 10 components use the b10-msgq message routing daemon to
@@ -739,15 +853,55 @@ Trigger an Incoming Zone Transfer Manually
Chapter 10. Outbound Zone Transfers
The b10-xfrout process is started by bind10. When the b10-auth
- authoritative DNS server receives an AXFR request, b10-xfrout sends the
- zone. This is used to provide master DNS service to share zones to
- secondary name servers. The b10-xfrout is also used to send NOTIFY
- messages to slaves.
+ authoritative DNS server receives an AXFR or IXFR request, b10-auth
+ internally forwards the request to b10-xfrout, which handles the rest of
+ request processing. This is used to provide primary DNS service to share
+ zones to secondary name servers. The b10-xfrout is also used to send
+ NOTIFY messages to secondary servers.
+
+ A global or per zone transfer_acl configuration can be used to control
+ accessibility of the outbound zone transfer service. By default,
+ b10-xfrout allows any client to perform zone transfers for any zone:
+
+ > config show Xfrout/transfer_acl
+ Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)
+
+ You can change this, for example, to reject all transfer requests by
+ default while allowing requests for the transfer of zone "example.com"
+ from 192.0.2.1 and 2001:db8::1 as follows:
+
+ > config set Xfrout/transfer_acl[0] {"action": "REJECT"}
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},
+ {"action": "ACCEPT", "from": "2001:db8::1"}]
+ > config commit
+
+ Note
+
+ In the above example the lines for transfer_acl were divided for
+ readability. In the actual input they must be entered as a single line.
+
+ If you want to require TSIG in access control, a separate TSIG "key ring"
+ must be configured specifically for b10-xfrout as well as a system wide
+ key ring, both containing a consistent set of keys. For example, to change
+ the previous example to allow requests from 192.0.2.1 signed by a TSIG
+ with a key name of "key.example", you'll need to do this:
+
+ > config set tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]
+ > config commit
+
+ The first line of configuration defines a system wide key ring. This is
+ necessary because the b10-auth server also checks TSIGs and it uses the
+ system wide configuration.
Note
- The current development release of BIND 10 only supports AXFR. (IXFR is
- not supported.) Access control is not yet provided.
+ In a future version, b10-xfrout will also use the system wide TSIG
+ configuration. The way to specify zone specific configuration (ACLs, etc)
+ is likely to be changed, too.
Chapter 11. Secondary Manager
@@ -777,8 +931,13 @@ Chapter 12. Recursive Name Server
authoritative or resolver or both. By default, it starts the authoritative
service. You may change this using bindctl, for example:
- > config set Boss/start_auth false
- > config set Boss/start_resolver true
+ > config remove Boss/components b10-xfrout
+ > config remove Boss/components b10-xfrin
+ > config remove Boss/components b10-auth
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
> config commit
The master bind10 will stop and start the desired services.
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 21bb671..e61725f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -706,7 +706,7 @@ Debian and Ubuntu:
BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
- will also restart processes that exit unexpectedly.
+ will also restart some processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</para>
@@ -718,17 +718,22 @@ Debian and Ubuntu:
</para>
<para>
- The <command>b10-msgq</command> and <command>b10-cfgmgr</command>
+ The <command>b10-sockcreator</command>, <command>b10-msgq</command> and
+ <command>b10-cfgmgr</command>
services make up the core. The <command>b10-msgq</command> daemon
provides the communication channel between every part of the system.
The <command>b10-cfgmgr</command> daemon is always needed by every
module, if only to send information about themselves somewhere,
but more importantly to ask about their own settings, and
- about other modules.
- The <command>bind10</command> master process will also start up
+ about other modules. The <command>b10-sockcreator</command> will
+ allocate sockets for the rest of the system.
+ </para>
+
+ <para>
+ In its default configuration, the <command>bind10</command>
+ master process will also start up
<command>b10-cmdctl</command> for admins to communicate with the
- system, <command>b10-auth</command> for authoritative DNS service or
- <command>b10-resolver</command> for recursive name service,
+ system, <command>b10-auth</command> for authoritative DNS service,
<command>b10-stats</command> for statistics collection,
<command>b10-xfrin</command> for inbound DNS zone transfers,
<command>b10-xfrout</command> for outbound DNS zone transfers,
@@ -754,6 +759,159 @@ Debian and Ubuntu:
</note>
</section>
+ <section id="bind10.config">
+ <title>Configuration of started processes</title>
+ <para>
+ The processes to be started can be configured, with the exception
+ of the <command>b10-sockcreator</command>, <command>b10-msgq</command>
+ and <command>b10-cfgmgr</command>.
+ </para>
+
+ <para>
+ The configuration is in the Boss/components section. Each element
+ represents one component, which is an abstraction of a process
+ (currently there's also one component which doesn't represent
+ a process). If you didn't want to transfer out at all (your server
+ is a slave only), you would just remove the corresponding component
+ from the set, like this, and the process would be stopped immediately
+ (and not started on the next startup):
+ <screen>> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>
+ To add a process to the set, let's say the resolver (which is not
+ started by default), you would do this:
+ <screen>> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config commit</userinput></screen></para>
+
+ <para>
+ Now, what it means. We add an entry called b10-resolver. It is both a
+ name used to reference this component in the configuration and the
+ name of the process to start. Then we set some parameters on how to
+ start it.
+ </para>
+
+ <para>
+ The special parameter is for components that need some kind of special
+ care during startup or shutdown. Unless specified, the component is
+ started in the usual way. This is the list of components that need to
+ be started in a special way, with the value of special used for them:
+ <table>
+ <tgroup cols='3' align='left'>
+ <colspec colname='component'/>
+ <colspec colname='special'/>
+ <colspec colname='description'/>
+ <thead><row><entry>Component</entry><entry>Special</entry><entry>Description</entry></row></thead>
+ <tbody>
+ <row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative server</entry></row>
+ <row><entry>b10-resolver</entry><entry>resolver</entry><entry>The resolver</entry></row>
+ <row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>The command control (remote control interface)</entry></row>
+ <row><entry>setuid</entry><entry>setuid</entry><entry>Virtual component, see below</entry></row>
+ <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+ </tbody>
+ </tgroup>
+ </table>
+ </para>
+
+ <para>
+ The kind specifies how a failure of the component should
+ be handled. If it is set to <quote>dispensable</quote>
+ (the default unless you set something else), it will get
+ started again if it fails. If it is set to <quote>needed</quote>
+ and it fails at startup, the whole <command>bind10</command>
+ shuts down and exits with an error exit code. But if it fails
+ some time later, it is just started again. If you set it
+ to <quote>core</quote>, you indicate that the system is
+ not usable without the component and if such a component
+ fails, the system shuts down no matter when the failure
+ happened. This is the behaviour of the core components
+ (the ones you can't turn off), but you can declare any
+ other components as core as well if you wish (but you can
+ turn these off, they just can't fail).
+ </para>
+
+ <para>
+ The priority defines the order in which the components should start.
+ The ones with a higher number are started sooner than those with a
+ lower one. If you don't set it, 0 (zero) is used as the priority.
+ </para>
+
+ <para>
+ There are other parameters we didn't use in our example.
+ One of them is <quote>address</quote>. It is the address
+ used by the component on the <command>b10-msgq</command>
+ message bus. The special components already know their
+ address, but the usual ones don't. The address is by
+ convention the part after <emphasis>b10-</emphasis>, with
+ the first letter capitalized (e.g. <command>b10-stats</command>
+ would have <quote>Stats</quote> as its address).
+<!-- TODO: this should be simplified so we don't even have to document it -->
+ </para>
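+
+      <para>
+        For example, a component that is not one of the special ones could
+        have its address set explicitly (an illustration only; the value
+        simply follows the convention above):
+        <screen>> <userinput>config set Boss/components/b10-stats/address Stats</userinput></screen>
+      </para>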
+
+<!-- TODO: what does "The special components already know their
+address, but the usual ones don't." mean? -->
+
+<!-- TODO: document params when is enabled -->
+
+ <para>
+ The last one is process. It is the name of the process to be started.
+ It defaults to the name of the component if not set, but you can use
+ this to override it.
+ </para>
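+
+      <para>
+        For example, if the binary were installed under a different name,
+        you could keep the component entry and point it at the actual
+        process (the process name below is purely hypothetical):
+        <screen>> <userinput>config set Boss/components/b10-resolver/process b10-resolver-devel</userinput></screen>
+      </para>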
+
+ <!-- TODO Add parameters when they work, not implemented yet-->
+
+ <note>
+ <para>
+ This system allows you to start the same component multiple times
+ (by including it in the configuration with different names, but the
+ same process setting). However, the rest of the system doesn't expect
+ such a situation, so it would probably not do what you want. Such
+ support is yet to be implemented.
+ </para>
+ </note>
+
+ <note>
+ <para>
+ The configuration is quite powerful, but that includes
+ a lot of space for mistakes. You could turn off the
+ <command>b10-cmdctl</command>, but then you couldn't
+ change it back the usual way, as it would require it to
+ be running (you would have to find and edit the configuration
+ directly). Also, some modules might have dependencies
+ -- <command>b10-stats-httpd</command> needs
+ <command>b10-stats</command>, <command>b10-xfrout</command>
+ needs <command>b10-auth</command> to be running, etc.
+
+<!-- TODO: should we define dependencies? -->
+
+ </para>
+ <para>
+ In short, you should think twice before disabling something here.
+ </para>
+ </note>
+
+ <para>
+ Now, to the mysterious setuid virtual component. If you
+ use the <command>-u</command> option to start
+ <command>bind10</command> as root, but change the user
+ later, we need to start <command>b10-auth</command> or
+ <command>b10-resolver</command> as root (until the socket
+ creator is finished).<!-- TODO --> So we need to specify
+ the time when the switch from root to the given user happens,
+ and that's what the setuid component is for. The switch is
+ done at the time the setuid component would be started, if
+ it were a process. The default configuration contains the
+ setuid component with priority 5, <command>b10-auth</command>
+ has priority 10 so it is started before the switch, and everything
+ else has no priority, so it is started after the switch.
+ </para>
+
+ </section>
</chapter>
@@ -1369,20 +1527,72 @@ what if a NOTIFY is sent?
The <command>b10-xfrout</command> process is started by
<command>bind10</command>.
When the <command>b10-auth</command> authoritative DNS server
- receives an AXFR request, <command>b10-xfrout</command>
- sends the zone.
- This is used to provide master DNS service to share zones
+ receives an AXFR or IXFR request, <command>b10-auth</command>
+ internally forwards the request to <command>b10-xfrout</command>,
+ which handles the rest of request processing.
+ This is used to provide primary DNS service to share zones
to secondary name servers.
The <command>b10-xfrout</command> is also used to send
- NOTIFY messages to slaves.
+ NOTIFY messages to secondary servers.
+ </para>
+
+ <para>
+ A global or per zone <option>transfer_acl</option> configuration
+ can be used to control accessibility of the outbound zone
+ transfer service.
+ By default, <command>b10-xfrout</command> allows any client to
+ perform zone transfers for any zone:
+ </para>
+
+ <screen>> <userinput>config show Xfrout/transfer_acl</userinput>
+Xfrout/transfer_acl[0] {"action": "ACCEPT"} any (default)</screen>
+
+ <para>
+ You can change this, for example, to reject all transfer
+ requests by default while allowing requests for the transfer
+ of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
</para>
+ <screen>> <userinput>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</userinput>
+> <userinput>config add Xfrout/zone_config</userinput>
+> <userinput>config set Xfrout/zone_config[0]/origin "example.com"</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</userinput>
+<userinput> {"action": "ACCEPT", "from": "2001:db8::1"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
<note><simpara>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
- Access control is not yet provided.
+ In the above example the lines
+ for <option>transfer_acl</option> were divided for
+ readability. In the actual input they must be entered as a single line.
</simpara></note>
+ <para>
+ If you want to require TSIG in access control, a separate TSIG
+ "key ring" must be configured specifically
+ for <command>b10-xfrout</command> as well as a system wide
+ key ring, both containing a consistent set of keys.
+ For example, to change the previous example to allow requests
+ from 192.0.2.1 signed by a TSIG with a key name of
+ "key.example", you'll need to do this:
+ </para>
+
+ <screen>> <userinput>config set tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
+ <para>
+ The first line of configuration defines a system wide key ring.
+ This is necessary because the <command>b10-auth</command> server
+ also checks TSIGs and it uses the system wide configuration.
+ </para>
+
+ <note><simpara>
+ In a future version, <command>b10-xfrout</command> will also
+ use the system wide TSIG configuration.
+ The way to specify zone specific configuration (ACLs, etc) is
+ likely to be changed, too.
+ </simpara></note>
<!--
TODO:
@@ -1442,8 +1652,13 @@ what is XfroutClient xfr_client??
You may change this using <command>bindctl</command>, for example:
<screen>
-> <userinput>config set Boss/start_auth false</userinput>
-> <userinput>config set Boss/start_resolver true</userinput>
+> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config remove Boss/components b10-xfrin</userinput>
+> <userinput>config remove Boss/components b10-auth</userinput>
+> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput>
</screen>
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index 237b7ad..f2f57f1 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20111021. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20111021.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110809.
+ This is the messages manual for BIND 10 version 20111021.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -107,6 +107,9 @@ This is a debug message, generated by the authoritative server when an
attempt to parse the header of a received DNS packet has failed. (The
reason for the failure is given in the message.) The server will drop the
packet.
+</p></dd><dt><a name="AUTH_INVALID_STATISTICS_DATA"></a><span class="term">AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
This is a debug message indicating that the authoritative server
has requested the keyring holding TSIG keys from the configuration
@@ -263,12 +266,58 @@ NOTIFY request will not be honored.
The boss process is starting up and will now check if the message bus
daemon is already running. If so, it will not be able to start, as it
needs a dedicated message bus.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
-This message shows whether or not the resolver should be
-started according to the configuration.
+</p></dd><dt><a name="BIND10_COMPONENT_FAILED"></a><span class="term">BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</span></dt><dd><p>
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+</p></dd><dt><a name="BIND10_COMPONENT_RESTART"></a><span class="term">BIND10_COMPONENT_RESTART component %1 is about to restart</span></dt><dd><p>
+The named component failed previously and we will try to restart it to keep
+the service as uninterrupted as possible, but what happened should be
+investigated, as it could happen again.
+</p></dd><dt><a name="BIND10_COMPONENT_START"></a><span class="term">BIND10_COMPONENT_START component %1 is starting</span></dt><dd><p>
+The named component is about to be started by the boss process.
+</p></dd><dt><a name="BIND10_COMPONENT_START_EXCEPTION"></a><span class="term">BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</span></dt><dd><p>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further
+action will be taken regarding it.
+</p></dd><dt><a name="BIND10_COMPONENT_STOP"></a><span class="term">BIND10_COMPONENT_STOP component %1 is being stopped</span></dt><dd><p>
+A component is about to be asked by the boss to stop gracefully.
+</p></dd><dt><a name="BIND10_COMPONENT_UNSATISFIED"></a><span class="term">BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</span></dt><dd><p>
+A component failed for some reason (see previous messages). It is either a core
+component or a needed component that was just started. In either case, the
+system can't continue without it and will terminate.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_BUILD"></a><span class="term">BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</span></dt><dd><p>
+A debug message. This indicates that the configurator is building a plan
+for changing the configuration from the old one to the new one. This does no
+real work yet; it only plans what needs to be done.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_PLAN_INTERRUPTED"></a><span class="term">BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</span></dt><dd><p>
+There was an exception during one of the planned tasks. The plan will not
+continue; only some tasks of the plan were completed and the rest were
+aborted. The exception will be propagated.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RECONFIGURE"></a><span class="term">BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</span></dt><dd><p>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read for the first time, or when an operator changes the configuration of the boss.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RUN"></a><span class="term">BIND10_CONFIGURATOR_RUN running plan of %1 tasks</span></dt><dd><p>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_START"></a><span class="term">BIND10_CONFIGURATOR_START bind10 component configurator is starting up</span></dt><dd><p>
+The part of the boss process that takes care of starting and stopping the
+right components is starting up. This happens only once, at the startup of
+the boss process. It will start the basic set of processes now (the ones the
+boss needs to read the configuration); the rest will be started after the
+configuration is known.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_STOP"></a><span class="term">BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</span></dt><dd><p>
+The part of the boss process that takes care of starting and stopping
+components is shutting down. All started components will be shut down now
+(more precisely, asked to terminate on their own; if they fail to comply,
+other parts of the boss process will try to force them).
+</p></dd><dt><a name="BIND10_CONFIGURATOR_TASK"></a><span class="term">BIND10_CONFIGURATOR_TASK performing task %1 on %2</span></dt><dd><p>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+</p></dd><dt><a name="BIND10_INVALID_STATISTICS_DATA"></a><span class="term">BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
The boss process was started with the -u option, to drop root privileges
and continue running as the specified user, but the user is unknown.
@@ -284,24 +333,14 @@ There already appears to be a message bus daemon running. Either an
old process was not shut down correctly, and needs to be killed, or
another instance of BIND10, with the same msgq domain socket, is
running, which needs to be stopped.
-</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED"></a><span class="term">BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</span></dt><dd><p>
+This indicates that a previously started process has terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This doesn't distinguish whether the process was supposed to terminate or not.
</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
The boss process is starting up, and will now process the initial
configuration, as received from the configuration manager.
@@ -327,6 +366,8 @@ so BIND 10 will now shut down. The specific error is printed.
The boss module is sending a SIGKILL signal to the given process.
</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SETUID"></a><span class="term">BIND10_SETUID setting UID to %1</span></dt><dd><p>
+The boss switches the user it runs as to the given UID.
</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
The boss process received a command or signal telling it to shut down.
It will send a shutdown command to each process. The processes that do
@@ -341,10 +382,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
The boss requested a socket from the creator, but the answer is unknown. This
looks like a programmer error.
-</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
There should be more data from the socket creator, but it closed the socket.
It probably crashed.
@@ -368,12 +405,18 @@ The socket creator failed to create the requested socket. It failed on the
indicated OS API function with given error.
</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_CC"></a><span class="term">BIND10_STARTED_CC started configuration/command session</span></dt><dd><p>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
The given process has successfully been started.
</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
The given process has successfully been started, and has the given PID.
</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_CC"></a><span class="term">BIND10_STARTING_CC starting configuration/command session</span></dt><dd><p>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
The boss module is starting the given process.
</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
@@ -387,8 +430,24 @@ All modules have been successfully started, and BIND 10 is now running.
</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
There was a fatal error when BIND10 was trying to start. The error is
shown, and BIND10 will now shut down.
-</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
-The given module is being started or restarted without root privileges.
+</p></dd><dt><a name="BIND10_STARTUP_UNEXPECTED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+</p></dd><dt><a name="BIND10_STARTUP_UNRECOGNISED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is not recognised.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_AUTH"></a><span class="term">BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</span></dt><dd><p>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_RESOLVER"></a><span class="term">BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</span></dt><dd><p>
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
@@ -399,6 +458,15 @@ the message channel.
</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
+</p></dd><dt><a name="BIND10_WAIT_CFGMGR"></a><span class="term">BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</span></dt><dd><p>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
The cache tried to generate the complete answer message. It knows the structure
of the message, but some of the RRsets to be put there are not in cache (they
@@ -487,7 +555,7 @@ Debug message. The RRset cache to hold at most this many RRsets for the given
class is being created.
</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
Debug message. The resolver is trying to look up data in the RRset cache.
-</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</span></dt><dd><p>
Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
in the cache.
</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
@@ -642,6 +710,8 @@ The user was denied because the SSL connection could not successfully
be set up. The specific error is given in the log message. Possible
causes may be that the ssl request itself was bad, or the local key or
certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STARTED"></a><span class="term">CMDCTL_STARTED cmdctl is listening for connections on %1:%2</span></dt><dd><p>
+The cmdctl daemon has started and is now listening for connections.
</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
There was a keyboard interrupt signal to stop the cmdctl daemon. The
daemon will now shut down.
@@ -756,28 +826,18 @@ Debug information. An item is being removed from the hotspot cache.
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
-This was an internal error while reading data from a datasource. This can either
-mean the specific data source implementation is not behaving correctly, or the
-data it provides is invalid. The current search is aborted.
-The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED"></a><span class="term">DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</span></dt><dd><p>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
Debug information. The database data source is looking up records with the given
name and type in the database.
</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
The datasource backend provided resource records for the given RRset with
-different TTL values. The TTL of the RRSET is set to the lowest value, which
-is printed in the log message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught general exception while reading data from a datasource.
-This most likely points to a logic error in the code, and can be considered a
-bug. The current search is aborted. Specific information about the exception is
-printed in this error message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught ISC exception while reading data from a datasource. This
-most likely points to a logic error in the code, and can be considered a bug.
-The current search is aborted. Specific information about the exception is
-printed in this error message.
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
When searching for a domain, the program met a delegation to a different zone
at the given domain name. It will return that one instead.
@@ -789,6 +849,10 @@ It will return the NS record instead.
When searching for a domain, the program met a DNAME redirection to a different
place in the domain space at the given domain name. It will return that one
instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL"></a><span class="term">DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</span></dt><dd><p>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it does exist in the DNS name space. We
+therefore return NXRRSET instead of NXDOMAIN.
</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
The data returned by the database backend did not contain any data for the given
domain name, class and type.
@@ -799,6 +863,91 @@ name and class, but not for the given type.
The data returned by the database backend contained data for the given domain
name, and it either matches the type or has a relevant type. The RRset that is
returned is printed.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE"></a><span class="term">DATASRC_DATABASE_ITERATE iterating zone %1</span></dt><dd><p>
+The program is reading the whole zone, i.e. not searching for specific data
+but going through each of the RRsets there.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_END"></a><span class="term">DATASRC_DATABASE_ITERATE_END iterating zone finished</span></dt><dd><p>
+While iterating through the zone, the program reached the end of the data.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_NEXT"></a><span class="term">DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</span></dt><dd><p>
+While iterating through the zone, the program extracted the next RRset from it.
+The name and RR type of the RRset are indicated in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</span></dt><dd><p>
+While iterating through the zone, the TTL values for RRs of the given RRset
+were found to differ. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_END"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program (successfully)
+reaches the end of the sequence of a zone's differences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_NEXT"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</span></dt><dd><p>
+This is a debug message indicating that the program retrieves one
+difference from a zone's difference sequences and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_START"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADR_BADDATA"></a><span class="term">DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</span></dt><dd><p>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_COMMIT"></a><span class="term">DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_CREATED"></a><span class="term">DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_DESTROYED"></a><span class="term">DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</span></dt><dd><p>
+Debug information. A zone updater object is destroyed, either after
+successfully making updates to the shown zone on the shown backend database
+or after a failure to do so.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACK"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but it may also be a bug in the application that forgot to commit
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to roll back the incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation. In either
+case, if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD"></a><span class="term">DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</span></dt><dd><p>
+The database doesn't contain a directly matching domain, but it does contain a
+wildcard one, which is being used to synthesize the answer.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_NS"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</span></dt><dd><p>
+The database was queried to provide glue data and it didn't find a direct
+match. It could create one from the given wildcard, but wildcard matching is
+forbidden under a zone cut, and a zone cut was found. Therefore the
+delegation will be returned instead.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_SUB"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</span></dt><dd><p>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, so this name acts like an empty non-terminal (actually, from the
+protocol point of view, it is an empty non-terminal, but the code discovers
+it differently).
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_EMPTY"></a><span class="term">DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</span></dt><dd><p>
+The given wildcard exists implicitly in the domain space, as an empty
+non-terminal (e.g. there's something like subdomain.*.example.org, so
+*.example.org exists implicitly but is empty). This will produce NXRRSET,
+because the constructed domain is empty, just like the wildcard.
</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
A debug message indicating that a query for the given name and RR type is being
processed.
@@ -1138,6 +1287,19 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
+</p></dd><dt><a name="LIBXFRIN_DIFFERENT_TTL"></a><span class="term">LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</span></dt><dd><p>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten with the earlier one in the sequence.
+</p></dd><dt><a name="LIBXFRIN_NO_JOURNAL"></a><span class="term">LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</span></dt><dd><p>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
A message from the interface to the underlying logger implementation reporting
that the debug level (as set by an internally-created string DEBUGn, where n
@@ -1259,6 +1421,16 @@ Within a message file, a line starting with a dollar symbol was found
</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
The specified error was encountered by the message compiler when writing
to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ACCESS_FAILURE"></a><span class="term">NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</span></dt><dd><p>
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message. This can be either a
+configuration error or an installation setup failure.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND"></a><span class="term">NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</span></dt><dd><p>
+notify_out attempted to get slave information for a zone but the zone
+wasn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this step, so this indicates either a critical inconsistency in the
+data source or a software bug.
</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
The notify_out library tried to send a notify message to the given
address, but it appears to be an invalid address. The configuration
@@ -1315,6 +1487,13 @@ provide more information.
The notify message to the given address (noted as address#port) has
timed out, and the message will be resent until the max retry limit
is reached.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_BAD_SOA"></a><span class="term">NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. A notify message won't
+be sent for such a zone.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_NO_NS"></a><span class="term">NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. A notify message won't be sent for such a zone.
</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
A debug message issued when the NSAS (nameserver address store - part
of the resolver) is making a callback into the resolver to retrieve the
@@ -1732,6 +1911,11 @@ respond with 'Stats Httpd is up.' and its PID.
An unknown command has been sent to the stats-httpd module. The
stats-httpd module will respond with an error, and the command will
be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_DATAERROR"></a><span class="term">STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the specified data
+corresponding to the requested URI is incorrect.
</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
An internal error occurred while handling an HTTP request. An HTTP 500
response will be sent back, and the specific error is printed. This
@@ -1776,14 +1960,10 @@ control bus. A likely problem is that the message bus daemon
</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
This debug message is printed when the stats module has received a
configuration update from the configuration manager.
-</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</span></dt><dd><p>
+The stats module received a command to show all statistics schemas of all modules.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</span></dt><dd><p>
+The stats module received a command to show the specified statistics schema of the specified module.
</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
The stats module received a command to show all statistics that it has
collected.
@@ -1801,6 +1981,11 @@ will respond with an error and the command will be ignored.
</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
This debug message is printed when a request is sent to the boss module
to send its data to the stats module.
+</p></dd><dt><a name="STATS_STARTING"></a><span class="term">STATS_STARTING starting</span></dt><dd><p>
+The stats module is now starting.
+</p></dd><dt><a name="STATS_START_ERROR"></a><span class="term">STATS_START_ERROR stats module error: %1</span></dt><dd><p>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
There was a keyboard interrupt signal to stop the stats module. The
daemon will now shut down.
@@ -1812,19 +1997,23 @@ from a different version of BIND 10 than the stats module itself.
Please check your installation.
</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to a protocol error.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
-A connection to the master server has been made, the serial value in
-the SOA record has been checked, and a zone transfer has been started.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
-The AXFR transfer of the given zone was successfully completed.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
+</p></dd><dt><a name="XFRIN_AXFR_INCONSISTENT_SOA"></a><span class="term">XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</span></dt><dd><p>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found. If it
+turns out to happen with a real-world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of the
+zone), we can consider a stricter action.
</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
The given master address is not a valid IP address.
</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
@@ -1843,6 +2032,17 @@ error is given in the log message.
</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
There was an error opening a connection to the master. The error is
shown in the log message.
+</p></dd><dt><a name="XFRIN_GOT_INCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</span></dt><dd><p>
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+</p></dd><dt><a name="XFRIN_GOT_NONINCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</span></dt><dd><p>
+A non-incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. A non-incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to an IXFR query the first data is not an SOA, or its SOA
+serial is not equal to the requested SOA serial.
</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
There was an error importing the python DNS module pydnspp. The most
likely cause is a PYTHONPATH problem.
@@ -1853,6 +2053,11 @@ was killed.
</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
There was a problem sending a message to the zone manager. This most
likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_NOTIFY_UNKNOWN_MASTER"></a><span class="term">XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</span></dt><dd><p>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
There was an internal command to retransfer the given zone, but the
zone is not known to the system. This may indicate that the configuration
@@ -1866,24 +2071,37 @@ daemon will now shut down.
</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
An uncaught exception was raised while running the xfrin daemon. The
exception message is printed in the log message.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
-A transfer out of the given zone has started.
+</p></dd><dt><a name="XFRIN_XFR_OTHER_FAILURE"></a><span class="term">XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or a failure
+in the database connection. The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_PROCESS_FAILURE"></a><span class="term">XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</span></dt><dd><p>
+An XFR session failed outside the main protocol handling. This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure. Details of the error are shown in
+the log message. In general, these errors are not really expected
+ones, and indicate an installation error or a program bug. The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete. So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears this
+often.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FALLBACK"></a><span class="term">XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</span></dt><dd><p>
+The IXFR transfer of the given zone failed. This can happen in many cases,
+such as when the remote server doesn't support IXFR, we don't have the SOA
+record (or the zone at all), or we are out of sync. In many of these
+situations AXFR could still work, so we try it in case it helps.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_STARTED"></a><span class="term">XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</span></dt><dd><p>
+The XFR transfer of the given zone was successfully completed.
</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
The TSIG key string as read from the configuration does not represent
a valid TSIG key.
@@ -1894,6 +2112,9 @@ most likely cause is that the msgq daemon is not running.
There was a problem reading a response from another module over the
command and control channel. The most likely cause is that the
configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_CONFIG_ERROR"></a><span class="term">XFROUT_CONFIG_ERROR error found in configuration data: %1</span></dt><dd><p>
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
There was a socket error while contacting the b10-auth daemon to
fetch a transfer request. The auth daemon may have shutdown.
@@ -1908,6 +2129,45 @@ by xfrout could not be found. This suggests that either some libraries
are missing on the system, or the PYTHONPATH variable is not correct.
The specific place where this library needs to be depends on your
system and your specific installation.
+</p></dd><dt><a name="XFROUT_IXFR_MULTIPLE_SOA"></a><span class="term">XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</span></dt><dd><p>
+An IXFR request was received with more than one SOA RR in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_JOURNAL_SUPPORT"></a><span class="term">XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_SOA"></a><span class="term">XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</span></dt><dd><p>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_VERSION"></a><span class="term">XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received, but the requested range of differences
+was not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_ZONE"></a><span class="term">XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</span></dt><dd><p>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</p></dd><dt><a name="XFROUT_IXFR_UPTODATE"></a><span class="term">XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</span></dt><dd><p>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic. This will soon
+be implemented.
+</p></dd><dt><a name="XFROUT_MODULECC_SESSION_ERROR"></a><span class="term">XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</span></dt><dd><p>
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
New configuration settings have been sent from the configuration
manager. The xfrout daemon will now apply them.
@@ -1929,15 +2189,25 @@ There was an error processing a transfer request. The error is included
in the log message, but at this point no specific information other
than that could be given. This points to incomplete exception handling
in the code.
-</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
-</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer a zone to the
+given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</p></dd><dt><a name="XFROUT_QUERY_QUOTA_EXCCEEDED"></a><span class="term">XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</span></dt><dd><p>
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is too busy due to requests from
+unexpected clients, you may want to restrict access to the legitimate
+clients with an ACL.
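+For example, the limit could be raised with bindctl. The following is
+only a rough sketch; it assumes the full configuration identifier is
+Xfrout/transfers_out (matching the parameter named in this message) and
+uses 20 as an arbitrary example value:
+</p><pre class="screen">&gt; config set Xfrout/transfers_out 20
+&gt; config commit</pre><p>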
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</span></dt><dd><p>
+The xfrout process rejected (by REFUSED rcode) a request to transfer a zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+the given host. This is because of the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
The xfrout daemon received a shutdown command from the command channel
and will now shut down.
@@ -1973,6 +2243,30 @@ socket needed for contacting the b10-auth daemon to pass requests
on, but the file is in use. The most likely cause is that another
xfrout daemon process is still running. This xfrout daemon (the one
printing this message) will not start.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_CHECK_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</span></dt><dd><p>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low level error in the data
+source, but it may also be another, more general (though less likely)
+error such as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_DONE"></a><span class="term">XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_FAILED"></a><span class="term">XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_STARTED"></a><span class="term">XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
An error was encountered on the command channel. The message indicates
the nature of the error.
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index bade381..4dc02d4 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -573,19 +573,117 @@ needs a dedicated message bus.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
-<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<varlistentry id="BIND10_COMPONENT_FAILED">
+<term>BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</term>
<listitem><para>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
-<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<varlistentry id="BIND10_COMPONENT_RESTART">
+<term>BIND10_COMPONENT_RESTART component %1 is about to restart</term>
<listitem><para>
-This message shows whether or not the resolver should be
-started according to the configuration.
+The named component failed previously and we will try to restart it to provide
+as flawless a service as possible, but what happened should be investigated,
+as it could happen again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START">
+<term>BIND10_COMPONENT_START component %1 is starting</term>
+<listitem><para>
+The named component is about to be started by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START_EXCEPTION">
+<term>BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</term>
+<listitem><para>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_STOP">
+<term>BIND10_COMPONENT_STOP component %1 is being stopped</term>
+<listitem><para>
+A component is about to be asked by the boss to stop voluntarily.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_UNSATISFIED">
+<term>BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</term>
+<listitem><para>
+A component failed for some reason (see previous messages). It is either a core
+component or a needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_BUILD">
+<term>BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</term>
+<listitem><para>
+A debug message. This indicates that the configurator is building a plan
+for changing the configuration from the old one to the new one. This does no
+real work yet; it just plans what needs to be done.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_PLAN_INTERRUPTED">
+<term>BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</term>
+<listitem><para>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RECONFIGURE">
+<term>BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</term>
+<listitem><para>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read for the first time, or when an operator changes the configuration of the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RUN">
+<term>BIND10_CONFIGURATOR_RUN running plan of %1 tasks</term>
+<listitem><para>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_START">
+<term>BIND10_CONFIGURATOR_START bind10 component configurator is starting up</term>
+<listitem><para>
+The part of the boss process that takes care of starting and stopping the
+right components is starting up. This happens only once, at the startup of the
+boss process. It will start the basic set of processes now (the ones the boss
+needs to read the configuration); the rest will be started after the
+configuration is known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_STOP">
+<term>BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</term>
+<listitem><para>
+The part of the boss process that takes care of starting and stopping
+processes is shutting down. All started components will be shut down now
+(more precisely, asked to terminate on their own; if they fail to comply,
+other parts of the boss process will try to force them).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_TASK">
+<term>BIND10_CONFIGURATOR_TASK performing task %1 on %2</term>
+<listitem><para>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
</para></listitem>
</varlistentry>
@@ -632,14 +730,6 @@ running, which needs to be stopped.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
-<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
-<listitem><para>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="BIND10_MSGQ_DISAPPEARED">
<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
<listitem><para>
@@ -649,24 +739,12 @@ inconsistent state of the system, and BIND 10 will now shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
-<listitem><para>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<varlistentry id="BIND10_PROCESS_ENDED">
+<term>BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</term>
<listitem><para>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+This indicates that a previously started process has terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This doesn't distinguish whether the process was supposed to terminate or not.
</para></listitem>
</varlistentry>
@@ -740,6 +818,13 @@ The boss module is sending a SIGTERM signal to the given process.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_SETUID">
+<term>BIND10_SETUID setting UID to %1</term>
+<listitem><para>
+The boss switches the user it runs as to the given UID.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_SHUTDOWN">
<term>BIND10_SHUTDOWN stopping the server</term>
<listitem><para>
@@ -774,15 +859,6 @@ looks like a programmer error.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
-<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
-<listitem><para>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="BIND10_SOCKCREATOR_EOF">
<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
<listitem><para>
@@ -846,6 +922,14 @@ The boss forwards a request for a socket to the socket creator.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_STARTED_CC">
+<term>BIND10_STARTED_CC started configuration/command session</term>
+<listitem><para>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_STARTED_PROCESS">
<term>BIND10_STARTED_PROCESS started %1</term>
<listitem><para>
@@ -867,6 +951,14 @@ Informational message on startup that shows the full version.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_STARTING_CC">
+<term>BIND10_STARTING_CC starting configuration/command session</term>
+<listitem><para>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="BIND10_STARTING_PROCESS">
<term>BIND10_STARTING_PROCESS starting process %1</term>
<listitem><para>
@@ -905,10 +997,41 @@ shown, and BIND10 will now shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="BIND10_START_AS_NON_ROOT">
-<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<varlistentry id="BIND10_STARTUP_UNEXPECTED_MESSAGE">
+<term>BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</term>
<listitem><para>
-The given module is being started or restarted without root privileges.
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNRECOGNISED_MESSAGE">
+<term>BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_AUTH">
+<term>BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</term>
+<listitem><para>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_RESOLVER">
+<term>BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</term>
+<listitem><para>
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
@@ -932,6 +1055,20 @@ action will be taken by the boss process.
</para></listitem>
</varlistentry>
+<varlistentry id="BIND10_WAIT_CFGMGR">
+<term>BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</term>
+<listitem><para>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+</para></listitem>
+</varlistentry>
+
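
For example, to give a slow system a full minute rather than the default
ten seconds, the boss could be started with the --wait switch described
above (the value is only an example):

    $ bind10 --wait 60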
<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
<listitem><para>
@@ -1535,6 +1672,13 @@ certificate file could not be read.
</para></listitem>
</varlistentry>
+<varlistentry id="CMDCTL_STARTED">
+<term>CMDCTL_STARTED cmdctl is listening for connections on %1:%2</term>
+<listitem><para>
+The cmdctl daemon has started and is now listening for connections.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
<listitem><para>
@@ -1909,6 +2053,50 @@ database). The data in database should be checked and fixed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_END">
+<term>DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program (successfully)
+reaches the end of a sequence of a zone's differences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_NEXT">
+<term>DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</term>
+<listitem><para>
+This is a debug message indicating that the program retrieves one
+difference from a zone's difference sequence and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_START">
+<term>DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADR_BADDATA">
+<term>DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</term>
+<listitem><para>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, start and end
+serials, and additional details of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
<term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
<listitem><para>
@@ -2890,6 +3078,20 @@ together, the later one get's overwritten to the earlier one in the sequence.
</para></listitem>
</varlistentry>
+<varlistentry id="LIBXFRIN_NO_JOURNAL">
+<term>LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</term>
+<listitem><para>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
@@ -3126,6 +3328,26 @@ to the named output file.
</para></listitem>
</varlistentry>
+<varlistentry id="NOTIFY_OUT_DATASRC_ACCESS_FAILURE">
+<term>NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</term>
+<listitem><para>
+notify_out failed to get access to one of the configured data sources.
+A detailed error is shown in the log message. This can be either a
+configuration error or an installation setup failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND">
+<term>NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</term>
+<listitem><para>
+notify_out attempted to get slave information for a zone but the zone
+wasn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this process. So this indicates either a critical inconsistency in the
+data source or a software bug.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
<listitem><para>
@@ -3237,6 +3459,23 @@ is reached.
</para></listitem>
</varlistentry>
+<varlistentry id="NOTIFY_OUT_ZONE_BAD_SOA">
+<term>NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. A notify message won't
+be sent for such a zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_ZONE_NO_NS">
+<term>NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. A notify message won't be sent for such a zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="NSAS_FIND_NS_ADDRESS">
<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
@@ -4144,6 +4383,16 @@ be ignored.
</para></listitem>
</varlistentry>
+<varlistentry id="STATHTTPD_SERVER_DATAERROR">
+<term>STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="STATHTTPD_SERVER_ERROR">
<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
<listitem><para>
@@ -4518,6 +4767,25 @@ in database connection. The error is shown in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_XFR_PROCESS_FAILURE">
+<term>XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</term>
+<listitem><para>
+An XFR session failed outside the main protocol handling. This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure. Details of the error are shown in
+the log message. In general, these errors are not really expected
+ones, and indicate an installation error or a program bug. The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete. So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
<term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
<listitem><para>
@@ -4526,6 +4794,16 @@ The error is shown in the log message.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_XFR_TRANSFER_FALLBACK">
+<term>XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</term>
+<listitem><para>
+The IXFR transfer of the given zone failed. This might happen for many reasons:
+the remote server doesn't support IXFR, we don't have the SOA record
+(or the zone at all), we are out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we try that in case it helps.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
<term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
<listitem><para>
@@ -4541,44 +4819,6 @@ The XFR transfer of the given zone was successfully completed.
</para></listitem>
</varlistentry>
-<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
-<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
-<listitem><para>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
-<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
-<listitem><para>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
-<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
-<listitem><para>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
-<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
-<listitem><para>
-A transfer out of the given zone has started.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
<listitem><para>
@@ -4641,6 +4881,69 @@ system and your specific installation.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_IXFR_MULTIPLE_SOA">
+<term>XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</term>
+<listitem><para>
+An IXFR request was received with more than one SOA RR in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_JOURNAL_SUPPORT">
+<term>XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_SOA">
+<term>XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</term>
+<listitem><para>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_VERSION">
+<term>XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received, but the requested range of differences
+was not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_ZONE">
+<term>XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</term>
+<listitem><para>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_UPTODATE">
+<term>XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</term>
+<listitem><para>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic. This will soon
+be implemented.
+</para></listitem>
+</varlistentry>
+
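
For background, the serial comparison the note above refers to is the
RFC 1982 "serial number arithmetic" on 32-bit zone serials. The
following is a minimal illustrative sketch of that comparison; it is
not the xfrout code, and the helper names are invented for the example:

    #include <cstdint>
    #include <iostream>

    // RFC 1982 comparison: s1 is "less than" s2 if the wrapping distance
    // from s1 to s2 is between 1 and 2^31 - 1.
    bool serialLessThan(uint32_t s1, uint32_t s2) {
        return (s1 < s2 && (s2 - s1) < 0x80000000u) ||
               (s1 > s2 && (s1 - s2) > 0x80000000u);
    }

    int main() {
        const uint32_t ours = 2012010100u;    // server's SOA serial
        const uint32_t theirs = 2012010100u;  // serial in the client's request
        // The client is "new enough" if its serial is not older than ours;
        // equal serials also count as up to date.
        std::cout << (serialLessThan(theirs, ours) ? "IXFR needed" : "up to date")
                  << std::endl;
        return 0;
    }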
<varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
<term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
<listitem><para>
@@ -4699,21 +5002,36 @@ in the code.
</varlistentry>
<varlistentry id="XFROUT_QUERY_DROPPED">
-<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<term>XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to
+the given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_QUOTA_EXCCEEDED">
+<term>XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</term>
<listitem><para>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen on a busy xfrout server, and you may want to increase
+this parameter; if the server is too busy due to requests from
+unexpected clients, you may want to use an ACL to restrict access to
+the legitimate clients.
</para></listitem>
</varlistentry>
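
If legitimate secondaries are hitting this quota, the limit can be
raised from bindctl; assuming the parameter is Xfrout/transfers_out as
the message text suggests, the change would look roughly like:

    > config set Xfrout/transfers_out 20
    > config commit

Whether raising the quota or tightening the ACL is the better response
depends on whether the excess requests come from expected clients or
from unknown ones.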
<varlistentry id="XFROUT_QUERY_REJECTED">
-<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<term>XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</term>
<listitem><para>
The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+the given host. This is because of the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
</para></listitem>
</varlistentry>
@@ -4792,6 +5110,55 @@ printing this message) will not start.
</para></listitem>
</varlistentry>
+<varlistentry id="XFROUT_XFR_TRANSFER_CHECK_ERROR">
+<term>XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</term>
+<listitem><para>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low level error in the data
+source, but it may also be another, more general (though less likely)
+error such as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_DONE">
+<term>XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_ERROR">
+<term>XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_FAILED">
+<term>XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_STARTED">
+<term>XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="ZONEMGR_CCSESSION_ERROR">
<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
<listitem><para>
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index c9dac88..caf69b9 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -91,9 +91,9 @@ public:
bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
OutputBufferPtr buffer,
auto_ptr<TSIGContext> tsig_context);
- bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer,
- auto_ptr<TSIGContext> tsig_context);
+ bool processXfrQuery(const IOMessage& io_message, MessagePtr message,
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processNotify(const IOMessage& io_message, MessagePtr message,
OutputBufferPtr buffer,
auto_ptr<TSIGContext> tsig_context);
@@ -219,8 +219,9 @@ class ConfigChecker : public SimpleCallback {
public:
ConfigChecker(AuthSrv* srv) : server_(srv) {}
virtual void operator()(const IOMessage&) const {
- if (server_->getConfigSession()->hasQueuedMsgs()) {
- server_->getConfigSession()->checkCommand();
+ ModuleCCSession* cfg_session = server_->getConfigSession();
+ if (cfg_session != NULL && cfg_session->hasQueuedMsgs()) {
+ cfg_session->checkCommand();
}
}
private:
@@ -472,10 +473,11 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
ConstQuestionPtr question = *message->beginQuestion();
const RRType &qtype = question->getType();
if (qtype == RRType::AXFR()) {
- sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
- tsig_context);
+ sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+ tsig_context);
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
+ sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+ tsig_context);
} else {
sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
tsig_context);
@@ -543,9 +545,9 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
}
bool
-AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer,
- auto_ptr<TSIGContext> tsig_context)
+AuthSrvImpl::processXfrQuery(const IOMessage& io_message, MessagePtr message,
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
// Increment query counter.
incCounter(io_message.getSocket().getProtocol());
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 4698588..ac25cd6 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -229,7 +229,8 @@ TEST_F(AuthSrvTest, AXFROverUDP) {
TEST_F(AuthSrvTest, AXFRSuccess) {
EXPECT_FALSE(xfrout.isConnected());
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
// On success, the AXFR query has been passed to a separate process,
// so we shouldn't have to respond.
@@ -245,7 +246,8 @@ TEST_F(AuthSrvTest, TSIGSigned) {
const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Run the message through the server
@@ -278,7 +280,8 @@ TEST_F(AuthSrvTest, TSIGSignedBadKey) {
TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Process the message, but use a different key there
@@ -309,7 +312,8 @@ TEST_F(AuthSrvTest, TSIGBadSig) {
TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
TSIGContext context(key);
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("version.bind"), RRClass::CH(), RRType::TXT());
+ Name("version.bind"), RRClass::CH(),
+ RRType::TXT());
createRequestPacket(request_message, IPPROTO_UDP, &context);
// Process the message, but use a different key there
@@ -375,7 +379,8 @@ TEST_F(AuthSrvTest, AXFRConnectFail) {
EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
xfrout.disableConnect();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -388,7 +393,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
// first send a valid query, making the connection with the xfr process
// open.
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(xfrout.isConnected());
@@ -397,7 +403,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
parse_message->clear(Message::PARSE);
response_obuffer->clear();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -414,7 +421,66 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
xfrout.disableSend();
xfrout.disableDisconnect();
UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
- Name("example.com"), RRClass::IN(), RRType::AXFR());
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ EXPECT_THROW(server.processMessage(*io_message, parse_message,
+ response_obuffer, &dnsserv),
+ XfroutError);
+ EXPECT_TRUE(xfrout.isConnected());
+ // XXX: we need to re-enable disconnect. otherwise an exception would be
+ // thrown via the destructor of the server.
+ xfrout.enableDisconnect();
+}
+
+TEST_F(AuthSrvTest, IXFRConnectFail) {
+ EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
+ xfrout.disableConnect();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRSendFail) {
+ // first send a valid query, making the connection with the xfr process
+ // open.
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(xfrout.isConnected());
+
+ xfrout.disableSend();
+ parse_message->clear(Message::PARSE);
+ response_obuffer->clear();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+
+ // The connection should have been closed due to the send failure.
+ EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRDisconnectFail) {
+ // In our usage disconnect() shouldn't fail. So we'll see the exception
+ // should it be thrown.
+ xfrout.disableSend();
+ xfrout.disableDisconnect();
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
createRequestPacket(request_message, IPPROTO_TCP);
EXPECT_THROW(server.processMessage(*io_message, parse_message,
response_obuffer, &dnsserv),
@@ -426,8 +492,9 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
}
TEST_F(AuthSrvTest, notify) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -458,8 +525,9 @@ TEST_F(AuthSrvTest, notify) {
TEST_F(AuthSrvTest, notifyForCHClass) {
// Same as the previous test, but for the CH RRClass.
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::CH(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::CH(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -487,8 +555,9 @@ TEST_F(AuthSrvTest, notifyEmptyQuestion) {
}
TEST_F(AuthSrvTest, notifyMultiQuestions) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
// add one more SOA question
request_message.addQuestion(Question(Name("example.com"), RRClass::IN(),
RRType::SOA()));
@@ -501,8 +570,9 @@ TEST_F(AuthSrvTest, notifyMultiQuestions) {
}
TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::NS());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::NS());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -513,8 +583,9 @@ TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
TEST_F(AuthSrvTest, notifyWithoutAA) {
// implicitly leave the AA bit off. our implementation will accept it.
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
EXPECT_TRUE(dnsserv.hasAnswer());
@@ -523,8 +594,9 @@ TEST_F(AuthSrvTest, notifyWithoutAA) {
}
TEST_F(AuthSrvTest, notifyWithErrorRcode) {
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
request_message.setRcode(Rcode::SERVFAIL());
createRequestPacket(request_message, IPPROTO_UDP);
@@ -537,8 +609,9 @@ TEST_F(AuthSrvTest, notifyWithErrorRcode) {
TEST_F(AuthSrvTest, notifyWithoutSession) {
server.setXfrinSession(NULL);
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
@@ -551,8 +624,9 @@ TEST_F(AuthSrvTest, notifyWithoutSession) {
TEST_F(AuthSrvTest, notifySendFail) {
notify_session.disableSend();
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
@@ -563,8 +637,9 @@ TEST_F(AuthSrvTest, notifySendFail) {
TEST_F(AuthSrvTest, notifyReceiveFail) {
notify_session.disableReceive();
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -574,8 +649,9 @@ TEST_F(AuthSrvTest, notifyReceiveFail) {
TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
notify_session.setMessage(Element::fromJSON("{\"foo\": 1}"));
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -586,8 +662,9 @@ TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
notify_session.setMessage(
Element::fromJSON("{\"result\": [1, \"FAIL\"]}"));
- UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
- Name("example.com"), RRClass::IN(), RRType::SOA());
+ UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+ default_qid, Name("example.com"),
+ RRClass::IN(), RRType::SOA());
request_message.setHeaderFlag(Message::HEADERFLAG_AA);
createRequestPacket(request_message, IPPROTO_UDP);
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -737,12 +814,28 @@ TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
Name("example.com"), RRClass::IN(), RRType::AXFR());
createRequestPacket(request_message, IPPROTO_TCP);
// On success, the AXFR query has been passed to a separate process,
- // so we shouldn't have to respond.
+ // so auth itself shouldn't respond.
server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
// After processing TCP AXFR query, the counter should be 1.
EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
}
+// Submit TCP IXFR query and check query counter
+TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
+ // The counter should be initialized to 0.
+ EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(), RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ // On success, the IXFR query has been passed to a separate process,
+ // so auth itself shouldn't respond.
+ server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
+ // After processing TCP IXFR query, the counter should be 1.
+ EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+}
+
// class for queryCounterUnexpected test
// getProtocol() returns IPPROTO_IP
class DummyUnknownSocket : public IOSocket {
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index 0adcb70..c2e44e7 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,21 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: August 11, 2011
+.\" Date: November 23, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "BIND10" "8" "November 23, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -31,7 +22,7 @@
bind10 \- BIND 10 boss process
.SH "SYNOPSIS"
.HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
.SH "DESCRIPTION"
.PP
The
@@ -41,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
.PP
The arguments are as follows:
.PP
-\fB\-\-brittle\fR
-.RS 4
-Shutdown if any of the child processes of
-\fBbind10\fR
-exit\&. This is intended to help developers debug the server, and should not be used in production\&.
-.RE
-.PP
\fB\-c\fR \fIconfig\-filename\fR, \fB\-\-config\-file\fR \fIconfig\-filename\fR
.RS 4
The configuration filename to use\&. Can be either absolute or relative to data path\&. In case it is absolute, value of data path is not considered\&.
@@ -121,6 +105,204 @@ and its child processes\&.
.RS 4
Sets the amount of time that BIND 10 will wait for the configuration manager (a key component of BIND 10) to initialize itself before abandoning the start up and terminating with an error\&. The wait_time is specified in seconds and has a default value of 10\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The configuration provides settings for components for
+\fBbind10\fR
+to manage under
+\fI/Boss/components/\fR\&. The default elements are:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-auth\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-cmdctl\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/setuid\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\-httpd\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrin\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrout\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-zonemgr\fR
+.RE
+.PP
+(Note that the startup of
+\fBb10\-sockcreator\fR,
+\fBb10\-cfgmgr\fR, and
+\fBb10\-msgq\fR
+is not configurable\&. It is hardcoded and
+\fBbind10\fR
+will not run without them\&.)
+.PP
+These named sets (listed above) contain the following settings:
+.PP
+\fIaddress\fR
+.RS 4
+The name used for communicating with the component on the message bus\&.
+.RE
+.PP
+\fIkind\fR
+.RS 4
+This defines how required a component is\&. The possible settings for
+\fIkind\fR
+are:
+\fIcore\fR
+(the system won\'t start if the component won\'t start, and
+\fBbind10\fR
+will shutdown if a
+\(lqcore\(rq
+component crashes),
+\fIdispensable\fR
+(\fBbind10\fR
+will restart a failing component), and
+\fIneeded\fR
+(\fBbind10\fR
+will shutdown if the component won\'t start initially, but if it crashes later, it will attempt to restart it)\&. This setting is required\&.
+.RE
+.PP
+\fIpriority\fR
+.RS 4
+This is an integer\&.
+\fBbind10\fR
+will start the components with the largest priority numbers first\&.
+.RE
+.PP
+\fIprocess\fR
+.RS 4
+This is the filename of the executable to be started\&. If not defined, then
+\fBbind10\fR
+will use the component name instead\&.
+.RE
+.PP
+\fIspecial\fR
+.RS 4
+This defines if the component is started in a special way\&.
+.RE
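
As an illustration of these settings, a new component could be added
from bindctl along roughly the following lines (the component name and
values here are only an example, and the syntax shown is the generic
bindctl config syntax rather than anything specific to this page):

    > config add Boss/components b10-resolver
    > config set Boss/components/b10-resolver/special resolver
    > config set Boss/components/b10-resolver/kind needed
    > config set Boss/components/b10-resolver/priority 10
    > config commit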
+.PP
+The
+\fIBoss\fR
+configuration commands are:
+.PP
+
+\fBgetstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon\&. This is an internal command and not exposed to the administrator\&.
+
+.PP
+
+\fBping\fR
+is used to check the connection with the
+\fBbind10\fR
+daemon\&. It returns the text
+\(lqpong\(rq\&.
+.PP
+
+\fBsendstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon immediately\&.
+.PP
+
+\fBshow_processes\fR
+lists the current processes managed by
+\fBbind10\fR\&. The output is an array in JSON format containing the process ID and the name for each\&.
+
+
+.PP
+
+\fBshutdown\fR
+tells
+\fBbind10\fR
+to shutdown the BIND 10 servers\&. It will tell each process it manages to shutdown and, when complete,
+\fBbind10\fR
+will exit\&.
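
These Boss commands are normally issued from bindctl as the module name
followed by the command name; for example (assuming the usual bindctl
command syntax, output omitted):

    > Boss ping
    > Boss show_processes
    > Boss shutdown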
.SH "STATISTICS DATA"
.PP
The statistics data collected by the
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 6de0947..6705760 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>August 11, 2011</date>
+ <date>November 23, 2011</date>
</refentryinfo>
<refmeta>
@@ -51,7 +51,6 @@
<arg><option>-u <replaceable>user</replaceable></option></arg>
<arg><option>-v</option></arg>
<arg><option>-w <replaceable>wait_time</replaceable></option></arg>
- <arg><option>--brittle</option></arg>
<arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
<arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
<arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
@@ -92,20 +91,6 @@
<varlistentry>
<term>
- <option>--brittle</option>
- </term>
- <listitem>
- <para>
- Shutdown if any of the child processes of
- <command>bind10</command> exit. This is intended to
- help developers debug the server, and should not be
- used in production.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
<option>-c</option> <replaceable>config-filename</replaceable>,
<option>--config-file</option> <replaceable>config-filename</replaceable>
</term>
@@ -233,6 +218,204 @@ TODO: configuration section
-->
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The configuration provides settings for components for
+ <command>bind10</command> to manage under
+ <varname>/Boss/components/</varname>.
+ The default elements are:
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-auth</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/setuid</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-stats</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-xfrin</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-xfrout</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
+ </listitem>
+
+ </itemizedlist>
+
+ <para>
+ (Note that the startup of <command>b10-sockcreator</command>,
+ <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+ is not configurable. It is hardcoded and <command>bind10</command>
+ will not run without them.)
+ </para>
+
+ <para>
+ These named sets (listed above) contain the following settings:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><varname>address</varname></term>
+ <listitem>
+ <para>The name used for communicating with the component
+ on the message bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><varname>kind</varname></term>
+ <listitem>
+ <para>
+ This defines how critical a component is.
+ The possible settings for <varname>kind</varname> are:
+ <varname>core</varname> (the system will not start if the
+ component fails to start, and <command>bind10</command>
+ will shut down if a <quote>core</quote> component crashes),
+ <varname>dispensable</varname> (<command>bind10</command>
+ will restart a failing component),
+ and
+ <varname>needed</varname> (<command>bind10</command>
+ will shut down if the component fails to start initially,
+ but will attempt a restart if it crashes later).
+ This setting is required.
+<!-- TODO: formatting -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+<!--
+TODO: currently not used
+ <varlistentry>
+ <term> <varname>params</varname> </term>
+ <listitem>
+ <para>
+list
+</para>
+ </listitem>
+ </varlistentry>
+-->
+
+ <varlistentry>
+ <term> <varname>priority</varname> </term>
+ <listitem>
+ <para>This is an integer. <command>bind10</command>
+ will start the components with the largest priority numbers first.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>process</varname> </term>
+ <listitem>
+ <para>This is the filename of the executable to be started.
+ If not defined, then <command>bind10</command> will
+ use the component name instead.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>special</varname> </term>
+ <listitem>
+ <para>
+ This defines whether the component is started in a
+ special way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+setuid
+sockcreator
+xfrin
+-->
+
+</para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+<!-- TODO: formating -->
+ <para>
+ The <varname>Boss</varname> configuration commands are:
+ </para>
+<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
+
+ <para>
+ <command>getstats</command> tells <command>bind10</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon.
+ This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+<!-- TODO: explain difference with sendstat -->
+ </para>
+
+ <para>
+ <command>ping</command> is used to check the connection with the
+ <command>bind10</command> daemon.
+ It returns the text <quote>pong</quote>.
+ </para>
+
+ <para>
+ <command>sendstats</command> tells <command>bind10</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon immediately.
+<!-- TODO: compare with internal command getstats? -->
+ </para>
+
+ <para>
+ <command>show_processes</command> lists the current processes
+ managed by <command>bind10</command>.
+ The output is an array in JSON format containing the process
+ ID and the name for each.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+ </para>
+
+ <para>
+ <command>shutdown</command> tells <command>bind10</command>
+ to shut down the BIND 10 servers.
+ It will tell each process it manages to shut down and, when
+ complete, <command>bind10</command> will exit.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>STATISTICS DATA</title>
<para>
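The show_processes command documented above returns a JSON array giving the process ID and name of each managed process. A purely illustrative sketch of such a reply (the PIDs and the selection of processes are made up):

    # Illustrative (made-up) show_processes reply: one [pid, name] pair
    # per process that bind10 currently manages.
    example_reply = [
        [20364, "b10-sockcreator"],
        [20365, "b10-msgq"],
        [20366, "b10-cfgmgr"],
    ]
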
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 0b4f4cb..13cd3e3 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -92,51 +92,6 @@ VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
# This is for boot_time of Boss
_BASETIME = time.gmtime()
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process was been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
class ProcessInfoError(Exception): pass
class ProcessInfo:
@@ -151,7 +106,6 @@ class ProcessInfo:
self.env = env
self.dev_null_stdout = dev_null_stdout
self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
self.uid = uid
self.username = username
self.process = None
@@ -200,7 +154,6 @@ class ProcessInfo:
env=spawn_env,
preexec_fn=self._preexec_work)
self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
# spawn() and respawn() are the same for now, but in the future they
# may have different functionality
@@ -219,7 +172,7 @@ class BoB:
def __init__(self, msgq_socket_file=None, data_path=None,
config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False, wait_time=10):
+ username=None, cmdctl_port=None, wait_time=10):
"""
Initialize the Boss of BIND. This is a singleton (only one can run).
@@ -233,22 +186,13 @@ class BoB:
The cmdctl_port is passed to cmdctl and specify on which port it
should listen.
- brittle is a debug option that controls whether the Boss shuts down
- after any process dies.
-
wait_time controls the amount of time (in seconds) that Boss waits
for selected processes to initialize before continuing with the
initialization. Currently this is only the configuration manager.
"""
self.cc_session = None
self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.cfg_start_dhcp6 = False
- self.cfg_start_dhcp4 = False
self.curproc = None
- # XXX: Not used now, waits for reintroduction of restarts.
- self.dead_processes = {}
self.msgq_socket_file = msgq_socket_file
self.nocache = nocache
self.component_config = {}
@@ -257,6 +201,9 @@ class BoB:
# inapropriate. But as the code isn't probably completely ready
# for it, we leave it at components for now.
self.components = {}
+ # Simple list of components that died and need to wait for a
+ # restart. Components manage their own restart schedules now.
+ self.components_to_restart = []
self.runnable = False
self.uid = setuid
self.username = username
@@ -264,7 +211,6 @@ class BoB:
self.data_path = data_path
self.config_filename = config_filename
self.cmdctl_port = cmdctl_port
- self.brittle = brittle
self.wait_time = wait_time
self._component_configurator = isc.bind10.component.Configurator(self,
isc.bind10.special_component.get_specials())
@@ -628,21 +574,10 @@ class BoB:
# ... and start
return self.start_process("b10-resolver", resargs, self.c_channel_env)
- def start_cmdctl(self):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- if self.verbose:
- args.append("-v")
- return self.start_process("b10-cmdctl", args, self.c_channel_env,
- self.cmdctl_port)
-
- def start_xfrin(self):
- # XXX: a quick-hack workaround. xfrin will implicitly use dynamically
- # loadable data source modules, which will be installed in $(libdir).
+ def __ld_path_hack(self):
+ # XXX: a quick-hack workaround. xfrin/out will implicitly use
+ # dynamically loadable data source modules, which will be installed in
+ # $(libdir).
# On some OSes (including MacOS X and *BSDs) the main process (python)
# cannot find the modules unless they are located in a common shared
# object path or a path in the (DY)LD_LIBRARY_PATH. We should seek
@@ -655,21 +590,44 @@ class BoB:
# the same as for the libexec path addition
# TODO: Once #1292 is finished, remove this method and the special
# component, use it as normal component.
- c_channel_env = dict(self.c_channel_env)
+ env = dict(self.c_channel_env)
if ADD_LIBEXEC_PATH:
cur_path = os.getenv('DYLD_LIBRARY_PATH')
cur_path = '' if cur_path is None else ':' + cur_path
- c_channel_env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+ env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
cur_path = os.getenv('LD_LIBRARY_PATH')
cur_path = '' if cur_path is None else ':' + cur_path
- c_channel_env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+ env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+ return env
+
+ def start_cmdctl(self):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
+ return self.start_process("b10-cmdctl", args, self.c_channel_env,
+ self.cmdctl_port)
+
+ def start_xfrin(self):
# Set up the command arguments.
args = ['b10-xfrin']
if self.verbose:
args += ['-v']
- return self.start_process("b10-xfrin", args, c_channel_env)
+ return self.start_process("b10-xfrin", args, self.__ld_path_hack())
+
+ def start_xfrout(self):
+ # Set up the command arguments.
+ args = ['b10-xfrout']
+ if self.verbose:
+ args += ['-v']
+
+ return self.start_process("b10-xfrout", args, self.__ld_path_hack())
def start_all_components(self):
"""
@@ -742,7 +700,7 @@ class BoB:
If we did not start yet, it raises an exception, which is meant
to propagate through the component and configurator to the startup
- routine and abort the startup imediatelly. If it is started up already,
+ routine and abort the startup immediately. If it is started up already,
we just mark it so we terminate soon.
It does set the exit code in both cases.
@@ -817,7 +775,11 @@ class BoB:
# Tell it it failed. But only if it matters (we are
# not shutting down and the component considers itself
# to be running.
- component.failed(exit_status);
+ component_restarted = component.failed(exit_status);
+ # if the process wants to be restarted, but not just yet,
+ # it returns False
+ if not component_restarted:
+ self.components_to_restart.append(component)
else:
logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
@@ -833,39 +795,22 @@ class BoB:
timeout value.
"""
- # TODO: This is an artefact of previous way of handling processes. The
- # restart queue is currently empty at all times, so this returns None
- # every time it is called (thought is a relict that is obviously wrong,
- # it is called and it doesn't hurt).
- #
- # It is preserved for archeological reasons for the time when we return
- # the delayed restarts, most of it might be useful then (or, if it is
- # found useless, removed).
- next_restart = None
- # if we're shutting down, then don't restart
if not self.runnable:
return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
+ still_dead = []
+ # keep track of the first time we need to check this queue again,
+ # if at all
+ next_restart_time = None
now = time.time()
- for proc_info in self.dead_processes.values():
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
- else:
- logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
- try:
- proc_info.respawn()
- self.components[proc_info.pid] = proc_info
- logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
+ for component in self.components_to_restart:
+ if not component.restart(now):
+ still_dead.append(component)
+ if next_restart_time is None or\
+ next_restart_time > component.get_restart_time():
+ next_restart_time = component.get_restart_time()
+ self.components_to_restart = still_dead
+
+ return next_restart_time
# global variables, needed for signal handlers
options = None
@@ -928,8 +873,6 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
parser.add_option("--pid-file", dest="pid_file", type="string",
default=None,
help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
parser.add_option("-w", "--wait", dest="wait_time", type="int",
default=10, help="Time (in seconds) to wait for config manager to start up")
@@ -1034,7 +977,7 @@ def main():
# Go bob!
boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle,
+ setuid, username, options.cmdctl_port,
options.wait_time)
startup_result = boss_of_bind.startup()
if startup_result:
@@ -1050,10 +993,6 @@ def main():
while boss_of_bind.runnable:
# clean up any processes that exited
boss_of_bind.reap_children()
- # XXX: As we don't put anything into the processes to be restarted,
- # this is really a complicated NOP. But we will try to reintroduce
- # delayed restarts, so it stays here for now, until we find out if
- # it's useful.
next_restart = boss_of_bind.restart_processes()
if next_restart is None:
wait_time = None
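The reworked restart_processes() above delegates restart timing to the components themselves: a component that is not yet due keeps waiting, and the loop reports the earliest time it needs to be checked again. A minimal standalone sketch of that pattern (Component here is a stand-in, not the real isc.bind10.component class):

    import time

    class Component:
        """Stand-in for the real component class: remembers when it may
        be restarted and restarts itself only after that time."""
        def __init__(self, name, delay=10.0):
            self.name = name
            self._restart_time = time.time() + delay

        def get_restart_time(self):
            return self._restart_time

        def restart(self, now):
            # Return True if the restart was done, False if it is too early.
            if now < self._restart_time:
                return False
            print("restarting", self.name)
            return True

    def restart_components(components_to_restart):
        """Mirror of the loop in restart_processes(): keep the components
        that are still waiting and report the earliest next check time."""
        still_dead = []
        next_restart_time = None
        now = time.time()
        for component in components_to_restart:
            if not component.restart(now):
                still_dead.append(component)
                if next_restart_time is None or \
                   next_restart_time > component.get_restart_time():
                    next_restart_time = component.get_restart_time()
        return still_dead, next_restart_time
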
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 4a3cc85..4267b70 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -15,7 +15,7 @@
"kind": "dispensable"
},
"b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
- "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-xfrout": { "special": "xfrout", "kind": "dispensable" },
"b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
"b10-stats": { "address": "Stats", "kind": "dispensable" },
"b10-stats-httpd": {
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 0aa6778..b7b741c 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -105,16 +105,10 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
self.assertEqual(bob.components, {})
- self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
self.assertEqual(bob.username, None)
self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
-
- self.assertEqual(bob.cfg_start_dhcp4, False)
- self.assertEqual(bob.cfg_start_dhcp6, False)
def test_init_alternate_socket(self):
bob = BoB("alt_socket_file")
@@ -123,15 +117,10 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
self.assertEqual(bob.components, {})
- self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
self.assertEqual(bob.username, None)
self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
- self.assertEqual(bob.cfg_start_dhcp4, False)
- self.assertEqual(bob.cfg_start_dhcp6, False)
def test_command_handler(self):
class DummySession():
@@ -274,8 +263,7 @@ class MockBob(BoB):
return procinfo
def start_simple(self, name):
- procmap = { 'b10-xfrout': self.start_xfrout,
- 'b10-zonemgr': self.start_zonemgr,
+ procmap = { 'b10-zonemgr': self.start_zonemgr,
'b10-stats': self.start_stats,
'b10-stats-httpd': self.start_stats_httpd,
'b10-cmdctl': self.start_cmdctl,
@@ -475,7 +463,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
if start_auth:
config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
config['b10-xfrout'] = { 'kind': 'dispensable',
- 'address': 'Xfrout' }
+ 'special': 'xfrout' }
config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
config['b10-zonemgr'] = { 'kind': 'dispensable',
'address': 'Zonemgr' }
@@ -741,15 +729,6 @@ class TestParseArgs(unittest.TestCase):
options = parse_args(['--cmdctl-port=1234'], TestOptParser)
self.assertEqual(1234, options.cmdctl_port)
- def test_brittle(self):
- """
- Test we can use the "brittle" flag.
- """
- options = parse_args([], TestOptParser)
- self.assertFalse(options.brittle)
- options = parse_args(['--brittle'], TestOptParser)
- self.assertTrue(options.brittle)
-
class TestPIDFile(unittest.TestCase):
def setUp(self):
self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
@@ -797,37 +776,6 @@ class TestPIDFile(unittest.TestCase):
self.assertRaises(IOError, dump_pid,
'nonexistent_dir' + os.sep + 'bind10.pid')
-# TODO: Do we want brittle mode? Probably yes. So we need to re-enable to after that.
- at unittest.skip("Brittle mode temporarily broken")
-class TestBrittle(unittest.TestCase):
- def test_brittle_disabled(self):
- bob = MockBob()
- bob.start_all_components()
- bob.runnable = True
-
- bob.reap_children()
- self.assertTrue(bob.runnable)
-
- def simulated_exit(self):
- ret_val = self.exit_info
- self.exit_info = (0, 0)
- return ret_val
-
- def test_brittle_enabled(self):
- bob = MockBob()
- bob.start_all_components()
- bob.runnable = True
-
- bob.brittle = True
- self.exit_info = (5, 0)
- bob._get_process_exit_status = self.simulated_exit
-
- old_stdout = sys.stdout
- sys.stdout = open("/dev/null", "w")
- bob.reap_children()
- sys.stdout = old_stdout
- self.assertFalse(bob.runnable)
-
class TestBossComponents(unittest.TestCase):
"""
Test the boss propagates component configuration properly to the
@@ -928,9 +876,9 @@ class TestBossComponents(unittest.TestCase):
(anyway it is not told so). It does not die if it is killed
the first time. It dies only when killed forcefully.
"""
- def kill(self, forcefull=False):
- killed.append(forcefull)
- if forcefull:
+ def kill(self, forceful=False):
+ killed.append(forceful)
+ if forceful:
bob.components = {}
def pid(self):
return 1
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
index d5846ad..ed91423 100644
--- a/src/bin/stats/stats-httpd-xml.tpl
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -1,24 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
- xmlns:stats="$xsd_namespace"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="$xsd_namespace $xsd_url_path">
- $xml_string
-</stats:stats_data>
+$xml_string
\ No newline at end of file
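The trimmed template above now contains only the XML and stylesheet declarations plus a $xml_string placeholder; xml_handler() later in this patch fills it via string.Template substitution with xml_string and xsl_url_path. A minimal sketch of that mechanism, using an inline stand-in string rather than the real template file:

    from string import Template

    # Stand-in for stats-httpd-xml.tpl after this change.
    tpl = Template('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>\n'
                   '$xml_string')
    body = tpl.substitute(xsl_url_path='/bind10/statistics/xsl/Auth',
                          xml_string='<bind10:statistics/>')
    print(body)
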
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
index 6ad1280..cc5578a 100644
--- a/src/bin/stats/stats-httpd-xsd.tpl
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -1,38 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
- xmlns="http://www.w3.org/2001/XMLSchema"
- xmlns:stats="$xsd_namespace">
- <annotation>
- <documentation xml:lang="en">XML schema of the statistics
- data in BIND 10</documentation>
- </annotation>
- <element name="stats_data">
- <annotation>
- <documentation>A set of statistics data</documentation>
- </annotation>
- <complexType>
- $xsd_string
- <attribute name="version" type="token" use="optional" default="1.0">
- <annotation>
- <documentation>Version number of syntax</documentation>
- </annotation>
- </attribute>
- </complexType>
- </element>
-</schema>
+$xsd_string
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index a1f6406..7c2e7ae 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -1,23 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
- xmlns:stats="$xsd_namespace">
+ xmlns:bind10="$xsd_namespace">
<xsl:output method="html" encoding="UTF-8"
doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
@@ -42,14 +26,7 @@ td.title {
</head>
<body>
<h1>BIND 10 Statistics</h1>
- <table>
- <tr>
- <th>Owner</th>
- <th>Title</th>
- <th>Value</th>
- </tr>
- <xsl:apply-templates />
- </table>
+ <xsl:apply-templates />
</body>
</html>
</xsl:template>
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index 3a7f47a..51c4e09 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -246,12 +246,12 @@ class Stats:
self.update_statistics_data()
if owner and name:
try:
- return self.statistics_data[owner][name]
+ return {owner:{name:self.statistics_data[owner][name]}}
except KeyError:
pass
elif owner:
try:
- return self.statistics_data[owner]
+ return {owner: self.statistics_data[owner]}
except KeyError:
pass
elif name:
@@ -360,9 +360,9 @@ class Stats:
if owner:
try:
if name:
- return isc.config.create_answer(0, schema_byname[owner][name])
+ return isc.config.create_answer(0, {owner:[schema_byname[owner][name]]})
else:
- return isc.config.create_answer(0, schema[owner])
+ return isc.config.create_answer(0, {owner:schema[owner]})
except KeyError:
pass
else:
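The stats.py.in change above makes the "show" and "showschema" answers wrap the selected data under its owner (and item) name instead of returning the bare value. An illustrative before/after, with made-up numbers:

    statistics_data = {'Auth': {'queries.tcp': 2, 'queries.udp': 3}}

    # Before this change, show with owner='Auth', name='queries.tcp'
    # returned the bare value:
    #   2
    # After this change it returns the value wrapped in its owner/name:
    #   {'Auth': {'queries.tcp': 2}}
    owner, name = 'Auth', 'queries.tcp'
    print({owner: {name: statistics_data[owner][name]}})
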
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
index 042630d..f265abb 100644
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -29,6 +29,7 @@ import http.server
import socket
import string
import xml.etree.ElementTree
+import urllib.parse
import isc.cc
import isc.config
@@ -66,7 +67,7 @@ XML_URL_PATH = '/bind10/statistics/xml'
XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
-XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
+XSD_NAMESPACE = 'http://bind10.isc.org/bind10'
# Assign this process name
isc.util.process.rename()
@@ -85,14 +86,29 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
def send_head(self):
try:
- if self.path == XML_URL_PATH:
- body = self.server.xml_handler()
- elif self.path == XSD_URL_PATH:
- body = self.server.xsd_handler()
- elif self.path == XSL_URL_PATH:
- body = self.server.xsl_handler()
+ req_path = self.path
+ req_path = urllib.parse.urlsplit(req_path).path
+ req_path = urllib.parse.unquote(req_path)
+ req_path = os.path.normpath(req_path)
+ path_dirs = req_path.split('/')
+ path_dirs = [ d for d in filter(None, path_dirs) ]
+ req_path = '/'+"/".join(path_dirs)
+ module_name = None
+ item_name = None
+ # in case of /bind10/statistics/xxx/YYY/zzz
+ if len(path_dirs) >= 5:
+ item_name = path_dirs[4]
+ # in case of /bind10/statistics/xxx/YYY ...
+ if len(path_dirs) >= 4:
+ module_name = path_dirs[3]
+ if req_path == '/'.join([XML_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xml_handler(module_name, item_name)
+ elif req_path == '/'.join([XSD_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xsd_handler(module_name, item_name)
+ elif req_path == '/'.join([XSL_URL_PATH] + path_dirs[3:5]):
+ body = self.server.xsl_handler(module_name, item_name)
else:
- if self.path == '/' and 'Host' in self.headers.keys():
+ if req_path == '/' and 'Host' in self.headers.keys():
# redirect to XML URL only when requested with '/'
self.send_response(302)
self.send_header(
@@ -104,6 +120,12 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
# Couldn't find HOST
self.send_error(404)
return None
+ except StatsHttpdDataError as err:
+ # Couldn't find either the specified module name or the
+ # specified item name
+ self.send_error(404)
+ logger.error(STATHTTPD_SERVER_DATAERROR, err)
+ return None
except StatsHttpdError as err:
self.send_error(500)
logger.error(STATHTTPD_SERVER_ERROR, err)
@@ -145,6 +167,12 @@ class StatsHttpdError(Exception):
main routine."""
pass
+class StatsHttpdDataError(Exception):
+ """Exception class for StatsHttpd class. The error is considered
+ to be caused by the requested data. It is intended to be thrown
+ from the StatsHttpd object to the HttpHandler object or main routine."""
+ pass
+
class StatsHttpd:
"""The main class of HTTP server of HTTP/XML interface for
statistics module. It handles HTTP requests, and command channel
@@ -334,12 +362,27 @@ class StatsHttpd:
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
- def get_stats_data(self):
+ def get_stats_data(self, owner=None, name=None):
"""Requests statistics data to the Stats daemon and returns
- the data which obtains from it"""
+ the data obtained from it. The first argument is the name of
+ the module that owns the statistics data; the second argument
+ is the name of one of the statistics items that the module
+ owns. The second argument cannot be specified when the first
+ argument is not specified. It returns the statistics data of
+ the specified module or item. When a session timeout or a
+ session error occurs, it raises StatsHttpdError. When the
+ stats daemon returns a non-zero value, it raises
+ StatsHttpdDataError."""
+ param = {}
+ if owner is None and name is None:
+ param = None
+ if owner is not None:
+ param['owner'] = owner
+ if name is not None:
+ param['name'] = name
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'), 'Stats')
+ isc.config.ccsession.create_command('show', param), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -351,131 +394,409 @@ class StatsHttpd:
if rcode == 0:
return value
else:
- raise StatsHttpdError("Stats module: %s" % str(value))
+ raise StatsHttpdDataError("Stats module: %s" % str(value))
- def get_stats_spec(self):
+ def get_stats_spec(self, owner=None, name=None):
"""Requests statistics data to the Stats daemon and returns
- the data which obtains from it"""
+ the data obtained from it. The first argument is the name of
+ the module that owns the statistics data; the second argument
+ is the name of one of the statistics items that the module
+ owns. The second argument cannot be specified when the first
+ argument is not specified. It returns the statistics
+ specification of the specified module or item. When a session
+ timeout or a session error occurs, it raises StatsHttpdError.
+ When the stats daemon returns a non-zero value, it raises
+ StatsHttpdDataError."""
+ param = {}
+ if owner is None and name is None:
+ param = None
+ if owner is not None:
+ param['owner'] = owner
+ if name is not None:
+ param['name'] = name
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('showschema'), 'Stats')
+ isc.config.ccsession.create_command('showschema', param), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
if rcode == 0:
return value
else:
- raise StatsHttpdError("Stats module: %s" % str(value))
+ raise StatsHttpdDataError("Stats module: %s" % str(value))
except (isc.cc.session.SessionTimeout,
isc.cc.session.SessionError) as err:
raise StatsHttpdError("%s: %s" %
(err.__class__.__name__, err))
- def xml_handler(self):
- """Handler which requests to Stats daemon to obtain statistics
- data and returns the body of XML document"""
- xml_list=[]
- for (mod, spec) in self.get_stats_data().items():
- if not spec: continue
- elem1 = xml.etree.ElementTree.Element(str(mod))
- for (k, v) in spec.items():
- elem2 = xml.etree.ElementTree.Element(str(k))
- elem2.text = str(v)
- elem1.append(elem2)
- # The coding conversion is tricky. xml..tostring() of Python 3.2
- # returns bytes (not string) regardless of the coding, while
- # tostring() of Python 3.1 returns a string. To support both
- # cases transparently, we first make sure tostring() returns
- # bytes by specifying utf-8 and then convert the result to a
- # plain string (code below assume it).
- xml_list.append(
- str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
- encoding='us-ascii'))
- xml_string = "".join(xml_list)
+
+ def xml_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics data and specification
+ using get_stats_data and get_stats_spec respectively, loads
+ the XML template file, and returns the XML document as a
+ string. The first argument is the name of the module that
+ owns the statistics data; the second argument is the name of
+ one of the statistics items that the module owns. The second
+ argument cannot be specified when the first argument is not
+ specified."""
+
+ # TODO: Split the following recursive function by parameter
+ # type, so that we can be sure which type we are handling
+ # when calling it recursively.
+ def stats_data2xml(stats_spec, stats_data, xml_elem):
+ """Internal use for xml_handler. Reads stats_spec and
+ stats_data, given as the first and second arguments, and
+ modifies the XML element given as the third argument
+ (xml_elem) in place. Always returns None."""
+ # assumed started with module_spec or started with
+ # item_spec in statistics
+ if type(stats_spec) is dict:
+ # assumed started with module_spec
+ if 'item_name' not in stats_spec \
+ and 'item_type' not in stats_spec:
+ for module_name in stats_spec.keys():
+ elem = xml.etree.ElementTree.Element(module_name)
+ stats_data2xml(stats_spec[module_name],
+ stats_data[module_name], elem)
+ xml_elem.append(elem)
+ # started with item_spec in statistics
+ else:
+ elem = xml.etree.ElementTree.Element(stats_spec['item_name'])
+ if stats_spec['item_type'] == 'map':
+ stats_data2xml(stats_spec['map_item_spec'],
+ stats_data,
+ elem)
+ elif stats_spec['item_type'] == 'list':
+ for item in stats_data:
+ stats_data2xml(stats_spec['list_item_spec'],
+ item, elem)
+ else:
+ elem.text = str(stats_data)
+ xml_elem.append(elem)
+ # assumed started with stats_spec
+ elif type(stats_spec) is list:
+ for item_spec in stats_spec:
+ stats_data2xml(item_spec,
+ stats_data[item_spec['item_name']],
+ xml_elem)
+
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ stats_data = self.get_stats_data(module_name, item_name)
+ # make the path xxx/module/item if specified respectively
+ path_info = ''
+ if module_name is not None and item_name is not None:
+ path_info = '/' + module_name + '/' + item_name
+ elif module_name is not None:
+ path_info = '/' + module_name
+ xml_elem = xml.etree.ElementTree.Element(
+ 'bind10:statistics',
+ attrib={ 'xsi:schemaLocation' : XSD_NAMESPACE + ' ' + XSD_URL_PATH + path_info,
+ 'xmlns:bind10' : XSD_NAMESPACE,
+ 'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance" })
+ stats_data2xml(stats_spec, stats_data, xml_elem)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
+ xml_string = str(xml.etree.ElementTree.tostring(xml_elem, encoding='utf-8'),
+ encoding='us-ascii')
self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
xml_string=xml_string,
- xsd_namespace=XSD_NAMESPACE,
- xsd_url_path=XSD_URL_PATH,
- xsl_url_path=XSL_URL_PATH)
+ xsl_url_path=XSL_URL_PATH + path_info)
assert self.xml_body is not None
return self.xml_body
- def xsd_handler(self):
- """Handler which just returns the body of XSD document"""
+ def xsd_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics specification using
+ get_stats_spec, loads the XSD template file, and returns the
+ XSD document as a string. The first argument is the name of
+ the module that owns the statistics data; the second argument
+ is the name of one of the statistics items that the module
+ owns. The second argument cannot be specified when the first
+ argument is not specified."""
+
+ # TODO: Split the following recursive function by parameter
+ # type, so that we can be sure which type we are handling
+ # when calling it recursively.
+ def stats_spec2xsd(stats_spec, xsd_elem):
+ """Internal use for xsd_handler. Reads stats_spec, given
+ as the first argument, and modifies the XML element given
+ as the second argument (xsd_elem) in place. Always returns
+ None and raises no exceptions."""
+ # assumed module_spec or one stats_spec
+ if type(stats_spec) is dict:
+ # assumed module_spec
+ if 'item_name' not in stats_spec:
+ for mod in stats_spec.keys():
+ elem = xml.etree.ElementTree.Element(
+ "element", { "name" : mod })
+ complextype = xml.etree.ElementTree.Element("complexType")
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec[mod], alltag)
+ complextype.append(alltag)
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ # assumed stats_spec
+ else:
+ if stats_spec['item_type'] == 'map':
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec['map_item_spec'], alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ "name" : stats_spec["item_name"],
+ "minOccurs": "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ "maxOccurs": "unbounded" })
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ elif stats_spec['item_type'] == 'list':
+ alltag = xml.etree.ElementTree.Element("sequence")
+ stats_spec2xsd(stats_spec['list_item_spec'], alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ "name" : stats_spec["item_name"],
+ "minOccurs": "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ "maxOccurs": "1" })
+ elem.append(complextype)
+ xsd_elem.append(elem)
+ else:
+ # determine the datatype of XSD
+ # TODO: Should consider other item_format types
+ datatype = stats_spec["item_type"] \
+ if stats_spec["item_type"].lower() != 'real' \
+ else 'float'
+ if "item_format" in stats_spec:
+ item_format = stats_spec["item_format"]
+ if datatype.lower() == 'string' \
+ and item_format.lower() == 'date-time':
+ datatype = 'dateTime'
+ elif datatype.lower() == 'string' \
+ and (item_format.lower() == 'date' \
+ or item_format.lower() == 'time'):
+ datatype = item_format.lower()
+ elem = xml.etree.ElementTree.Element(
+ "element",
+ attrib={
+ 'name' : stats_spec["item_name"],
+ 'type' : datatype,
+ 'minOccurs' : "0" \
+ if stats_spec["item_optional"] \
+ else "1",
+ 'maxOccurs' : "1"
+ }
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ if "item_title" in stats_spec:
+ appinfo.text = stats_spec["item_title"]
+ if "item_description" in stats_spec:
+ documentation.text = stats_spec["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ elem.append(annotation)
+ xsd_elem.append(elem)
+ # multiple stats_specs
+ elif type(stats_spec) is list:
+ for item_spec in stats_spec:
+ stats_spec2xsd(item_spec, xsd_elem)
+
# for XSD
- xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for (mod, spec) in self.get_stats_spec().items():
- if not spec: continue
- alltag = xml.etree.ElementTree.Element("all")
- for item in spec:
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- alltag.append(element)
-
- complextype = xml.etree.ElementTree.Element("complexType")
- complextype.append(alltag)
- mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
- mod_element.append(complextype)
- xsd_root.append(mod_element)
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ alltag = xml.etree.ElementTree.Element("all")
+ stats_spec2xsd(stats_spec, alltag)
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ documentation = xml.etree.ElementTree.Element("documentation")
+ documentation.text = "A set of statistics data"
+ annotation = xml.etree.ElementTree.Element("annotation")
+ annotation.append(documentation)
+ elem = xml.etree.ElementTree.Element(
+ "element", attrib={ 'name' : 'statistics' })
+ elem.append(annotation)
+ elem.append(complextype)
+ documentation = xml.etree.ElementTree.Element("documentation")
+ documentation.text = "XML schema of the statistics data in BIND 10"
+ annotation = xml.etree.ElementTree.Element("annotation")
+ annotation.append(documentation)
+ xsd_root = xml.etree.ElementTree.Element(
+ "schema",
+ attrib={ 'xmlns' : "http://www.w3.org/2001/XMLSchema",
+ 'targetNamespace' : XSD_NAMESPACE,
+ 'xmlns:bind10' : XSD_NAMESPACE })
+ xsd_root.append(annotation)
+ xsd_root.append(elem)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
# cases transparently, we first make sure tostring() returns
# bytes by specifying utf-8 and then convert the result to a
# plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
encoding='us-ascii')
self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=xsd_string,
- xsd_namespace=XSD_NAMESPACE
- )
+ xsd_string=xsd_string)
assert self.xsd_body is not None
return self.xsd_body
- def xsl_handler(self):
- """Handler which just returns the body of XSL document"""
+ def xsl_handler(self, module_name=None, item_name=None):
+ """Requests the specified statistics specification using
+ get_stats_spec, loads the XSL template file, and returns the
+ XSL document as a string. The first argument is the name of
+ the module that owns the statistics data; the second argument
+ is the name of one of the statistics items that the module
+ owns. The second argument cannot be specified when the first
+ argument is not specified."""
+
+ # TODO: Split the following recursive function by parameter
+ # type, so that we can be sure which type we are handling
+ # when calling it recursively.
+ def stats_spec2xsl(stats_spec, xsl_elem, path=XML_URL_PATH):
+ """Internal use for xsl_handler. Reads stats_spec, given
+ as the first argument, and modifies the XML element given
+ as the second argument (xsl_elem) in place. The third
+ argument is the base path used for making anchor tags in
+ the XSL. Always returns None and raises no exceptions."""
+ # assumed module_spec or one stats_spec
+ if type(stats_spec) is dict:
+ # assumed module_spec
+ if 'item_name' not in stats_spec:
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Module Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Module Item"
+ tr.append(th)
+ table.append(tr)
+ for mod in stats_spec.keys():
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : mod })
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element("td")
+ a = xml.etree.ElementTree.Element(
+ "a", attrib={ "href": urllib.parse.quote(path + "/" + mod) })
+ a.text = mod
+ td.append(a)
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(stats_spec[mod], td,
+ path + "/" + mod)
+ tr.append(td)
+ foreach.append(tr)
+ table.append(foreach)
+ xsl_elem.append(table)
+ # assumed stats_spec
+ else:
+ if stats_spec['item_type'] == 'map':
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Value"
+ tr.append(th)
+ table.append(tr)
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : stats_spec['item_name'] })
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element(
+ "td",
+ attrib={ "class" : "title",
+ "title" : stats_spec["item_description"] \
+ if "item_description" in stats_spec \
+ else "" })
+ # TODO: Consider whether we should always use
+ # the identical name "item_name" for the
+ # user-visible name in XSL.
+ td.text = stats_spec[ "item_title" if "item_title" in stats_spec else "item_name" ]
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(stats_spec['map_item_spec'], td,
+ path + "/" + stats_spec["item_name"])
+ tr.append(td)
+ foreach.append(tr)
+ table.append(foreach)
+ xsl_elem.append(table)
+ elif stats_spec['item_type'] == 'list':
+ stats_spec2xsl(stats_spec['list_item_spec'], xsl_elem,
+ path + "/" + stats_spec["item_name"])
+ else:
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ attrib={'select': stats_spec["item_name"]})
+ xsl_elem.append(xsl_valueof)
+
+ # multiple stats_specs
+ elif type(stats_spec) is list:
+ table = xml.etree.ElementTree.Element("table")
+ tr = xml.etree.ElementTree.Element("tr")
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Name"
+ tr.append(th)
+ th = xml.etree.ElementTree.Element("th")
+ th.text = "Item Value"
+ tr.append(th)
+ table.append(tr)
+ for item_spec in stats_spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td = xml.etree.ElementTree.Element(
+ "td",
+ attrib={ "class" : "title",
+ "title" : item_spec["item_description"] \
+ if "item_description" in item_spec \
+ else "" })
+ # if the path length is equal to or shorter than
+ # XML_URL_PATH + /Module/Item, add the anchor tag.
+ if len(path.split('/')) <= len((XML_URL_PATH + '/Module/Item').split('/')):
+ a = xml.etree.ElementTree.Element(
+ "a", attrib={ "href": urllib.parse.quote(path + "/" + item_spec["item_name"]) })
+ a.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+ td.append(a)
+ else:
+ td.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+ tr.append(td)
+ td = xml.etree.ElementTree.Element("td")
+ stats_spec2xsl(item_spec, td, path)
+ tr.append(td)
+ if item_spec['item_type'] == 'list':
+ foreach = xml.etree.ElementTree.Element(
+ "xsl:for-each", attrib={ "select" : item_spec['item_name'] })
+ foreach.append(tr)
+ table.append(foreach)
+ else:
+ table.append(tr)
+ xsl_elem.append(table)
+
# for XSL
- xsd_root = xml.etree.ElementTree.Element(
+ stats_spec = self.get_stats_spec(module_name, item_name)
+ xsd_root = xml.etree.ElementTree.Element( # started with xml:template tag
"xsl:template",
- dict(match="*")) # started with xml:template tag
- for (mod, spec) in self.get_stats_spec().items():
- if not spec: continue
- for item in spec:
- tr = xml.etree.ElementTree.Element("tr")
- td0 = xml.etree.ElementTree.Element("td")
- td0.text = str(mod)
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=mod+'/'+item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td0)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
+ attrib={'match': "bind10:statistics"})
+ stats_spec2xsl(stats_spec, xsd_root)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
# cases transparently, we first make sure tostring() returns
# bytes by specifying utf-8 and then convert the result to a
# plain string (code below assume it).
+ # FIXME: Non-ASCII characters might be lost here. Consider how
+ # the whole system should handle non-ASCII characters.
xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
encoding='us-ascii')
self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
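The send_head() changes earlier in this file's diff accept URLs of the form /bind10/statistics/xml[/Module[/Item]] and extract the optional module and item names after normalizing the path. A standalone sketch of that parsing step (the example URL is made up for illustration):

    import os
    import urllib.parse

    def split_stats_path(raw_path):
        """Normalize the request path and pull out the optional
        module and item names, mirroring what send_head() does."""
        req_path = urllib.parse.unquote(urllib.parse.urlsplit(raw_path).path)
        req_path = os.path.normpath(req_path)
        path_dirs = [d for d in req_path.split('/') if d]
        module_name = path_dirs[3] if len(path_dirs) >= 4 else None
        item_name = path_dirs[4] if len(path_dirs) >= 5 else None
        return '/' + '/'.join(path_dirs), module_name, item_name

    # Made-up example request:
    print(split_stats_path('/bind10/statistics/xml/Auth/queries.tcp'))
    # ('/bind10/statistics/xml/Auth/queries.tcp', 'Auth', 'queries.tcp')
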
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index 0e984dc..dbd0650 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -55,6 +55,12 @@ response will be sent back, and the specific error is printed. This
is an error condition that likely points to a module that is not
responding correctly to statistic requests.
+% STATHTTPD_SERVER_DATAERROR HTTP server data error: %1
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a request URI specifying a
+module or item name that does not exist.
+
% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
There was a problem initializing the HTTP server in the stats-httpd
module upon receiving its configuration data. The most likely cause
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index afd572f..01254d4 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
EXTRA_DIST = $(PYTESTS) test_utils.py
-CLEANFILES = test_utils.pyc msgq_socket_test
+CLEANFILES = test_utils.pyc
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index e867080..b6847bd 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -45,7 +45,12 @@ DUMMY_DATA = {
},
'Auth' : {
"queries.tcp": 2,
- "queries.udp": 3
+ "queries.udp": 3,
+ "queries.perzone": [{
+ "zonename": "test.example",
+ "queries.tcp": 2,
+ "queries.udp": 3
+ }]
},
'Stats' : {
"report_time": "2011-03-04T11:59:19Z",
@@ -129,68 +134,295 @@ class TestHttpHandler(unittest.TestCase):
self.assertEqual(len(self.stats_httpd.httpd), 1)
self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
- # URL is '/bind10/statistics/xml'
- self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- self.assertTrue(root.tag.find('stats_data') > 0)
- for (k,v) in root.attrib.items():
- if k.find('schemaLocation') > 0:
- self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
- for mod in DUMMY_DATA:
- for (item, value) in DUMMY_DATA[mod].items():
+ def check_XML_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XML_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ xml_doctype = response.readline().decode()
+ xsl_doctype = response.readline().decode()
+ self.assertTrue(len(xml_doctype) > 0)
+ self.assertTrue(len(xsl_doctype) > 0)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('statistics') > 0)
+ schema_loc = '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'
+ if item is None and mod is None:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="' +
+ stats_httpd.XSL_URL_PATH
+ + '"?>'))
+ for m in DUMMY_DATA:
+ for k in DUMMY_DATA[m].keys():
+ self.assertIsNotNone(root.find(m + '/' + k))
+ itm = root.find(m + '/' + k)
+ if type(DUMMY_DATA[m][k]) is list:
+ for v in DUMMY_DATA[m][k]:
+ for i in v:
+ self.assertIsNotNone(itm.find('zones/' + i))
+ elif item is None:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH + '/' + mod)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="'
+ + stats_httpd.XSL_URL_PATH + '/' + mod
+ + '"?>'))
+ for k in DUMMY_DATA[mod].keys():
+ self.assertIsNotNone(root.find(mod + '/' + k))
+ itm = root.find(mod + '/' + k)
+ self.assertIsNotNone(itm)
+ if type(DUMMY_DATA[mod][k]) is list:
+ for v in DUMMY_DATA[mod][k]:
+ for i in v:
+ self.assertIsNotNone(itm.find('zones/' + i))
+ else:
+ # check the path of XSD
+ self.assertEqual(root.attrib[schema_loc],
+ stats_httpd.XSD_NAMESPACE + ' '
+ + stats_httpd.XSD_URL_PATH + '/' + mod + '/' + item)
+ # check the path of XSL
+ self.assertTrue(xsl_doctype.startswith(
+ '<?xml-stylesheet type="text/xsl" href="'
+ + stats_httpd.XSL_URL_PATH + '/' + mod + '/' + item
+ + '"?>'))
self.assertIsNotNone(root.find(mod + '/' + item))
- # URL is '/bind10/statitics/xsd'
- self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
- tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
- xsdpath = '/'.join(tags)
- self.assertTrue(root.tag.find('schema') > 0)
- self.assertTrue(hasattr(root, 'attrib'))
- self.assertTrue('targetNamespace' in root.attrib)
- self.assertEqual(root.attrib['targetNamespace'],
- stats_httpd.XSD_NAMESPACE)
- for elm in root.findall(xsdpath):
- self.assertIsNotNone(elm.attrib['name'])
- self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
-
- # URL is '/bind10/statitics/xsl'
- self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
- self.client.endheaders()
- response = self.client.getresponse()
- self.assertEqual(response.getheader("Content-type"), "text/xml")
- self.assertTrue(int(response.getheader("Content-Length")) > 0)
- self.assertEqual(response.status, 200)
- root = xml.etree.ElementTree.parse(response).getroot()
- url_trans = '{http://www.w3.org/1999/XSL/Transform}'
- url_xhtml = '{http://www.w3.org/1999/xhtml}'
- xslpath = url_trans + 'template/' + url_xhtml + 'tr'
- self.assertEqual(root.tag, url_trans + 'stylesheet')
- for tr in root.findall(xslpath):
- tds = tr.findall(url_xhtml + 'td')
- self.assertIsNotNone(tds)
- self.assertEqual(type(tds), list)
- self.assertTrue(len(tds) > 2)
- self.assertTrue(hasattr(tds[0], 'text'))
- self.assertTrue(tds[0].text in DUMMY_DATA)
- valueof = tds[2].find(url_trans + 'value-of')
- self.assertIsNotNone(valueof)
- self.assertTrue(hasattr(valueof, 'attrib'))
- self.assertIsNotNone(valueof.attrib)
- self.assertTrue('select' in valueof.attrib)
- self.assertTrue(valueof.attrib['select'] in \
- [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
+ # URL is '/bind10/statistics/xml'
+ check_XML_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xml/Module'
+ check_XML_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xml/Module/Item'
+ check_XML_URL_PATH(mod=m, item=k)
+
+ def check_XSD_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XSD_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ if mod is None and item is None:
+ for (mod, itm) in DUMMY_DATA.items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ for (it, val) in itm.items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(it in itm_elm)
+ if type(val) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in val:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+ elif item is None:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ for (it, val) in DUMMY_DATA[mod].items():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(it in itm_elm)
+ if type(val) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in val:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+ else:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+ mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+ self.assertTrue(mod in mod_elm)
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+ self.assertTrue(item in itm_elm)
+ if type(DUMMY_DATA[mod][item]) is list:
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+ itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[item].findall(xsdpath) ])
+ self.assertTrue('zones' in itm_elm2)
+ for i in DUMMY_DATA[mod][item]:
+ for k in i.keys():
+ xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+ self.assertTrue(
+ k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+
+ # URL is '/bind10/statistics/xsd'
+ check_XSD_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xsd/Module'
+ check_XSD_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xsd/Module/Item'
+ check_XSD_URL_PATH(mod=m, item=k)
+
+ def check_XSL_URL_PATH(mod=None, item=None):
+ url_path = stats_httpd.XSL_URL_PATH
+ if mod is not None:
+ url_path = url_path + '/' + mod
+ if item is not None:
+ url_path = url_path + '/' + item
+ self.client.putrequest('GET', url_path)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ if item is None and mod is None:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ for (mod, itms) in DUMMY_DATA.items():
+ self.assertTrue(mod in mod_fe)
+ for (k, v) in itms.items():
+ if type(v) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(k in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ for itms in v:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ elif item is None:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ self.assertTrue(mod in mod_fe)
+ for (k, v) in DUMMY_DATA[mod].items():
+ if type(v) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(k in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ for itms in v:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+ else:
+ xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+ mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+ self.assertTrue(mod in mod_fe)
+ if type(DUMMY_DATA[mod][item]) is list:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+ self.assertTrue(item in itm_fe)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in itm_fe[item].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+ for itms in DUMMY_DATA[mod][item]:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_trans + 'for-each'
+ itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[item].findall(xslpath) ])
+ self.assertTrue('zones' in itm_fe)
+ for (k, v) in itms.items():
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+ self.assertTrue(k in itm_vo)
+ else:
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_trans + 'value-of'
+ itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(item in itm_vo)
+ xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+ + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+ + url_xhtml + 'td/' + url_xhtml + 'a'
+ itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+ self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+
+ # URL is '/bind10/statistics/xsl'
+ check_XSL_URL_PATH(mod=None, item=None)
+ for m in DUMMY_DATA:
+ # URL is '/bind10/statistics/xsl/Module'
+ check_XSL_URL_PATH(mod=m)
+ for k in DUMMY_DATA[m].keys():
+ # URL is '/bind10/statistics/xsl/Module/Item'
+ check_XSL_URL_PATH(mod=m, item=k)
# 302 redirect
self.client._http_vsn_str = 'HTTP/1.1'
@@ -202,13 +434,102 @@ class TestHttpHandler(unittest.TestCase):
self.assertEqual(response.getheader('Location'),
"http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
- # 404 NotFound
+ # 404 NotFound (random path)
self.client._http_vsn_str = 'HTTP/1.0'
self.client.putrequest('GET', '/path/to/foo/bar')
self.client.endheaders()
response = self.client.getresponse()
self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/bind10/foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/bind10/statistics/foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + 'Auth') # with no slash
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 200 ok
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '#foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '?foo=bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+
+ # 404 NotFound (too long path)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 404 NotFound (nonexistent module name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+ # 404 NotFound (nonexistent item name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ # 404 NotFound (existent module but nonexistent item name)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Auth/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
def test_do_GET_failed1(self):
# checks status
@@ -242,26 +563,26 @@ class TestHttpHandler(unittest.TestCase):
# failure case(Stats replies an error)
self.stats.mccs.set_command_handler(
lambda cmd, args: \
- isc.config.ccsession.create_answer(1, "I have an error.")
+ isc.config.ccsession.create_answer(1, "specified arguments are incorrect: I have an error.")
)
# request XML
self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
# request XSD
self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
# request XSL
self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
self.client.endheaders()
response = self.client.getresponse()
- self.assertEqual(response.status, 500)
+ self.assertEqual(response.status, 404)
def test_do_HEAD(self):
self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
@@ -306,12 +627,18 @@ class TestHttpServer(unittest.TestCase):
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
- def test_raises(self):
+ def test_raises1(self):
try:
raise stats_httpd.StatsHttpdError('Nothing')
except stats_httpd.StatsHttpdError as err:
self.assertEqual(str(err), 'Nothing')
+ def test_raises2(self):
+ try:
+ raise stats_httpd.StatsHttpdDataError('Nothing')
+ except stats_httpd.StatsHttpdDataError as err:
+ self.assertEqual(str(err), 'Nothing')
+
class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
@@ -488,17 +815,13 @@ class TestStatsHttpd(unittest.TestCase):
self.assertTrue(isinstance(tmpl, string.Template))
opts = dict(
xml_string="<dummy></dummy>",
- xsd_namespace="http://host/path/to/",
- xsd_url_path="/path/to/",
xsl_url_path="/path/to/")
lines = tmpl.substitute(opts)
for n in opts:
self.assertTrue(lines.find(opts[n])>0)
tmpl = self.stats_httpd.open_template(stats_httpd.XSD_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
- opts = dict(
- xsd_string="<dummy></dummy>",
- xsd_namespace="http://host/path/to/")
+ opts = dict(xsd_string="<dummy></dummy>")
lines = tmpl.substitute(opts)
for n in opts:
self.assertTrue(lines.find(opts[n])>0)
@@ -580,26 +903,172 @@ class TestStatsHttpd(unittest.TestCase):
def test_xml_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_data = lambda: \
- { 'Dummy' : { 'foo':'bar' } }
+ self.stats_httpd.get_stats_spec = lambda x,y: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
+ }]
+ }
+ self.stats_httpd.get_stats_data = lambda x,y: \
+ { 'Dummy' : { 'foo':'bar',
+ 'foo2': [
+ {
+ "foo2-1-1" : "bar1",
+ "foo2-1-2" : 10,
+ "foo2-1-3" : 9
+ },
+ {
+ "foo2-1-1" : "bar2",
+ "foo2-1-2" : 8,
+ "foo2-1-3" : 7
+ }
+ ] } }
xml_body1 = self.stats_httpd.open_template(
stats_httpd.XML_TEMPLATE_LOCATION).substitute(
- xml_string='<Dummy><foo>bar</foo></Dummy>',
- xsd_namespace=stats_httpd.XSD_NAMESPACE,
- xsd_url_path=stats_httpd.XSD_URL_PATH,
+ xml_string='<bind10:statistics xmlns:bind10="http://bind10.isc.org/bind10" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://bind10.isc.org/bind10 ' + stats_httpd.XSD_URL_PATH + '"><Dummy><foo>bar</foo><foo2><foo2-1><foo2-1-1>bar1</foo2-1-1><foo2-1-2>10</foo2-1-2><foo2-1-3>9</foo2-1-3></foo2-1><foo2-1><foo2-1-1>bar2</foo2-1-1><foo2-1-2>8</foo2-1-2><foo2-1-3>7</foo2-1-3></foo2-1></foo2></Dummy></bind10:statistics>',
xsl_url_path=stats_httpd.XSL_URL_PATH)
xml_body2 = self.stats_httpd.xml_handler()
self.assertEqual(type(xml_body1), str)
self.assertEqual(type(xml_body2), str)
self.assertEqual(xml_body1, xml_body2)
- self.stats_httpd.get_stats_data = lambda: \
- { 'Dummy' : {'bar':'foo'} }
+ self.stats_httpd.get_stats_spec = lambda x,y: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar foo",
+ "item_title": "Bar"
+ },
+ {
+ "item_name": "bar2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Bar foo",
+ "item_description": "Bar foo",
+ "list_item_spec": {
+ "item_name": "bar2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "bar2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Bar2 1 1",
+ "item_description": "Bar foo"
+ },
+ {
+ "item_name": "bar2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Bar2 1 2",
+ "item_description": "Bar foo"
+ },
+ {
+ "item_name": "bar2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Bar2 1 3",
+ "item_description": "Bar foo"
+ }
+ ]
+ }
+ }]
+ }
+ self.stats_httpd.get_stats_data = lambda x,y: \
+ { 'Dummy' : { 'bar':'foo',
+ 'bar2': [
+ {
+ "bar2-1-1" : "foo1",
+ "bar2-1-2" : 10,
+ "bar2-1-3" : 9
+ },
+ {
+ "bar2-1-1" : "foo2",
+ "bar2-1-2" : 8,
+ "bar2-1-3" : 7
+ }
+ ] } }
xml_body2 = self.stats_httpd.xml_handler()
self.assertNotEqual(xml_body1, xml_body2)
def test_xsd_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "foo",
@@ -608,23 +1077,76 @@ class TestStatsHttpd(unittest.TestCase):
"item_default": "bar",
"item_description": "foo is bar",
"item_title": "Foo"
+ },
+ {
+ "item_name": "hoo_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "2011-01-01T01:01:01Z",
+ "item_description": "hoo time",
+ "item_title": "Hoo Time",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsd_body1 = self.stats_httpd.open_template(
stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=\
- '<all><element name="Dummy"><complexType><all>' \
- + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
- + '<annotation><appinfo>Foo</appinfo>' \
- + '<documentation>foo is bar</documentation>' \
- + '</annotation></element></all>' \
- + '</complexType></element></all>',
- xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsd_string='<schema targetNamespace="' + stats_httpd.XSD_NAMESPACE + '" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:bind10="' + stats_httpd.XSD_NAMESPACE + '"><annotation><documentation>XML schema of the statistics data in BIND 10</documentation></annotation><element name="statistics"><annotation><documentation>A set of statistics data</documentation></annotation><complexType><all><element name="Dummy"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo" type="string"><annotation><appinfo>Foo</appinfo><documentation>foo is bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="hoo_time" type="dateTime"><annotation><appinfo>Hoo Time</appinfo><documentation>hoo time</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2"><complexType><sequence><element maxOccurs="unbounded" minOccurs="1" name="foo2-1"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo2-1-1" type="string"><annotation><appinfo>Foo2 1 1</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-2" type="integer"><annotation><appinfo>Foo2 1 2</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-3" type="integer"><annotation><appinfo>Foo2 1 3</appinfo><documentation>Foo bar</documentation></annotation></element></all></complexType></element></sequence></complexType></element></all></complexType></element></all></complexType></element></schema>')
xsd_body2 = self.stats_httpd.xsd_handler()
self.assertEqual(type(xsd_body1), str)
self.assertEqual(type(xsd_body2), str)
self.assertEqual(xsd_body1, xsd_body2)
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "bar",
@@ -633,6 +1155,66 @@ class TestStatsHttpd(unittest.TestCase):
"item_default": "foo",
"item_description": "bar is foo",
"item_title": "bar"
+ },
+ {
+ "item_name": "boo_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "2012-02-02T02:02:02Z",
+ "item_description": "boo time",
+ "item_title": "Boo Time",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsd_body2 = self.stats_httpd.xsd_handler()
@@ -640,30 +1222,77 @@ class TestStatsHttpd(unittest.TestCase):
def test_xsl_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "foo",
"item_type": "string",
"item_optional": False,
"item_default": "bar",
- "item_description": "foo is bar",
+ "item_description": "foo bar",
"item_title": "Foo"
+ },
+ {
+ "item_name": "foo2",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Foo bar",
+ "item_description": "Foo bar",
+ "list_item_spec": {
+ "item_name": "foo2-1",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "foo2-1-1",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Foo2 1 1",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-2",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 2",
+ "item_description": "Foo bar"
+ },
+ {
+ "item_name": "foo2-1-3",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Foo2 1 3",
+ "item_description": "Foo bar"
+ }
+ ]
+ }
}]
}
xsl_body1 = self.stats_httpd.open_template(
stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
- xsl_string='<xsl:template match="*"><tr>' \
- + '<td>Dummy</td>' \
- + '<td class="title" title="foo is bar">Foo</td>' \
- + '<td><xsl:value-of select="Dummy/foo" /></td>' \
- + '</tr></xsl:template>',
+ xsl_string='<xsl:template match="bind10:statistics"><table><tr><th>Module Name</th><th>Module Item</th></tr><xsl:for-each select="Dummy"><tr><td><a href="' + stats_httpd.XML_URL_PATH + '/Dummy">Dummy</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo">Foo</a></td><td><xsl:value-of select="foo" /></td></tr><xsl:for-each select="foo2"><tr><td class="title" title="Foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo2">Foo bar</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><xsl:for-each select="foo2-1"><tr><td class="title" title="">foo2-1</td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="Foo bar">Foo2 1 1</td><td><xsl:value-of select="foo2-1-1" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 2</td><td><xsl:value-of select="foo2-1-2" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 3</td><td><xsl:value-of select="foo2-1-3" /></td></tr></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></xsl:template>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
xsl_body2 = self.stats_httpd.xsl_handler()
self.assertEqual(type(xsl_body1), str)
self.assertEqual(type(xsl_body2), str)
self.assertEqual(xsl_body1, xsl_body2)
- self.stats_httpd.get_stats_spec = lambda: \
+ self.stats_httpd.get_stats_spec = lambda x,y: \
{ "Dummy" :
[{
"item_name": "bar",
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 3813c7e..3c8599a 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -226,7 +226,7 @@ class TestStats(unittest.TestCase):
'show', 'Stats',
params={ 'owner' : 'Boss',
'name' : 'boot_time' }),
- (0, self.const_datetime))
+ (0, {'Boss': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command(
'set', 'Stats',
@@ -238,7 +238,7 @@ class TestStats(unittest.TestCase):
'show', 'Stats',
params={ 'owner' : 'Boss',
'name' : 'boot_time' }),
- (0, self.const_datetime))
+ (0, {'Boss': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
@@ -321,25 +321,25 @@ class TestStats(unittest.TestCase):
my_statistics_data = self.stats.get_statistics_data()
self.assertTrue('Stats' in my_statistics_data)
self.assertTrue('Boss' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data['Boss'])
my_statistics_data = self.stats.get_statistics_data(owner='Stats')
- self.assertTrue('report_time' in my_statistics_data)
- self.assertTrue('boot_time' in my_statistics_data)
- self.assertTrue('last_update_time' in my_statistics_data)
- self.assertTrue('timestamp' in my_statistics_data)
- self.assertTrue('lname' in my_statistics_data)
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('report_time' in my_statistics_data['Stats'])
+ self.assertTrue('boot_time' in my_statistics_data['Stats'])
+ self.assertTrue('last_update_time' in my_statistics_data['Stats'])
+ self.assertTrue('timestamp' in my_statistics_data['Stats'])
+ self.assertTrue('lname' in my_statistics_data['Stats'])
self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
- my_statistics_data = self.stats.get_statistics_data(owner='Stats')
- self.assertTrue('boot_time' in my_statistics_data)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['report_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['boot_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
- self.assertEqual(my_statistics_data, self.const_default_datetime)
+ self.assertEqual(my_statistics_data['Stats']['last_update_time'], self.const_default_datetime)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
- self.assertEqual(my_statistics_data, 0.0)
+ self.assertEqual(my_statistics_data['Stats']['timestamp'], 0.0)
my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
- self.assertEqual(my_statistics_data, '')
+ self.assertEqual(my_statistics_data, {'Stats': {'lname':''}})
self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
owner='Stats', name='Bar')
self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
@@ -385,10 +385,25 @@ class TestStats(unittest.TestCase):
1, "specified arguments are incorrect: owner: Foo, name: bar"))
self.assertEqual(self.stats.command_show(owner='Auth'),
isc.config.create_answer(
- 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ 0, {'Auth':{ 'queries.udp': 0,
+ 'queries.tcp': 0,
+ 'queries.perzone': [{ 'zonename': 'test1.example',
+ 'queries.udp': 1,
+ 'queries.tcp': 2 },
+ { 'zonename': 'test2.example',
+ 'queries.udp': 3,
+ 'queries.tcp': 4 }] }}))
self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
isc.config.create_answer(
- 0, 0))
+ 0, {'Auth': {'queries.udp':0}}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.perzone'),
+ isc.config.create_answer(
+ 0, {'Auth': {'queries.perzone': [{ 'zonename': 'test1.example',
+ 'queries.udp': 1,
+ 'queries.tcp': 2 },
+ { 'zonename': 'test2.example',
+ 'queries.udp': 3,
+ 'queries.tcp': 4 }]}}))
orig_get_timestamp = stats.get_timestamp
orig_get_datetime = stats.get_datetime
stats.get_timestamp = lambda : self.const_timestamp
@@ -396,7 +411,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(stats.get_timestamp(), self.const_timestamp)
self.assertEqual(stats.get_datetime(), self.const_datetime)
self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
- isc.config.create_answer(0, self.const_datetime))
+ isc.config.create_answer(0, {'Stats': {'report_time':self.const_datetime}}))
self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
stats.get_timestamp = orig_get_timestamp
@@ -442,9 +457,12 @@ class TestStats(unittest.TestCase):
self.assertTrue('item_format' in item)
schema = value['Auth']
- self.assertEqual(len(schema), 2)
+ self.assertEqual(len(schema), 3)
for item in schema:
- self.assertTrue(len(item) == 6)
+ if item['item_type'] == 'list':
+ self.assertEqual(len(item), 7)
+ else:
+ self.assertEqual(len(item), 6)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
self.assertTrue('item_optional' in item)
@@ -455,10 +473,10 @@ class TestStats(unittest.TestCase):
(rcode, value) = isc.config.ccsession.parse_answer(
self.stats.command_showschema(owner='Stats'))
self.assertEqual(rcode, 0)
- self.assertFalse('Stats' in value)
+ self.assertTrue('Stats' in value)
self.assertFalse('Boss' in value)
self.assertFalse('Auth' in value)
- for item in value:
+ for item in value['Stats']:
self.assertTrue(len(item) == 6 or len(item) == 7)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
@@ -472,19 +490,19 @@ class TestStats(unittest.TestCase):
(rcode, value) = isc.config.ccsession.parse_answer(
self.stats.command_showschema(owner='Stats', name='report_time'))
self.assertEqual(rcode, 0)
- self.assertFalse('Stats' in value)
+ self.assertTrue('Stats' in value)
self.assertFalse('Boss' in value)
self.assertFalse('Auth' in value)
- self.assertTrue(len(value) == 7)
- self.assertTrue('item_name' in value)
- self.assertTrue('item_type' in value)
- self.assertTrue('item_optional' in value)
- self.assertTrue('item_default' in value)
- self.assertTrue('item_title' in value)
- self.assertTrue('item_description' in value)
- self.assertTrue('item_format' in value)
- self.assertEqual(value['item_name'], 'report_time')
- self.assertEqual(value['item_format'], 'date-time')
+ self.assertEqual(len(value['Stats'][0]), 7)
+ self.assertTrue('item_name' in value['Stats'][0])
+ self.assertTrue('item_type' in value['Stats'][0])
+ self.assertTrue('item_optional' in value['Stats'][0])
+ self.assertTrue('item_default' in value['Stats'][0])
+ self.assertTrue('item_title' in value['Stats'][0])
+ self.assertTrue('item_description' in value['Stats'][0])
+ self.assertTrue('item_format' in value['Stats'][0])
+ self.assertEqual(value['Stats'][0]['item_name'], 'report_time')
+ self.assertEqual(value['Stats'][0]['item_format'], 'date-time')
self.assertEqual(self.stats.command_showschema(owner='Foo'),
isc.config.create_answer(
@@ -494,7 +512,7 @@ class TestStats(unittest.TestCase):
1, "specified arguments are incorrect: owner: Foo, name: bar"))
self.assertEqual(self.stats.command_showschema(owner='Auth'),
isc.config.create_answer(
- 0, [{
+ 0, {'Auth': [{
"item_default": 0,
"item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
"item_name": "queries.tcp",
@@ -509,17 +527,121 @@ class TestStats(unittest.TestCase):
"item_optional": False,
"item_title": "Queries UDP",
"item_type": "integer"
- }]))
+ },
+ {
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
+ }]}))
self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
isc.config.create_answer(
- 0, {
+ 0, {'Auth': [{
"item_default": 0,
"item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
"item_name": "queries.tcp",
"item_optional": False,
"item_title": "Queries TCP",
"item_type": "integer"
- }))
+ }]}))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.perzone'),
+ isc.config.create_answer(
+ 0, {'Auth':[{
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": False,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": False,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": False,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
+ }]}))
self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
isc.config.create_answer(
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
index 5eb8f92..3f6ff33 100644
--- a/src/bin/stats/tests/test_utils.py
+++ b/src/bin/stats/tests/test_utils.py
@@ -232,6 +232,57 @@ class MockAuth:
"item_default": 0,
"item_title": "Queries UDP",
"item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ },
+ {
+ "item_name": "queries.perzone",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "zonename" : "test1.example",
+ "queries.udp" : 1,
+ "queries.tcp" : 2
+ },
+ {
+ "zonename" : "test2.example",
+ "queries.udp" : 3,
+ "queries.tcp" : 4
+ }
+ ],
+ "item_title": "Queries per zone",
+ "item_description": "Queries per zone",
+ "list_item_spec": {
+ "item_name": "zones",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "zonename",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Zonename",
+ "item_description": "Zonename"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP per zone",
+ "item_description": "A number of UDP query counts per zone"
+ },
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP per zone",
+ "item_description": "A number of TCP query counts per zone"
+ }
+ ]
+ }
}
]
}
@@ -251,6 +302,11 @@ class MockAuth:
self.got_command_name = ''
self.queries_tcp = 3
self.queries_udp = 2
+ self.queries_per_zone = [{
+ 'zonename': 'test1.example',
+ 'queries.tcp': 5,
+ 'queries.udp': 4
+ }]
def run(self):
self.mccs.start()
@@ -273,7 +329,8 @@ class MockAuth:
if command == 'sendstats':
params = { "owner": "Auth",
"data": { 'queries.tcp': self.queries_tcp,
- 'queries.udp': self.queries_udp } }
+ 'queries.udp': self.queries_udp,
+ 'queries.per-zone' : self.queries_per_zone } }
return send_command("set", "Stats", params=params, session=self.cc_session)
return isc.config.create_answer(1, "Unknown Command")
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 8f4fa91..cffafe1 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -10,7 +10,7 @@ LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
else
-# sunstudio needs the ds path even if not all paths are necessary
+# Some systems need the ds path even if not all paths are necessary
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
index ed241c3..3538e3d 100644
Binary files a/src/bin/xfrin/tests/testdata/example.com.sqlite3 and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 1e4d942..3c41110 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,8 +14,10 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
+import re
import shutil
import socket
+import sqlite3
import sys
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
@@ -170,7 +172,8 @@ class MockDataSourceClient():
return (ZoneFinder.SUCCESS, dup_soa_rrset)
raise ValueError('Unexpected input to mock finder: bug in test case?')
- def get_updater(self, zone_name, replace):
+ def get_updater(self, zone_name, replace, journaling=False):
+ self._journaling_enabled = journaling
return self
def add_rrset(self, rrset):
@@ -1132,6 +1135,7 @@ class TestAXFR(TestXfrinConnection):
def test_do_xfrin(self):
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ self.assertFalse(self.conn._datasrc_client._journaling_enabled)
def test_do_xfrin_with_tsig(self):
# use TSIG with a mock context. we fake all verify results to
@@ -1283,6 +1287,7 @@ class TestIXFRResponse(TestXfrinConnection):
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertTrue(self.conn._datasrc_client._journaling_enabled)
self.assertEqual([], self.conn._datasrc_client.diffs)
check_diffs(self.assertEqual,
[[('delete', begin_soa_rrset), ('add', soa_rrset)]],
@@ -1387,6 +1392,8 @@ class TestIXFRResponse(TestXfrinConnection):
answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ # In the case of AXFR-style IXFR, journaling must have been disabled.
+ self.assertFalse(self.conn._datasrc_client._journaling_enabled)
self.assertEqual([], self.conn._datasrc_client.diffs)
# The SOA should be added exactly once, and in our implementation
# it should be added at the end of the sequence.
@@ -1540,6 +1547,19 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
self.assertEqual(1234, self.get_zone_serial())
+ # Also confirm the corresponding diffs are stored in the diffs table
+ conn = sqlite3.connect(self.sqlite3db_obj)
+ cur = conn.cursor()
+ cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+ soa_rdata_base = 'master.example.com. admin.example.com. ' + \
+ 'SERIAL 3600 1800 2419200 7200'
+ self.assertEqual(cur.fetchall(),
+ [(TEST_ZONE_NAME_STR, 'SOA', 3600,
+ re.sub('SERIAL', str(1230), soa_rdata_base)),
+ (TEST_ZONE_NAME_STR, 'SOA', 3600,
+ re.sub('SERIAL', str(1234), soa_rdata_base))])
+ conn.close()
+
def test_do_ixfrin_sqlite3_fail(self):
'''Similar to the previous test, but xfrin fails due to error.
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 911b3b3..445683e 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -367,7 +367,10 @@ class XfrinIXFRDeleteSOA(XfrinState):
' RR is given in IXFRDeleteSOA state')
# This is the beginning state of one difference sequence (changes
# for one SOA update). We need to create a new Diff object now.
- conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+ # Note also that we (unconditionally) enable journaling here. The
+ # Diff constructor may internally disable it, however, if the
+ # underlying data source doesn't support journaling.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name, False, True)
conn._diff.delete_data(rr)
self.set_xfrstate(conn, XfrinIXFRDelete())
return True
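The xfrin change above requests journaling whenever an IXFR difference sequence
starts; the Diff object may still fall back to non-journaled updates if the
underlying data source cannot record diffs. A minimal sketch of the call, with the
argument roles inferred from the mock get_updater(zone_name, replace, journaling)
signature used in the tests (the data source client, the RRsets and the
add_data/commit calls are placeholders and assumptions for illustration):

    # Sketch only: the two trailing arguments are taken to mean
    # replace=False and journaling=True, mirroring the change above.
    from isc.xfrin.diff import Diff   # import path assumed
    from isc.dns import Name

    zone_name = Name('example.com')
    diff = Diff(datasrc_client, zone_name, False, True)
    diff.delete_data(old_soa_rrset)   # start of one difference sequence
    diff.add_data(new_soa_rrset)      # add_data/commit assumed here
    diff.commit()                     # diffs end up in the journal if supported
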
diff --git a/src/bin/xfrout/b10-xfrout.8 b/src/bin/xfrout/b10-xfrout.8
index c8b4b07..c810c2f 100644
--- a/src/bin/xfrout/b10-xfrout.8
+++ b/src/bin/xfrout/b10-xfrout.8
@@ -71,6 +71,19 @@ The configurable settings are:
defines the maximum number of outgoing zone transfers that can run concurrently\&. The default is 10\&.
.PP
+\fItsig_key_ring\fR
+A list of TSIG keys (each of which is in the form of name:base64\-key[:algorithm]) used for access control on transfer requests\&. The default is an empty list\&.
+.PP
+
+\fItransfer_acl\fR
+A list of ACL elements that apply to all transfer requests by default (unless overridden in zone_config)\&. See the BIND 10 guide for configuration examples\&. The default is an element that allows any transfer request\&.
+.PP
+
+\fIzone_config\fR
+A list of JSON objects (i\&.e\&. maps) that define per\-zone configuration concerning
+\fBb10\-xfrout\fR\&. The supported names of each object are "origin" (the origin name of the zone), "class" (the RR class of the zone, optional, defaults to "IN"), and "acl_element" (ACL only applicable to transfer requests for that zone)\&. See the BIND 10 guide for configuration examples\&. The default is an empty list, that is, no zone\-specific configuration\&.
+.PP
+
\fIlog_name\fR
.PP
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 9889b80..4f6a7fa 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -98,6 +98,31 @@
that can run concurrently. The default is 10.
</para>
<para>
+ <varname>tsig_key_ring</varname>
+ A list of TSIG keys (each of which is in the form of
+ name:base64-key[:algorithm]) used for access control on transfer
+ requests.
+ The default is an empty list.
+ </para>
+ <para>
+ <varname>transfer_acl</varname>
+ A list of ACL elements that apply to all transfer requests by
+ default (unless overridden in zone_config). See the BIND 10
+ guide for configuration examples.
+ The default is an element that allows any transfer request.
+ </para>
+ <para>
+ <varname>zone_config</varname>
+ A list of JSON objects (i.e. maps) that define per-zone
+ configuration concerning <command>b10-xfrout</command>.
+ The supported names of each object are "origin" (the origin
+ name of the zone), "class" (the RR class of the zone, optional,
+ default to "IN"), and "acl_element" (ACL only applicable to
+ transfer requests for that zone).
+ See the BIND 10 guide for configuration examples.
+ The default is an empty list, that is, no zone-specific configuration.
+ </para>
+ <para>
<varname>log_name</varname>
<!-- TODO -->
</para>
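Taken together, the three new settings documented above allow a default transfer
policy plus per-zone overrides. A rough sketch of their shape, written as a Python
dict for illustration (the addresses and ACL element values are examples, not
defaults; the key string reuses the test key from the xfrout tests):

    # Sketch only: shape of the new b10-xfrout configuration items.
    xfrout_config = {
        "tsig_key_ring": ["example.com:SFuWd/q99SzF8Yzd1QbB9g=="],
        "transfer_acl": [{"from": "192.0.2.0/24", "action": "ACCEPT"},
                         {"action": "REJECT"}],
        "zone_config": [{"origin": "example.com",
                         "class": "IN",     # optional, defaults to "IN"
                         "acl_element": [{"from": "192.0.2.1", "action": "ACCEPT"}]}]
    }
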
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index ace8fc9..ad6d7e6 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -2,11 +2,18 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/test.sqlite3
+# These are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/creatediff.py
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -24,5 +31,6 @@ endif
B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/testdata/creatediff.py b/src/bin/xfrout/tests/testdata/creatediff.py
new file mode 100755
index 0000000..dab6622
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/creatediff.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3.1
+
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This script was used to create zone differences for IXFR tests.
+
+The result was stored in the test SQLite3 database file, so this script
+itself isn't necessary for testing. It's provided here for reference
+purposes.
+
+'''
+
+import isc.datasrc
+import isc.log
+from isc.dns import *
+from isc.testutils.rrset_utils import *
+
+isc.log.init("dummy") # XXX
+
+ZONE_NAME = Name('example.com')
+NS_NAME_STR = 'a.dns.example.com'
+NS_NAME = Name(NS_NAME_STR)
+
+client = isc.datasrc.DataSourceClient('sqlite3',
+ '{ "database_file": "test.sqlite3" }')
+
+# Install the initial data
+updater = client.get_updater(ZONE_NAME, True)
+updater.add_rrset(create_soa(2011111802))
+updater.add_rrset(create_ns(NS_NAME_STR))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.add_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.commit()
+
+# Incremental update to generate diffs
+updater = client.get_updater(ZONE_NAME, False, True)
+updater.delete_rrset(create_soa(2011111802))
+updater.add_rrset(create_soa(2011111900))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.2', 7200))
+updater.delete_rrset(create_soa(2011111900))
+updater.delete_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.delete_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.add_rrset(create_soa(2011112001))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.1'))
+updater.commit()
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
new file mode 100644
index 0000000..8458d09
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -0,0 +1,6 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 3600 IN SOA master.example.com. admin.example.com. 2011112001 3600 1800 2419200 7200
+example.com. 3600 IN NS a.dns.example.com.
+a.dns.example.com. 3600 IN A 192.0.2.1
+a.dns.example.com. 7200 IN A 192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..9eb14f1
Binary files /dev/null and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 0a9fd3c..37e8993 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -21,14 +21,26 @@ import os
from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
import isc.config
-from pydnspp import *
+from isc.dns import *
+from isc.testutils.rrset_utils import *
from xfrout import *
import xfrout
import isc.log
import isc.acl.dns
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+#
+# Commonly used (mostly constant) test parameters
+#
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
+TEST_RRCLASS = RRClass.IN()
+IXFR_OK_VERSION = 2011111802
+IXFR_NG_VERSION = 2011112800
+SOA_CURRENT_VERSION = 2011112001
+
# our fake socket, where we can read and insert messages
class MySocket():
def __init__(self, family, type):
@@ -55,19 +67,97 @@ class MySocket():
self.sendqueue = self.sendqueue[size:]
return result
- def read_msg(self):
+ def read_msg(self, parse_options=Message.PARSE_DEFAULT):
sent_data = self.readsent()
get_msg = Message(Message.PARSE)
- get_msg.from_wire(bytes(sent_data[2:]))
+ get_msg.from_wire(bytes(sent_data[2:]), parse_options)
return get_msg
def clear_send(self):
del self.sendqueue[:]
-# We subclass the Session class we're testing here, only
-# to override the handle() and _send_data() method
+class MockDataSrcClient:
+ def __init__(self, type, config):
+ pass
+
+ def find_zone(self, zone_name):
+ '''Mock version of find_zone().
+
+ It returns itself (subsequently acting as a mock ZoneFinder) for
+ some test zone names. For a special name it returns NOTFOUND to
+ emulate the condition where the specified zone doen't exist.
+
+ '''
+ self._zone_name = zone_name
+ if zone_name == Name('notauth.example.com'):
+ return (isc.datasrc.DataSourceClient.NOTFOUND, None)
+ return (isc.datasrc.DataSourceClient.SUCCESS, self)
+
+ def find(self, name, rrtype, target, options):
+ '''Mock ZoneFinder.find().
+
+ (At the moment) this method only handles queries for type SOA.
+ By default it returns a normal SOA RR(set) whose owner name is
+ the query name. It also emulates some unusual cases for special
+ zone names.
+
+ '''
+ if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+ return (ZoneFinder.NXDOMAIN, None)
+ elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+ soa_rrset = create_soa(SOA_CURRENT_VERSION)
+ soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+ return (ZoneFinder.SUCCESS, soa_rrset)
+ elif rrtype == RRType.SOA():
+ return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION))
+ raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+ def get_iterator(self, zone_name, adjust_ttl=False):
+ if zone_name == Name('notauth.example.com'):
+ raise isc.datasrc.Error('no such zone')
+ self._zone_name = zone_name
+ return self
+
+ def get_soa(self): # emulate ZoneIterator.get_soa()
+ if self._zone_name == Name('nosoa.example.com'):
+ return None
+ soa_rrset = create_soa(SOA_CURRENT_VERSION)
+ if self._zone_name == Name('multisoa.example.com'):
+ soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+ return soa_rrset
+
+ def get_journal_reader(self, zone_name, begin_serial, end_serial):
+ if zone_name == Name('notauth2.example.com'):
+ return isc.datasrc.ZoneJournalReader.NO_SUCH_ZONE, None
+ if zone_name == Name('nojournal.example.com'):
+ raise isc.datasrc.NotImplemented('journaling not supported')
+ if begin_serial == IXFR_NG_VERSION:
+ return isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION, None
+ return isc.datasrc.ZoneJournalReader.SUCCESS, self
+
+class MyCCSession(isc.config.ConfigData):
+ def __init__(self):
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
+
+ def get_remote_config_value(self, module_name, identifier):
+ if module_name == "Auth" and identifier == "database_file":
+ return "initdb.file", False
+ else:
+ return "unknown", False
+
+# This constant dictionary stores all default configuration parameters
+# defined in the xfrout spec file.
+DEFAULT_CONFIG = MyCCSession().get_full_config()
+
+# We subclass the Session class we're testing here, only overriding a few
+# methods
class MyXfroutSession(XfroutSession):
- def handle(self):
+ def _handle(self):
+ pass
+
+ def _close_socket(self):
pass
def _send_data(self, sock, data):
@@ -80,12 +170,23 @@ class MyXfroutSession(XfroutSession):
class Dbserver:
def __init__(self):
self._shutdown_event = threading.Event()
+ self.transfer_counter = 0
+ self._max_transfers_out = DEFAULT_CONFIG['transfers_out']
def get_db_file(self):
- return None
+ return 'test.sqlite3'
+ def increase_transfers_counter(self):
+ self.transfer_counter += 1
+ return True
def decrease_transfers_counter(self):
- pass
+ self.transfer_counter -= 1
+
+class TestXfroutSessionBase(unittest.TestCase):
+    '''Base class for tests related to xfrout sessions.
+
+    This class defines common setup/teardown and utility methods. Actual
+ tests are delegated to subclasses.
-class TestXfroutSession(unittest.TestCase):
+ '''
def getmsg(self):
msg = Message(Message.PARSE)
msg.from_wire(self.mdata)
@@ -102,15 +203,44 @@ class TestXfroutSession(unittest.TestCase):
def message_has_tsig(self, msg):
return msg.get_tsig_record() is not None
- def create_request_data(self, with_tsig=False):
+ def create_request_data(self, with_question=True, with_tsig=False,
+ ixfr=None, qtype=None, zone_name=TEST_ZONE_NAME,
+ soa_class=TEST_RRCLASS, num_soa=1):
+        '''Create commonly used XFR request data.
+
+ By default the request type is AXFR; if 'ixfr' is an integer,
+ the request type will be IXFR and an SOA with the serial being
+ the value of the parameter will be included in the authority
+ section.
+
+        This method has various minor parameters, used only for creating
+        malformed requests for testing purposes:
+ qtype: the RR type of the question section. By default automatically
+ determined by the value of ixfr, but could be an invalid type
+ for testing.
+        zone_name: the query (zone) name. For IXFR, it's also used as
+ the owner name of the SOA in the authority section.
+ soa_class: IXFR only. The RR class of the SOA RR in the authority
+ section.
+ num_soa: IXFR only. The number of SOA RDATAs in the authority
+ section.
+ '''
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com"), RRClass.IN(),
- RRType.AXFR())
- msg.add_question(query_question)
+ req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
+ if with_question:
+ msg.add_question(Question(zone_name, RRClass.IN(),
+ req_type if qtype is None else qtype))
+ if req_type == RRType.IXFR():
+ soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+ # In the RDATA only the serial matters.
+ for i in range(0, num_soa):
+ soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+ 'm r ' + str(ixfr) + ' 1 1 1 1'))
+ msg.add_rrset(Message.SECTION_AUTHORITY, soa)
renderer = MessageRenderer()
if with_tsig:
@@ -121,23 +251,98 @@ class TestXfroutSession(unittest.TestCase):
request_data = renderer.get_data()
return request_data
+ def set_request_type(self, type):
+ self.xfrsess._request_type = type
+ if type == RRType.AXFR():
+ self.xfrsess._request_typestr = 'AXFR'
+ else:
+ self.xfrsess._request_typestr = 'IXFR'
+
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
- TSIGKeyRing(), ('127.0.0.1', 12345),
+ TSIGKeyRing(),
+ (socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
# When not testing ACLs, simply accept
isc.acl.dns.REQUEST_LOADER.load(
[{"action": "ACCEPT"}]),
{})
- self.mdata = self.create_request_data(False)
- self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
+ self.set_request_type(RRType.AXFR()) # test AXFR by default
+ self.mdata = self.create_request_data()
+ self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
+        # Some tests replace a module-wide function. We save the original
+        # here so it can be restored in tearDown and used elsewhere.
+ self.orig_get_rrset_len = xfrout.get_rrset_len
+
+ def tearDown(self):
+ xfrout.get_rrset_len = self.orig_get_rrset_len
+        # transfer_counter must always be reset no matter what happens within
+ # the XfroutSession object. We check the condition here.
+ self.assertEqual(0, self.xfrsess._server.transfer_counter)
+
+class TestXfroutSession(TestXfroutSessionBase):
+ def test_quota_error(self):
+ '''Emulating the server being too busy.
+
+ '''
+ self.xfrsess._request_data = self.mdata
+ self.xfrsess._server.increase_transfers_counter = lambda : False
+ XfroutSession._handle(self.xfrsess)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+
+ def test_quota_ok(self):
+ '''The default case in terms of the xfrout quota.
+
+ '''
+ # set up a bogus request, which should result in FORMERR. (it only
+ # has to be something that is different from the previous case)
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_OK_VERSION, num_soa=2)
+ # Replace the data source client to avoid datasrc related exceptions
+ self.xfrsess.ClientClass = MockDataSrcClient
+ XfroutSession._handle(self.xfrsess)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+
+ def test_exception_from_session(self):
+ '''Test the case where the main processing raises an exception.
+
+        We just check it doesn't cause any unexpected disruption and that
+        (in tearDown) transfer_counter is correctly reset to 0.
+
+ '''
+ def dns_xfrout_start(fd, msg, quota):
+ raise ValueError('fake exception')
+ self.xfrsess.dns_xfrout_start = dns_xfrout_start
+ XfroutSession._handle(self.xfrsess)
def test_parse_query_message(self):
+ # Valid AXFR
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
self.assertEqual(get_rcode.to_text(), "NOERROR")
+ # Valid IXFR
+ request_data = self.create_request_data(ixfr=2011111801)
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
+ self.assertEqual(Rcode.NOERROR(), rcode)
+
+ # Broken request: no question
+ self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+ self.create_request_data(with_question=False))
+
+ # Broken request: invalid RR type (neither AXFR nor IXFR)
+ self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+ self.create_request_data(qtype=RRType.A()))
+
+        # Another valid IXFR request; it should also be parsed with NOERROR
+ request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
# tsig signed query message
- request_data = self.create_request_data(True)
+ request_data = self.create_request_data(with_tsig=True)
# BADKEY
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -165,20 +370,23 @@ class TestXfroutSession(unittest.TestCase):
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "NOERROR")
# This should be dropped completely, therefore returning None
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(None, rcode)
# This should be refused, therefore REFUSED
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
# TSIG signed request
- request_data = self.create_request_data(True)
+ request_data = self.create_request_data(with_tsig=True)
# If the TSIG check fails, it should not check ACL
# (If it checked ACL as well, it would just drop the request)
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
self.xfrsess._tsig_key_ring = TSIGKeyRing()
rcode, msg = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -216,19 +424,23 @@ class TestXfroutSession(unittest.TestCase):
{"action": "REJECT"}
]))
# both matches
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
# TSIG matches, but address doesn't
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "REFUSED")
# Address matches, but TSIG doesn't (not included)
- self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.1', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
# Neither address nor TSIG matches
- self.xfrsess._remote = ('192.0.2.2', 12345)
+ self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+ ('192.0.2.2', 12345))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
@@ -289,10 +501,6 @@ class TestXfroutSession(unittest.TestCase):
self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
RRClass.IN()))
- def test_get_query_zone_name(self):
- msg = self.getmsg()
- self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
-
def test_send_data(self):
self.xfrsess._send_data(self.sock, self.mdata)
senddata = self.sock.readsent()
@@ -315,10 +523,13 @@ class TestXfroutSession(unittest.TestCase):
def test_send_message(self):
msg = self.getmsg()
msg.make_response()
- # soa record data with different cases
- soa_record = (4, 3, 'Example.com.', 'com.Example.', 3600, 'SOA', None, 'master.Example.com. admin.exAmple.com. 1234 3600 1800 2419200 7200')
- rrset_soa = self.xfrsess._create_rrset_from_db_record(soa_record)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ # SOA record data with different cases
+ soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+ RRTTL(3600))
+ soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ 'master.Example.com. admin.exAmple.com. ' +
+ '2011112001 3600 1800 2419200 7200'))
+ msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
self.xfrsess._send_message(self.sock, msg)
send_out_data = self.sock.readsent()[2:]
@@ -347,61 +558,44 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(msg.get_rcode(), rcode)
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
- def test_create_rrset_from_db_record(self):
- rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
- self.assertEqual(rrset.get_name().to_text(), "example.com.")
- self.assertEqual(rrset.get_class(), RRClass("IN"))
- self.assertEqual(rrset.get_type().to_text(), "SOA")
- rdata = rrset.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
-
def test_send_message_with_last_soa(self):
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, packet_neet_not_sign)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
- # tsig context is not exist
+ # tsig context does not exist
self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- #answer_rrset_iter = section_iter(get_msg, section.ANSWER())
- answer = get_msg.get_section(Message.SECTION_ANSWER)[0]#answer_rrset_iter.get_rrset()
+ answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
self.assertEqual(answer.get_name().to_text(), "example.com.")
self.assertEqual(answer.get_class(), RRClass("IN"))
self.assertEqual(answer.get_type().to_text(), "SOA")
rdata = answer.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
- # msg is the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, TSIG_SIGN_EVERY_NTH)
+ # Sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
- # tsig context is not exist
+ # tsig context does not exist
self.assertFalse(self.message_has_tsig(get_msg))
def test_send_message_with_last_soa_with_tsig(self):
# create tsig context
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
- # msg is not the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, packet_neet_not_sign)
+ # Sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset, 0)
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
@@ -409,33 +603,25 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- # msg is the TSIG_SIGN_EVERY_NTH one
- # sending the message with last soa together
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
- 0, TSIG_SIGN_EVERY_NTH)
- get_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(get_msg))
-
def test_trigger_send_message_with_last_soa(self):
rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
# length larger than MAX-len(rrset)
- length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+ get_rrset_len(self.soa_rrset) + 1
# give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- packet_neet_not_sign)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset,
+ length_need_split)
get_msg = self.sock.read_msg()
self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
@@ -455,100 +641,139 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
- #answer_rrset_iter = section_iter(get_msg, Message.SECTION_ANSWER)
answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
self.assertEqual(answer.get_name().to_text(), "example.com.")
self.assertEqual(answer.get_class(), RRClass("IN"))
self.assertEqual(answer.get_type().to_text(), "SOA")
rdata = answer.get_rdata()
- self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
def test_trigger_send_message_with_last_soa_with_tsig(self):
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
msg = self.getmsg()
msg.make_response()
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ msg.add_rrset(Message.SECTION_ANSWER, self.soa_rrset)
# length larger than MAX-len(rrset)
- length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
- # packet number less than TSIG_SIGN_EVERY_NTH
- packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+ get_rrset_len(self.soa_rrset) + 1
# give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- packet_neet_not_sign)
- get_msg = self.sock.read_msg()
- # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
- self.assertFalse(self.message_has_tsig(get_msg))
- # the last packet should be tsig signed
+ self.xfrsess._send_message_with_last_soa(msg, self.sock,
+ self.soa_rrset,
+ length_need_split)
+ # Both messages should have TSIG RRs
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
- # and it should not have sent anything else
- self.assertEqual(0, len(self.sock.sendqueue))
-
-
- # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
- xfrout.TSIG_SIGN_EVERY_NTH)
- get_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(get_msg))
- # the last packet should be tsig signed
get_msg = self.sock.read_msg()
self.assertTrue(self.message_has_tsig(get_msg))
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
def test_get_rrset_len(self):
- rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
- self.assertEqual(82, get_rrset_len(rrset_soa))
-
- def test_zone_has_soa(self):
- global sqlite3_ds
- def mydb1(zone, file):
- return True
- sqlite3_ds.get_zone_soa = mydb1
- self.assertTrue(self.xfrsess._zone_has_soa(""))
- def mydb2(zone, file):
- return False
- sqlite3_ds.get_zone_soa = mydb2
- self.assertFalse(self.xfrsess._zone_has_soa(""))
-
- def test_zone_exist(self):
- global sqlite3_ds
- def zone_exist(zone, file):
- return zone
- sqlite3_ds.zone_exist = zone_exist
- self.assertTrue(self.xfrsess._zone_exist(True))
- self.assertFalse(self.xfrsess._zone_exist(False))
-
- def test_check_xfrout_available(self):
- def zone_exist(zone):
- return zone
- def zone_has_soa(zone):
- return (not zone)
- self.xfrsess._zone_exist = zone_exist
- self.xfrsess._zone_has_soa = zone_has_soa
- self.assertEqual(self.xfrsess._check_xfrout_available(False).to_text(), "NOTAUTH")
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "SERVFAIL")
-
- def zone_empty(zone):
- return zone
- self.xfrsess._zone_has_soa = zone_empty
- def false_func():
- return False
- self.xfrsess._server.increase_transfers_counter = false_func
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "REFUSED")
- def true_func():
- return True
- self.xfrsess._server.increase_transfers_counter = true_func
- self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "NOERROR")
+ self.assertEqual(82, get_rrset_len(self.soa_rrset))
+
+ def test_xfrout_axfr_setup(self):
+ self.xfrsess.ClientClass = MockDataSrcClient
+ # Successful case. A zone iterator should be set up.
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+
+ # Failure cases
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
+ Rcode.NOTAUTH())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
+ Rcode.SERVFAIL())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
+ Rcode.SERVFAIL())
+
+ def test_xfrout_ixfr_setup(self):
+ self.xfrsess.ClientClass = MockDataSrcClient
+ self.set_request_type(RRType.IXFR())
+
+ # Successful case of pure IXFR. A zone journal reader should be set
+ # up.
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._jnl_reader)
+
+ # Successful case, but as a result of falling back to AXFR-style
+ # IXFR. A zone iterator should be set up instead of a journal reader.
+ self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+ self.assertEqual(None, self.xfrsess._jnl_reader)
+
+ # Successful case, but the requested SOA serial is equal to that of
+ # the local SOA. Both iterator and jnl_reader should be None,
+ # indicating that the response will contain just one SOA.
+ self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertEqual(None, self.xfrsess._iterator)
+ self.assertEqual(None, self.xfrsess._jnl_reader)
+
+ # The data source doesn't support journaling. Should fallback to AXFR.
+ zone_name = Name('nojournal.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.assertNotEqual(None, self.xfrsess._iterator)
+
+ # Failure cases
+ zone_name = Name('notauth.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        # This is a strange case: the zone's SOA will be found but the journal
+ # reader won't be created due to 'no such zone'.
+ zone_name = Name('notauth2.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ zone_name = Name('nosoa.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ zone_name = Name('multisoa.example.com')
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ zone_name=zone_name)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+
+ # query name doesn't match the SOA's owner
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+ # query's RR class doesn't match the SOA's class
+ zone_name = TEST_ZONE_NAME # make sure the name matches this time
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ soa_class=RRClass.CH())
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+ # multiple SOA RRs
+ self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+ num_soa=2)
+ self.assertEqual(self.xfrsess._xfrout_setup(
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
def test_dns_xfrout_start_formerror(self):
# formerror
@@ -556,102 +781,182 @@ class TestXfroutSession(unittest.TestCase):
sent_data = self.sock.readsent()
self.assertEqual(len(sent_data), 0)
- def default(self, param):
- return "example.com"
-
def test_dns_xfrout_start_notauth(self):
- self.xfrsess._get_query_zone_name = self.default
- def notauth(formpara):
+ def notauth(msg, name, rrclass):
return Rcode.NOTAUTH()
- self.xfrsess._check_xfrout_available = notauth
+ self.xfrsess._xfrout_setup = notauth
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
get_msg = self.sock.read_msg()
self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
+ def test_dns_xfrout_start_datasrc_servfail(self):
+ def internal_raise(x, y):
+ raise isc.datasrc.Error('exception for the sake of test')
+ self.xfrsess.ClientClass = internal_raise
+ self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+
def test_dns_xfrout_start_noerror(self):
- self.xfrsess._get_query_zone_name = self.default
- def noerror(form):
+ def noerror(msg, name, rrclass):
return Rcode.NOERROR()
- self.xfrsess._check_xfrout_available = noerror
+ self.xfrsess._xfrout_setup = noerror
- def myreply(msg, sock, zonename):
+ def myreply(msg, sock):
self.sock.send(b"success")
self.xfrsess._reply_xfrout_query = myreply
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
self.assertEqual(self.sock.readsent(), b"success")
- def test_reply_xfrout_query_noerror(self):
- global sqlite3_ds
- def get_zone_soa(zonename, file):
- return self.soa_record
-
- def get_zone_datas(zone, file):
- return [self.soa_record]
-
- sqlite3_ds.get_zone_soa = get_zone_soa
- sqlite3_ds.get_zone_datas = get_zone_datas
- self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+ def test_reply_xfrout_query_axfr(self):
+ self.xfrsess._soa = self.soa_rrset
+ self.xfrsess._iterator = [self.soa_rrset]
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
reply_msg = self.sock.read_msg()
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
- def test_reply_xfrout_query_noerror_with_tsig(self):
- rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
- global sqlite3_ds
+ def test_reply_xfrout_query_axfr_with_tsig(self):
+ rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+ RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
global xfrout
- def get_zone_soa(zonename, file):
- return self.soa_record
-
- def get_zone_datas(zone, file):
- zone_rrsets = []
- for i in range(0, 100):
- zone_rrsets.insert(i, rrset_data)
- return zone_rrsets
def get_rrset_len(rrset):
return 65520
- sqlite3_ds.get_zone_soa = get_zone_soa
- sqlite3_ds.get_zone_datas = get_zone_datas
+ self.xfrsess._soa = self.soa_rrset
+ self.xfrsess._iterator = [rrset for i in range(0, 100)]
xfrout.get_rrset_len = get_rrset_len
self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
- # tsig signed first package
- reply_msg = self.sock.read_msg()
- self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
- self.assertTrue(self.message_has_tsig(reply_msg))
- # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
- for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
+ # All messages must have TSIG as we don't support the feature of
+ # skipping intermediate TSIG records (with bulk signing).
+ for i in range(0, 102): # 102 = all 100 RRs from iterator and 2 SOAs
reply_msg = self.sock.read_msg()
- self.assertFalse(self.message_has_tsig(reply_msg))
- # TSIG_SIGN_EVERY_NTH packet has tsig
- reply_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(reply_msg))
-
- for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
- reply_msg = self.sock.read_msg()
- self.assertFalse(self.message_has_tsig(reply_msg))
- # tsig signed last package
- reply_msg = self.sock.read_msg()
- self.assertTrue(self.message_has_tsig(reply_msg))
+ # With the hack of get_rrset_len() above, every message must have
+ # exactly one RR in the answer section.
+ self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertTrue(self.message_has_tsig(reply_msg))
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
-class MyCCSession(isc.config.ConfigData):
- def __init__(self):
- module_spec = isc.config.module_spec_from_file(
- xfrout.SPECFILE_LOCATION)
- ConfigData.__init__(self, module_spec)
-
- def get_remote_config_value(self, module_name, identifier):
- if module_name == "Auth" and identifier == "database_file":
- return "initdb.file", False
- else:
- return "unknown", False
-
+ def test_reply_xfrout_query_ixfr(self):
+ # Creating a pure (incremental) IXFR response. Intermediate SOA
+ # RRs won't be skipped.
+ self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+ self.xfrsess._iterator = [create_soa(IXFR_OK_VERSION),
+ create_a(Name('a.example.com'), '192.0.2.2'),
+ create_soa(SOA_CURRENT_VERSION),
+ create_aaaa(Name('a.example.com'),
+ '2001:db8::1')]
+ self.xfrsess._jnl_reader = self.xfrsess._iterator
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+ actual_records = reply_msg.get_section(Message.SECTION_ANSWER)
+
+ expected_records = self.xfrsess._iterator[:]
+ expected_records.insert(0, create_soa(SOA_CURRENT_VERSION))
+ expected_records.append(create_soa(SOA_CURRENT_VERSION))
+
+ self.assertEqual(len(expected_records), len(actual_records))
+ for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertEqual(expected_rr.to_text(), actual_rr.to_text())
+
+ def test_reply_xfrout_query_ixfr_soa_only(self):
+ # Creating an IXFR response that contains only one RR, which is the
+ # SOA of the current version.
+ self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+ self.xfrsess._iterator = None
+ self.xfrsess._jnl_reader = None
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+ reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+ answer = reply_msg.get_section(Message.SECTION_ANSWER)
+ self.assertEqual(1, len(answer))
+        self.assertEqual(create_soa(SOA_CURRENT_VERSION).to_text(), answer[0].to_text())
+
+class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
+ '''Tests for XFR-out sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to actual operational environments. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ super().setUp()
+ self.xfrsess._request_data = self.mdata
+ self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
+ 'test.sqlite3'
+ self.ns_name = 'a.dns.example.com'
+
+ def check_axfr_stream(self, response):
+ '''Common checks for AXFR(-style) response for the test zone.
+ '''
+ # This zone contains two A RRs for the same name with different TTLs.
+        # These TTLs should be preserved in the AXFR stream.
+ actual_records = response.get_section(Message.SECTION_ANSWER)
+ expected_records = [create_soa(2011112001),
+ create_ns(self.ns_name),
+ create_a(Name(self.ns_name), '192.0.2.1', 3600),
+ create_a(Name(self.ns_name), '192.0.2.2', 7200),
+ create_soa(2011112001)]
+ self.assertEqual(len(expected_records), len(actual_records))
+ for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertEqual(expected_rr.to_text(), actual_rr.to_text())
+
+ def test_axfr_normal_session(self):
+ XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+ self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.check_axfr_stream(response)
+
+ def test_ixfr_to_axfr(self):
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_NG_VERSION)
+ XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+ self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ # This is an AXFR-style IXFR. So the question section should indicate
+        # that it's an IXFR response.
+ self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+ self.check_axfr_stream(response)
+
+ def test_ixfr_normal_session(self):
+        # See testdata/creatediff.py. There are 8 changes between the two
+        # versions, so the answer section should contain all of them plus
+        # the beginning and trailing SOAs.
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=IXFR_OK_VERSION)
+ XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+ actual_records = response.get_section(Message.SECTION_ANSWER)
+ expected_records = [create_soa(2011112001), create_soa(2011111802),
+ create_soa(2011111900),
+ create_a(Name(self.ns_name), '192.0.2.2', 7200),
+ create_soa(2011111900),
+ create_a(Name(self.ns_name), '192.0.2.53'),
+ create_aaaa(Name(self.ns_name), '2001:db8::1'),
+ create_soa(2011112001),
+ create_a(Name(self.ns_name), '192.0.2.1'),
+ create_soa(2011112001)]
+ self.assertEqual(len(expected_records), len(actual_records))
+ for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertEqual(expected_rr.to_text(), actual_rr.to_text())
+
+ def test_ixfr_soa_only(self):
+ # The requested SOA serial is the latest one. The response should
+ # contain exactly one SOA of that serial.
+ self.xfrsess._request_data = \
+ self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+ XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER)
+ answers = response.get_section(Message.SECTION_ANSWER)
+ self.assertEqual(1, len(answers))
+        self.assertEqual(create_soa(SOA_CURRENT_VERSION).to_text(), answers[0].to_text())
class MyUnixSockServer(UnixSockServer):
def __init__(self):
@@ -670,23 +975,27 @@ class TestUnixSockServer(unittest.TestCase):
file descriptor. This is needed, because we get only that one
from auth."""
# We test with UDP, as it can be "connected" without other
- # endpoint
+ # endpoint. Note that in the current implementation _guess_remote()
+ # unconditionally returns SOCK_STREAM.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('127.0.0.1', 12345))
- self.assertEqual(('127.0.0.1', 12345),
+ self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
self.unix._guess_remote(sock.fileno()))
if socket.has_ipv6:
# Don't check IPv6 address on hosts not supporting them
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.connect(('::1', 12345))
- self.assertEqual(('::1', 12345, 0, 0),
+ self.assertEqual((socket.AF_INET6, socket.SOCK_STREAM,
+ ('::1', 12345, 0, 0)),
self.unix._guess_remote(sock.fileno()))
# Try when pretending there's no IPv6 support
# (No need to pretend when there's really no IPv6)
xfrout.socket.has_ipv6 = False
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('127.0.0.1', 12345))
- self.assertEqual(('127.0.0.1', 12345),
+ self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+ ('127.0.0.1', 12345)),
self.unix._guess_remote(sock.fileno()))
# Return it back
xfrout.socket.has_ipv6 = True
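As the _guess_remote() checks above show, the remote endpoint is now carried as a (family, socktype, sockaddr) 3-tuple rather than a bare sockaddr; the xfrout.py.in changes below add format_addrinfo() to render such a tuple for logging. A small usage sketch (illustrative only, values taken from the test above):

    import socket
    from xfrout import format_addrinfo

    remote_v4 = (socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 12345))
    remote_v6 = (socket.AF_INET6, socket.SOCK_STREAM, ('::1', 12345, 0, 0))
    print(format_addrinfo(remote_v4))   # 127.0.0.1:12345
    print(format_addrinfo(remote_v6))   # [::1]:12345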
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index cf3b04f..d450138 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
import threading
import struct
import signal
-from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient, ZoneFinder, ZoneJournalReader
from socketserver import *
import os
from isc.config.ccsession import *
@@ -39,6 +39,7 @@ from isc.log_messages.xfrout_messages import *
isc.log.init("b10-xfrout")
logger = isc.log.Logger("xfrout")
+DBG_XFROUT_TRACE = logger.DBGLVL_TRACE_BASIC
try:
from libutil_io_python import *
@@ -46,7 +47,7 @@ try:
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrout process
# must keep running, so we warn about it and move forward.
- log.error(XFROUT_IMPORT, str(e))
+ logger.error(XFROUT_IMPORT, str(e))
from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
from isc.acl.dns import REQUEST_LOADER
@@ -92,21 +93,55 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
VERBOSE_MODE = False
-# tsig sign every N axfr packets.
-TSIG_SIGN_EVERY_NTH = 96
-
XFROUT_MAX_MESSAGE_SIZE = 65535
+# borrowed from xfrin.py @ #1298. We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text(True) + '/' + str(zone_class)
+
+# borrowed from xfrin.py @ #1298.
+def format_addrinfo(addrinfo):
+ """Helper function to format the addrinfo as a string of the form
+    <addr>:<port> (for IPv4) or [<addr>]:<port> (for IPv6). For unix domain
+ sockets, and unknown address families, it returns a basic string
+ conversion of the third element of the passed tuple.
+ Parameters:
+ addrinfo: a 3-tuple consisting of address family, socket type, and,
+ depending on the family, either a 2-tuple with the address
+ and port, or a filename
+ """
+ try:
+ if addrinfo[0] == socket.AF_INET:
+ return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+ elif addrinfo[0] == socket.AF_INET6:
+ return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+ else:
+ return str(addrinfo[2])
+ except IndexError:
+        raise TypeError("addrinfo argument to format_addrinfo() does not "
+                        "appear to consist of (family, socktype, (addr, port))")
+
def get_rrset_len(rrset):
"""Returns the wire length of the given RRset"""
bytes = bytearray()
rrset.to_wire(bytes)
return len(bytes)
+def get_soa_serial(soa_rdata):
+    '''Extract the serial field of an SOA RDATA and return it as an integer.
+ (borrowed from xfrin)
+ '''
+ return int(soa_rdata.to_text().split()[2])
class XfroutSession():
def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
- default_acl, zone_config):
+ default_acl, zone_config, client_class=DataSourceClient):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
@@ -114,23 +149,53 @@ class XfroutSession():
self._tsig_ctx = None
self._tsig_len = 0
self._remote = remote
+ self._request_type = None
+ self._request_typestr = None
self._acl = default_acl
self._zone_config = zone_config
- self.handle()
+ self.ClientClass = client_class # parameterize this for testing
+ self._soa = None # will be set in _xfrout_setup or in tests
+ self._jnl_reader = None # will be set to a reader for IXFR
+ self._handle()
def create_tsig_ctx(self, tsig_record, tsig_key_ring):
return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
tsig_key_ring)
- def handle(self):
- ''' Handle a xfrout query, send xfrout response '''
+ def _handle(self):
+        ''' Handle an xfrout query, send xfrout response(s).
+
+ This is separated from the constructor so that we can override
+ it from tests.
+
+ '''
+ # Check the xfrout quota. We do both increase/decrease in this
+        # method so it's clear we always release it once acquired.
+ quota_ok = self._server.increase_transfers_counter()
+ ex = None
try:
- self.dns_xfrout_start(self._sock_fd, self._request_data)
- #TODO, avoid catching all exceptions
+ self.dns_xfrout_start(self._sock_fd, self._request_data, quota_ok)
except Exception as e:
- logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
- pass
+            # To avoid a resource leak we need to catch all possible exceptions.
+ # We log it later to exclude the case where even logger raises
+ # an exception.
+ ex = e
+
+ # Release any critical resources
+ if quota_ok:
+ self._server.decrease_transfers_counter()
+ self._close_socket()
+
+ if ex is not None:
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, ex)
+
+ def _close_socket(self):
+ '''Simply close the socket via the given FD.
+        This is a dedicated subroutine of _handle() and is separated from it
+ for the convenience of tests.
+
+ '''
os.close(self._sock_fd)
def _check_request_tsig(self, msg, request_data):
@@ -138,7 +203,8 @@ class XfroutSession():
tsig_record = msg.get_tsig_record()
if tsig_record is not None:
self._tsig_len = tsig_record.get_length()
- self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+ self._tsig_ctx = self.create_tsig_ctx(tsig_record,
+ self._tsig_key_ring)
tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
if tsig_error != TSIGError.NOERROR:
return Rcode.NOTAUTH()
@@ -157,23 +223,45 @@ class XfroutSession():
# TSIG related checks
rcode = self._check_request_tsig(msg, mdata)
+ if rcode != Rcode.NOERROR():
+ return rcode, msg
+
+ # Make sure the question is valid. This should be ensured by
+ # the auth server, but since it's far from xfrout itself, we check
+        # it by ourselves. A violation would be an internal bug, so we
+ # raise and stop here rather than returning a FORMERR or SERVFAIL.
+ if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
+            raise RuntimeError('Invalid number of questions for XFR: ' +
+ str(msg.get_rr_count(Message.SECTION_QUESTION)))
+ question = msg.get_question()[0]
- if rcode == Rcode.NOERROR():
- # ACL checks
- zone_name = msg.get_question()[0].get_name()
- zone_class = msg.get_question()[0].get_class()
- acl = self._get_transfer_acl(zone_name, zone_class)
- acl_result = acl.execute(
- isc.acl.dns.RequestContext(self._remote,
- msg.get_tsig_record()))
- if acl_result == DROP:
- logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
- self._remote[0], self._remote[1])
- return None, None
- elif acl_result == REJECT:
- logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
- self._remote[0], self._remote[1])
- return Rcode.REFUSED(), msg
+ # Identify the request type
+ self._request_type = question.get_type()
+ if self._request_type == RRType.AXFR():
+ self._request_typestr = 'AXFR'
+ elif self._request_type == RRType.IXFR():
+ self._request_typestr = 'IXFR'
+ else:
+ # Likewise, this should be impossible.
+ raise RuntimeError('Unexpected XFR type: ' +
+ str(self._request_type))
+
+ # ACL checks
+ zone_name = question.get_name()
+ zone_class = question.get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_DROPPED,
+ self._request_type, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return None, None
+ elif acl_result == REJECT:
+ logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
+ self._request_type, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return Rcode.REFUSED(), msg
return rcode, msg
@@ -195,14 +283,6 @@ class XfroutSession():
return self._zone_config[config_key]['transfer_acl']
return self._acl
- def _get_query_zone_name(self, msg):
- question = msg.get_question()[0]
- return question.get_name().to_text()
-
- def _get_query_zone_class(self, msg):
- question = msg.get_question()[0]
- return question.get_class().to_text()
-
def _send_data(self, sock_fd, data):
size = len(data)
total_count = 0
@@ -238,51 +318,165 @@ class XfroutSession():
msg.set_rcode(rcode_)
self._send_message(sock_fd, msg, self._tsig_ctx)
- def _zone_has_soa(self, zone):
- '''Judge if the zone has an SOA record.'''
- # In some sense, the SOA defines a zone.
- # If the current name server has authority for the
- # specific zone, we need to judge if the zone has an SOA record;
- # if not, we consider the zone has incomplete data, so xfrout can't
- # serve for it.
- if sqlite3_ds.get_zone_soa(zone, self._server.get_db_file()):
- return True
+ def _get_zone_soa(self, zone_name):
+ '''Retrieve the SOA RR of the given zone.
+
+ It returns a pair of RCODE and the SOA (in the form of RRset).
+ On success RCODE is NOERROR and returned SOA is not None;
+ on failure RCODE indicates the appropriate code in the context of
+ xfr processing, and the returned SOA is None.
+
+ '''
+ result, finder = self._datasrc_client.find_zone(zone_name)
+ if result != DataSourceClient.SUCCESS:
+ return (Rcode.NOTAUTH(), None)
+ result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+ ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ return (Rcode.SERVFAIL(), None)
+ # Especially for database-based zones, a working zone may be in
+ # a broken state where it has more than one SOA RR. We proactively
+ # check the condition and abort the xfr attempt if we identify it.
+ if soa_rrset.get_rdata_count() != 1:
+ return (Rcode.SERVFAIL(), None)
+ return (Rcode.NOERROR(), soa_rrset)
+
+ def __axfr_setup(self, zone_name):
+        '''Set up a zone iterator for AXFR or AXFR-style IXFR.
- return False
-
- def _zone_exist(self, zonename):
- '''Judge if the zone is configured by config manager.'''
- # Currently, if we find the zone in datasource successfully, we
- # consider the zone is configured, and the current name server has
- # authority for the specific zone.
- # TODO: should get zone's configuration from cfgmgr or other place
- # in future.
- return sqlite3_ds.zone_exist(zonename, self._server.get_db_file())
-
- def _check_xfrout_available(self, zone_name):
- '''Check if xfr request can be responsed.
- TODO, Get zone's configuration from cfgmgr or some other place
- eg. check allow_transfer setting,
'''
- # If the current name server does not have authority for the
- # zone, xfrout can't serve for it, return rcode NOTAUTH.
- if not self._zone_exist(zone_name):
+ try:
+            # Note that we enable 'separate_rrs'. In xfr-out we need to
+            # preserve as much of the zone's stored content as possible
+            # (even if it's half broken).
+ self._iterator = self._datasrc_client.get_iterator(zone_name,
+ True)
+ except isc.datasrc.Error:
+ # If the current name server does not have authority for the
+ # zone, xfrout can't serve for it, return rcode NOTAUTH.
+ # Note: this exception can happen for other reasons. We should
+ # update get_iterator() API so that we can distinguish "no such
+ # zone" and other cases (#1373). For now we consider all these
+ # cases as NOTAUTH.
return Rcode.NOTAUTH()
# If we are an authoritative name server for the zone, but fail
# to find the zone's SOA record in datasource, xfrout can't
# provide zone transfer for it.
- if not self._zone_has_soa(zone_name):
+ self._soa = self._iterator.get_soa()
+ if self._soa is None or self._soa.get_rdata_count() != 1:
return Rcode.SERVFAIL()
- #TODO, check allow_transfer
- if not self._server.increase_transfers_counter():
- return Rcode.REFUSED()
+ return Rcode.NOERROR()
+
+ def __ixfr_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a zone journal reader for IXFR.
+
+ If the underlying data source does not know the requested range
+ of zone differences it automatically falls back to AXFR-style
+ IXFR by setting up a zone iterator instead of a journal reader.
+
+ '''
+ # Check the authority section. Look for a SOA record with
+ # the same name and class as the question.
+ remote_soa = None
+ for auth_rrset in request_msg.get_section(Message.SECTION_AUTHORITY):
+ # Ignore data whose owner name is not the zone apex, and
+            # ignore non-SOA records and records of a different class.
+ if auth_rrset.get_name() != zone_name or \
+ auth_rrset.get_type() != RRType.SOA() or \
+ auth_rrset.get_class() != zone_class:
+ continue
+ if auth_rrset.get_rdata_count() != 1:
+ logger.info(XFROUT_IXFR_MULTIPLE_SOA,
+ format_addrinfo(self._remote))
+ return Rcode.FORMERR()
+ remote_soa = auth_rrset
+ if remote_soa is None:
+ logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
+ return Rcode.FORMERR()
+
+ # Retrieve the local SOA
+ rcode, self._soa = self._get_zone_soa(zone_name)
+ if rcode != Rcode.NOERROR():
+ return rcode
+
+ # RFC1995 says "If an IXFR query with the same or newer version
+ # number than that of the server is received, it is replied to with
+ # a single SOA record of the server's current version, just as
+ # in AXFR". The claim about AXFR is incorrect, but other than that,
+ # we do as the RFC says.
+ # Note: until we complete #1278 we can only check equality of the
+ # two serials. The "newer version" case would fall back to AXFR-style.
+ begin_serial = get_soa_serial(remote_soa.get_rdata()[0])
+ end_serial = get_soa_serial(self._soa.get_rdata()[0])
+ if begin_serial == end_serial:
+ # clear both iterator and jnl_reader to signal we won't do
+ # iteration in response generation
+ self._iterator = None
+ self._jnl_reader = None
+ logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class),
+ begin_serial, end_serial)
+ return Rcode.NOERROR()
+
+ # Set up the journal reader or fall back to AXFR-style IXFR
+ try:
+ code, self._jnl_reader = self._datasrc_client.get_journal_reader(
+ zone_name, begin_serial, end_serial)
+ except isc.datasrc.NotImplemented as ex:
+ # The underlying data source doesn't support journaling.
+ # Fall back to AXFR-style IXFR.
+ logger.info(XFROUT_IXFR_NO_JOURNAL_SUPPORT,
+ format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return self.__axfr_setup(zone_name)
+ if code == ZoneJournalReader.NO_SUCH_VERSION:
+ logger.info(XFROUT_IXFR_NO_VERSION, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class),
+ begin_serial, end_serial)
+ return self.__axfr_setup(zone_name)
+ if code == ZoneJournalReader.NO_SUCH_ZONE:
+            # This is quite unexpected as we know the zone's SOA exists.
+ # It might be a bug or the data source is somehow broken,
+ # but it can still happen if someone has removed the zone
+ # between these two operations. We treat it as NOTAUTH.
+ logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
+ format_zone_str(zone_name, zone_class))
+ return Rcode.NOTAUTH()
+
+ # Use the reader as the iterator to generate the response.
+ self._iterator = self._jnl_reader
return Rcode.NOERROR()
+ def _xfrout_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a context for xfr responses according to the request type.
+
+ This method identifies the most appropriate data source for the
+        request and sets up a zone iterator or journal reader depending on
+ whether the request is AXFR or IXFR. If it identifies any protocol
+ level error it returns an RCODE other than NOERROR.
+
+ '''
+
+ # Identify the data source for the requested zone and see if it has
+ # SOA while initializing objects used for request processing later.
+ # We should eventually generalize this so that we can choose the
+ # appropriate data source from (possible) multiple candidates.
+ # We should eventually take into account the RR class here.
+ # For now, we hardcode a particular type (SQLite3-based), and only
+ # consider that one.
+ datasrc_config = '{ "database_file": "' + \
+ self._server.get_db_file() + '"}'
+ self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+
+ if self._request_type == RRType.AXFR():
+ return self.__axfr_setup(zone_name)
+ else:
+ return self.__ixfr_setup(request_msg, zone_name, zone_class)
- def dns_xfrout_start(self, sock_fd, msg_query):
+ def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
rcode_, msg = self._parse_query_message(msg_query)
#TODO. create query message and parse header
if rcode_ is None: # Dropped by ACL
@@ -292,29 +486,38 @@ class XfroutSession():
elif rcode_ != Rcode.NOERROR():
return self._reply_query_with_error_rcode(msg, sock_fd,
Rcode.FORMERR())
+ elif not quota_ok:
+ logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
+ format_addrinfo(self._remote),
+ self._server._max_transfers_out)
+ return self._reply_query_with_error_rcode(msg, sock_fd,
+ Rcode.REFUSED())
- zone_name = self._get_query_zone_name(msg)
- zone_class_str = self._get_query_zone_class(msg)
- # TODO: should we not also include class in the check?
- rcode_ = self._check_xfrout_available(zone_name)
+ question = msg.get_question()[0]
+ zone_name = question.get_name()
+ zone_class = question.get_class()
+ zone_str = format_zone_str(zone_name, zone_class) # for logging
+ try:
+ rcode_ = self._xfrout_setup(msg, zone_name, zone_class)
+ except Exception as ex:
+ logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, ex)
+ rcode_ = Rcode.SERVFAIL()
if rcode_ != Rcode.NOERROR():
- logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
- zone_class_str, rcode_.to_text())
+ logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, rcode_)
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
- logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
- self._reply_xfrout_query(msg, sock_fd, zone_name)
+ logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_typestr,
+ format_addrinfo(self._remote), zone_str)
+ self._reply_xfrout_query(msg, sock_fd)
except Exception as err:
- logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
- zone_class_str, str(err))
- pass
- logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
-
- self._server.decrease_transfers_counter()
- return
-
+ logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_typestr,
+ format_addrinfo(self._remote), zone_str, err)
+ logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_typestr,
+ format_addrinfo(self._remote), zone_str)
def _clear_message(self, msg):
qid = msg.get_qid()
@@ -329,87 +532,68 @@ class XfroutSession():
msg.set_header_flag(Message.HEADERFLAG_QR)
return msg
- def _create_rrset_from_db_record(self, record):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
- This function should be updated first.
- '''
- rrtype_ = RRType(record[5])
- rdata_ = Rdata(rrtype_, RRClass("IN"), " ".join(record[7:]))
- rrset_ = RRset(Name(record[2]), RRClass("IN"), rrtype_, RRTTL( int(record[4])))
- rrset_.add_rdata(rdata_)
- return rrset_
-
- def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
- count_since_last_tsig_sign):
+ def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
+ message_upper_len):
'''Add the SOA record to the end of message. If it can't be
added, a new message should be created to send out the last soa .
'''
- rrset_len = get_rrset_len(rrset_soa)
-
- if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
- message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
- # If tsig context exist, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+ if (message_upper_len + self._tsig_len + get_rrset_len(rrset_soa) >=
+ XFROUT_MAX_MESSAGE_SIZE):
self._send_message(sock_fd, msg, self._tsig_ctx)
msg = self._clear_message(msg)
- elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
- message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
- self._send_message(sock_fd, msg)
- msg = self._clear_message(msg)
# If tsig context exist, sign the last packet
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
self._send_message(sock_fd, msg, self._tsig_ctx)
-
- def _reply_xfrout_query(self, msg, sock_fd, zone_name):
+ def _reply_xfrout_query(self, msg, sock_fd):
#TODO, there should be a better way to insert rrset.
- count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
msg.make_response()
msg.set_header_flag(Message.HEADERFLAG_AA)
- soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
- rrset_soa = self._create_rrset_from_db_record(soa_record)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
+ # If the iterator is None, we are responding to IXFR with a single
+ # SOA RR.
+ if self._iterator is None:
+ self._send_message_with_last_soa(msg, sock_fd, self._soa, 0)
+ return
+
+ # Add the beginning SOA
+ msg.add_rrset(Message.SECTION_ANSWER, self._soa)
+ message_upper_len = get_rrset_len(self._soa) + self._tsig_len
- for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
- if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
+ # Add the rest of the zone/diff contents
+ for rrset in self._iterator:
+ # Check if xfrout is shutdown
+ if self._server._shutdown_event.is_set():
logger.info(XFROUT_STOPPING)
return
- # TODO: RRType.SOA() ?
- if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
- continue
- rrset_ = self._create_rrset_from_db_record(rr_data)
+ # For AXFR (or AXFR-style IXFR), _jnl_reader is None; in that case
+ # we should skip SOAs from the iterator.
+ if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
+ continue
# We calculate the maximum size of the RRset (i.e. the
# size without compression) and use that to see if we
# may have reached the limit
- rrset_len = get_rrset_len(rrset_)
+ rrset_len = get_rrset_len(rrset)
if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
- msg.add_rrset(Message.SECTION_ANSWER, rrset_)
+ msg.add_rrset(Message.SECTION_ANSWER, rrset)
message_upper_len += rrset_len
continue
- # If tsig context exist, sign every N packets
- if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
- count_since_last_tsig_sign = 0
- self._send_message(sock_fd, msg, self._tsig_ctx)
- else:
- self._send_message(sock_fd, msg)
+ self._send_message(sock_fd, msg, self._tsig_ctx)
- count_since_last_tsig_sign += 1
msg = self._clear_message(msg)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
+ # Add the RRset to the new message
+ msg.add_rrset(Message.SECTION_ANSWER, rrset)
# Reserve tsig space for signed packet
- if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
- message_upper_len = rrset_len + self._tsig_len
- else:
- message_upper_len = rrset_len
+ message_upper_len = rrset_len + self._tsig_len
- self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
- count_since_last_tsig_sign)
+ # Add and send the trailing SOA
+ self._send_message_with_last_soa(msg, sock_fd, self._soa,
+ message_upper_len)
class UnixSockServer(socketserver_mixin.NoPollMixIn,
ThreadingUnixStreamServer):
@@ -483,7 +667,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
try:
self.process_request(request)
except Exception as pre:
- log.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
+ logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
break
def _handle_request_noblock(self):
@@ -517,9 +701,12 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
t.start()
def _guess_remote(self, sock_fd):
- """
- Guess remote address and port of the socket. The sock_fd must be a
- socket
+ """Guess remote address and port of the socket.
+
+ The sock_fd must be a file descriptor of a socket.
+ This method returns a 3-tuple consisting of address family,
+ socket type, and a 2-tuple with the address (string) and port (int).
+
"""
# This uses a trick. If the socket is IPv4 in reality and we pretend
# it to be IPv6, it returns IPv4 address anyway. This doesn't seem
@@ -531,11 +718,23 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
# To make it work even on hosts without IPv6 support
# (Any idea how to simulate this in test?)
sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
- return sock.getpeername()
+ peer = sock.getpeername()
+
+ # Identify the correct socket family. Due to the above "trick",
+ # we cannot simply use sock.family.
+ family = socket.AF_INET6
+ try:
+ socket.inet_pton(socket.AF_INET6, peer[0])
+ except socket.error:
+ family = socket.AF_INET
+ return (family, socket.SOCK_STREAM, peer)
def finish_request(self, sock_fd, request_data):
'''Finish one request by instantiating RequestHandlerClass.
+ This is an entry point of a separate thread spawned in
+ UnixSockServer.process_request().
+
This method creates a XfroutSession object.
'''
self._lock.acquire()
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index b2e432c..fcc2e59 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -15,30 +15,6 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrout messages python module.
-% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-
-% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-
-% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-# Still a TODO, but when implemented, REFUSED can also mean
-# the client is not allowed to transfer the zone
-
-% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
-A transfer out of the given zone has started.
-
% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
The TSIG key string as read from the configuration does not represent
a valid TSIG key.
@@ -106,16 +82,27 @@ in the log message, but at this point no specific information other
than that could be given. This points to incomplete exception handling
in the code.
-% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+% XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped
+The xfrout process silently dropped a request to transfer a zone to
+the given host. This is required by the ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
-% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected
The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs. The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+
+% XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message. The
+request was immediately responded to and terminated with an RCODE of
+REFUSED. This can happen on a busy xfrout server, and you may want to
+increase this parameter; if the server is too busy due to requests from
+unexpected clients, you may want to restrict access to the legitimate
+clients with an ACL.
% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
There was an error receiving the file descriptor for the transfer
@@ -160,3 +147,72 @@ on, but the file is in use. The most likely cause is that another
xfrout daemon process is still running. This xfrout daemon (the one
printing this message) will not start.
+% XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is a low-level error in the data
+source, but it may also be another, less likely, general error such
+as memory shortage. Some detail of the error is also included in the
+message. The xfrout server tries to return a SERVFAIL response in this case.
+
+% XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
+A transfer out of the given zone has started.
+
+% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs
+An IXFR request was received with more than one SOA RR in the authority
+section. The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+
+% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+
+% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR
+An IXFR request was received but the underlying data source did
+not support journaling. The xfrout daemon fell back to AXFR-style
+IXFR.
+
+% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server. The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic. This will soon
+be implemented.
+
+% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR
+An IXFR request was received, but the requested range of differences
+was not found in the data source. The xfrout daemon fell back to
+AXFR-style IXFR.
+
+% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source. This can happen if the administrator removed the
+zone from the data source in the short interval between these
+operations, but it is more likely to be a bug or a broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone. The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 2c3f709..24c8850 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
#ifndef __DATA_SOURCE_CLIENT_H
#define __DATA_SOURCE_CLIENT_H 1
+#include <utility>
+
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
@@ -215,18 +217,19 @@ public:
///
/// \param name The name of zone apex to be traversed. It doesn't do
/// nearest match as findZone.
- /// \param adjust_ttl If true, the iterator will treat RRs with the same
- /// name and type but different TTL values to be of the
- /// same RRset, and will adjust the TTL to the lowest
- /// value found. If false, it will consider the RR to
- /// belong to a different RRset.
+ /// \param separate_rrs If true, the iterator will return each RR as a
+ /// new RRset object. If false, the iterator will
+ /// combine consecutive RRs with the same name and
+ /// type into one RRset. The capitalization of the RRset will
+ /// be that of the first RR read, and TTLs will be
+ /// adjusted to the lowest one found.
/// \return Pointer to the iterator.
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const {
+ bool separate_rrs = false) const {
// This is here to both document the parameter in doxygen (therefore it
// needs a name) and avoid unused parameter warning.
static_cast<void>(name);
- static_cast<void>(adjust_ttl);
+ static_cast<void>(separate_rrs);
isc_throw(isc::NotImplemented,
"Data source doesn't support iteration");
@@ -272,6 +275,22 @@ public:
/// In such cases this method will result in an \c isc::NotImplemented
/// exception unconditionally or when \c replace is false).
///
+ /// If \c journaling is true, the data source should store a journal
+ /// of changes. These can be used later on by, for example, IXFR-out.
+ /// However, the parameter is only a hint; the data source might be
+ /// unable to store the journal, in which case the changes are silently
+ /// discarded. Or it might store them unconditionally (a git-based data
+ /// source, for example, would store the journal implicitly). When
+ /// \c journaling is true, the subsequent update must be formatted as an
+ /// IXFR transfer (SOA to be removed, a batch of RRs to be removed, SOA
+ /// to be added, a batch of RRs to be added, possibly repeated). The
+ /// updater is not required to check this, however. If \c journaling is
+ /// false, the updater must not impose this requirement and must accept
+ /// changes in any order.
+ ///
+ /// We don't support erasing the whole zone (by replace being true) and
+ /// saving a journal at the same time. In such a situation, BadValue is
+ /// thrown.
+ ///
/// \note To avoid throwing the exception accidentally with a lazy
/// implementation, we still keep this method pure virtual without
/// an implementation. All derived classes must explicitly define this
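The expected call order under journaling can be sketched roughly as follows;
'client' and the RRset arguments are placeholders, not part of this change:

    #include <datasrc/client.h>
    #include <dns/name.h>
    #include <dns/rrset.h>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    applyOneDiff(DataSourceClient& client, const RRset& old_soa,
                 const RRset& removed, const RRset& new_soa,
                 const RRset& added) {
        // replace = false, journaling = true: keep the zone, record a journal.
        ZoneUpdaterPtr updater = client.getUpdater(Name("example.org"),
                                                   false, true);
        // One IXFR-style difference sequence: deletions first, additions
        // second, each part starting with the corresponding SOA.
        updater->deleteRRset(old_soa);   // SOA of the old version
        updater->deleteRRset(removed);   // RRs removed in this version
        updater->addRRset(new_soa);      // SOA of the new version
        updater->addRRset(added);        // RRs added in this version
        updater->commit();               // must not end in the delete phase
    }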
@@ -282,14 +301,67 @@ public:
/// \exception DataSourceError Internal error in the underlying data
/// source.
/// \exception std::bad_alloc Resource allocation failure.
+ /// \exception BadValue if both replace and journaling are true.
///
/// \param name The zone name to be updated
/// \param replace Whether to delete existing RRs before making updates
+ /// \param journaling The zone updater should store a journal of the
+ /// changes.
///
/// \return A pointer to the updater; it will be NULL if the specified
/// zone isn't found.
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const = 0;
+ bool replace, bool journaling = false)
+ const = 0;
+
+ /// Return a journal reader to retrieve differences of a zone.
+ ///
+ /// A derived version of this method creates a concrete
+ /// \c ZoneJournalReader object specific to the underlying data source
+ /// for the specified name of zone and differences between the versions
+ /// specified by the beginning and ending serials of the corresponding
+ /// SOA RRs.
+ /// The RR class of the zone is the one that the client is expected to
+ /// handle (see the detailed description of this class).
+ ///
+ /// Note that the SOA serials are compared by the semantics of the serial
+ /// number arithmetic. So, for example, \c begin_serial can be larger than
+ /// \c end_serial as bare unsigned integers. The underlying data source
+ /// implementation is assumed to keep track of sufficient history to
+ /// identify (if it exists) the corresponding difference between the
+ /// specified versions.
+ ///
+ /// This method returns the result as a pair of a result code and
+ /// a pointer to a \c ZoneJournalReader object. On success, the result
+ /// code is \c SUCCESS and the pointer must be non-NULL; otherwise
+ /// the result code is something other than \c SUCCESS and the pointer
+ /// must be NULL.
+ ///
+ /// If the specified zone is not found in the data source, the result
+ /// code is \c NO_SUCH_ZONE.
+ /// Otherwise, if the specified range of differences for the zone is not
+ /// found
+ /// in the data source, the result code is \c NO_SUCH_VERSION.
+ ///
+ /// Handling differences is an optional feature of a data source.
+ /// If the underlying data source does not support difference handling,
+ /// this method for that type of data source can throw an exception of
+ /// class \c NotImplemented.
+ ///
+ /// \exception NotImplemented The data source does not support differences.
+ /// \exception DataSourceError Other operational errors at the data source
+ /// level.
+ ///
+ /// \param zone The name of the zone for which the difference should be
+ /// retrieved.
+ /// \param begin_serial The SOA serial of the beginning version of the
+ /// differences.
+ /// \param end_serial The SOA serial of the ending version of the
+ /// differences.
+ ///
+ /// \return A pair of result code and a pointer to \c ZoneJournalReader.
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const = 0;
};
}
}
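A rough sketch of how a caller might consume the new getJournalReader()
interface ('client' and the serials are placeholders; it assumes the
ZoneJournalReader declarations are visible through the usual datasrc
headers). Note that 'begin' may be numerically larger than 'end', since the
serials are compared with serial number arithmetic:

    #include <datasrc/client.h>
    #include <dns/name.h>
    #include <dns/rrset.h>

    #include <stdint.h>
    #include <utility>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    readJournal(DataSourceClient& client, uint32_t begin, uint32_t end) {
        std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
            client.getJournalReader(Name("example.org"), begin, end);
        if (result.first == ZoneJournalReader::SUCCESS) {
            // The first RRset is the SOA of the "begin" version and the
            // last one the SOA of the "end" version; NULL marks the end.
            for (ConstRRsetPtr diff = result.second->getNextDiff(); diff;
                 diff = result.second->getNextDiff()) {
                // hand each difference RRset to the consumer (e.g. IXFR-out)
            }
        } else if (result.first == ZoneJournalReader::NO_SUCH_VERSION) {
            // The requested range is not in the journal: fall back to
            // AXFR-style IXFR, as b10-xfrout does.
        }
    }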
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index a7a15a9..c35f0d3 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -53,6 +53,18 @@ public:
isc::Exception(file, line, what) {}
};
+/// \brief No such serial number when obtaining difference iterator
+///
+/// Thrown if either the zone/start serial number or zone/end serial number
+/// combination does not exist in the differences table. (Note that this
+/// includes the case where the differences table contains no records related
+/// to that zone.)
+class NoSuchSerial : public DataSourceError {
+public:
+ NoSuchSerial(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
class AbstractDataSrc {
///
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index f06cdc0..053d4bc 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <string>
+#include <utility>
#include <vector>
#include <datasrc/database.h>
@@ -519,7 +520,7 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
// It's not empty non-terminal. So check for wildcards.
// We remove labels one by one and look for the wildcard there.
// Go up to first non-empty domain.
- for (size_t i(1); i <= current_label_count - last_known; ++i) {
+ for (size_t i(1); i + last_known <= current_label_count; ++i) {
// Construct the name with *
const Name superdomain(name.split(i));
const string wildcard("*." + superdomain.toText());
@@ -707,11 +708,11 @@ public:
DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
const Name& zone_name,
const RRClass& rrclass,
- bool adjust_ttl) :
+ bool separate_rrs) :
accessor_(accessor),
class_(rrclass),
ready_(true),
- adjust_ttl_(adjust_ttl)
+ separate_rrs_(separate_rrs)
{
// Get the zone
const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
@@ -769,20 +770,19 @@ public:
const RRType rtype(rtype_str);
RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
- if (adjust_ttl_) {
- if (ttl_ != ttl) {
- if (ttl < ttl_) {
- ttl_ = ttl;
- rrset->setTTL(RRTTL(ttl));
- }
- LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
- arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
}
- } else if (ttl_ != ttl) {
- break;
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
}
rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
getData();
+ if (separate_rrs_) {
+ break;
+ }
}
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
arg(rrset->getName()).arg(rrset->getType());
@@ -814,18 +814,18 @@ private:
string name_, rtype_, rdata_, ttl_;
// Whether to modify differing TTL values, or treat a different TTL as
// a different RRset
- bool adjust_ttl_;
+ bool separate_rrs_;
};
}
ZoneIteratorPtr
DatabaseClient::getIterator(const isc::dns::Name& name,
- bool adjust_ttl) const
+ bool separate_rrs) const
{
ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
accessor_->clone(), name,
- rrclass_, adjust_ttl));
+ rrclass_, separate_rrs));
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
arg(name);
@@ -838,10 +838,12 @@ DatabaseClient::getIterator(const isc::dns::Name& name,
class DatabaseUpdater : public ZoneUpdater {
public:
DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
- const Name& zone_name, const RRClass& zone_class) :
+ const Name& zone_name, const RRClass& zone_class,
+ bool journaling) :
committed_(false), accessor_(accessor), zone_id_(zone_id),
db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
- zone_class_(zone_class),
+ zone_class_(zone_class), journaling_(journaling),
+ diff_phase_(NOT_STARTED),
finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
{
logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -877,45 +879,97 @@ public:
virtual void commit();
private:
+ // A short cut typedef only for making the code shorter.
+ typedef DatabaseAccessor Accessor;
+
bool committed_;
shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
const string db_name_;
const string zone_name_;
const RRClass zone_class_;
+ const bool journaling_;
+ // For the journals
+ enum DiffPhase {
+ NOT_STARTED,
+ DELETE,
+ ADD
+ };
+ DiffPhase diff_phase_;
+ uint32_t serial_;
boost::scoped_ptr<DatabaseClient::Finder> finder_;
+
+ // This is a set of validation checks commonly used for addRRset() and
+ // deleteRRset to minimize duplicate code logic and to make the main
+ // code concise.
+ void validateAddOrDelete(const char* const op_str, const RRset& rrset,
+ DiffPhase prev_phase,
+ DiffPhase current_phase) const;
};
void
-DatabaseUpdater::addRRset(const RRset& rrset) {
+DatabaseUpdater::validateAddOrDelete(const char* const op_str,
+ const RRset& rrset,
+ DiffPhase prev_phase,
+ DiffPhase current_phase) const
+{
if (committed_) {
- isc_throw(DataSourceError, "Add attempt after commit to zone: "
+ isc_throw(DataSourceError, op_str << " attempt after commit to zone: "
<< zone_name_ << "/" << zone_class_);
}
+ if (rrset.getRdataCount() == 0) {
+ isc_throw(DataSourceError, op_str << " attempt with an empty RRset: "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
if (rrset.getClass() != zone_class_) {
- isc_throw(DataSourceError, "An RRset of a different class is being "
- << "added to " << zone_name_ << "/" << zone_class_ << ": "
+ isc_throw(DataSourceError, op_str << " attempt for a different class "
+ << zone_name_ << "/" << zone_class_ << ": "
<< rrset.toText());
}
if (rrset.getRRsig()) {
- isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+ isc_throw(DataSourceError, op_str << " attempt for RRset with RRSIG "
<< zone_name_ << "/" << zone_class_ << ": "
<< rrset.toText());
}
+ if (journaling_) {
+ const RRType rrtype(rrset.getType());
+ if (rrtype == RRType::SOA() && diff_phase_ != prev_phase) {
+ isc_throw(isc::BadValue, op_str << " attempt in an invalid "
+ << "diff phase: " << diff_phase_ << ", rrset: " <<
+ rrset.toText());
+ }
+ if (rrtype != RRType::SOA() && diff_phase_ != current_phase) {
+ isc_throw(isc::BadValue, "diff state change by non SOA: "
+ << rrset.toText());
+ }
+ }
+}
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+ validateAddOrDelete("add", rrset, DELETE, ADD);
+
+ // It's guaranteed rrset has at least one RDATA at this point.
RdataIteratorPtr it = rrset.getRdataIterator();
- if (it->isLast()) {
- isc_throw(DataSourceError, "An empty RRset is being added for "
- << rrset.getName() << "/" << zone_class_ << "/"
- << rrset.getType());
- }
- string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
- columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
- columns[DatabaseAccessor::ADD_REV_NAME] =
- rrset.getName().reverse().toText();
- columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
- columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+ string columns[Accessor::ADD_COLUMN_COUNT]; // initialized with ""
+ columns[Accessor::ADD_NAME] = rrset.getName().toText();
+ columns[Accessor::ADD_REV_NAME] = rrset.getName().reverse().toText();
+ columns[Accessor::ADD_TTL] = rrset.getTTL().toText();
+ columns[Accessor::ADD_TYPE] = rrset.getType().toText();
+ string journal[Accessor::DIFF_PARAM_COUNT];
+ if (journaling_) {
+ journal[Accessor::DIFF_NAME] = columns[Accessor::ADD_NAME];
+ journal[Accessor::DIFF_TYPE] = columns[Accessor::ADD_TYPE];
+ journal[Accessor::DIFF_TTL] = columns[Accessor::ADD_TTL];
+ diff_phase_ = ADD;
+ if (rrset.getType() == RRType::SOA()) {
+ serial_ =
+ dynamic_cast<const generic::SOA&>(it->getCurrent()).
+ getSerial();
+ }
+ }
for (; !it->isLast(); it->next()) {
if (rrset.getType() == RRType::RRSIG()) {
// XXX: the current interface (based on the current sqlite3
@@ -925,43 +979,53 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
// the interface, but until then we have to conform to the schema.
const generic::RRSIG& rrsig_rdata =
dynamic_cast<const generic::RRSIG&>(it->getCurrent());
- columns[DatabaseAccessor::ADD_SIGTYPE] =
+ columns[Accessor::ADD_SIGTYPE] =
rrsig_rdata.typeCovered().toText();
}
- columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+ columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
+ if (journaling_) {
+ journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
+ accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_ADD,
+ journal);
+ }
accessor_->addRecordToZone(columns);
}
}
void
DatabaseUpdater::deleteRRset(const RRset& rrset) {
- if (committed_) {
- isc_throw(DataSourceError, "Delete attempt after commit on zone: "
- << zone_name_ << "/" << zone_class_);
- }
- if (rrset.getClass() != zone_class_) {
- isc_throw(DataSourceError, "An RRset of a different class is being "
- << "deleted from " << zone_name_ << "/" << zone_class_
- << ": " << rrset.toText());
- }
- if (rrset.getRRsig()) {
- isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
- << zone_name_ << "/" << zone_class_ << ": "
- << rrset.toText());
+ // If this is the first operation, pretend we are starting a new delete
+ // sequence after adds. This will simplify the validation below.
+ if (diff_phase_ == NOT_STARTED) {
+ diff_phase_ = ADD;
}
+ validateAddOrDelete("delete", rrset, ADD, DELETE);
+
RdataIteratorPtr it = rrset.getRdataIterator();
- if (it->isLast()) {
- isc_throw(DataSourceError, "An empty RRset is being deleted for "
- << rrset.getName() << "/" << zone_class_ << "/"
- << rrset.getType());
- }
- string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
- params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
- params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+ string params[Accessor::DEL_PARAM_COUNT]; // initialized with ""
+ params[Accessor::DEL_NAME] = rrset.getName().toText();
+ params[Accessor::DEL_TYPE] = rrset.getType().toText();
+ string journal[Accessor::DIFF_PARAM_COUNT];
+ if (journaling_) {
+ journal[Accessor::DIFF_NAME] = params[Accessor::DEL_NAME];
+ journal[Accessor::DIFF_TYPE] = params[Accessor::DEL_TYPE];
+ journal[Accessor::DIFF_TTL] = rrset.getTTL().toText();
+ diff_phase_ = DELETE;
+ if (rrset.getType() == RRType::SOA()) {
+ serial_ =
+ dynamic_cast<const generic::SOA&>(it->getCurrent()).
+ getSerial();
+ }
+ }
for (; !it->isLast(); it->next()) {
- params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+ params[Accessor::DEL_RDATA] = it->getCurrent().toText();
+ if (journaling_) {
+ journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
+ accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_DELETE,
+ journal);
+ }
accessor_->deleteRecordInZone(params);
}
}
@@ -973,6 +1037,9 @@ DatabaseUpdater::commit() {
<< zone_name_ << "/" << zone_class_ << " on "
<< db_name_);
}
+ if (journaling_ && diff_phase_ == DELETE) {
+ isc_throw(isc::BadValue, "Update sequence not complete");
+ }
accessor_->commit();
committed_ = true; // make sure the destructor won't trigger rollback
@@ -986,7 +1053,13 @@ DatabaseUpdater::commit() {
// The updater factory
ZoneUpdaterPtr
-DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
+ bool journaling) const
+{
+ if (replace && journaling) {
+ isc_throw(isc::BadValue, "Can't store journal and replace the whole "
+ "zone at the same time");
+ }
shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
const std::pair<bool, int> zone(update_accessor->startUpdateZone(
name.toText(), replace));
@@ -995,7 +1068,107 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
}
return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
- name, rrclass_)));
+ name, rrclass_, journaling)));
+}
+
+//
+// Zone journal reader using some database system as the underlying data
+// source.
+//
+class DatabaseJournalReader : public ZoneJournalReader {
+private:
+ // A shortcut typedef to keep the code concise.
+ typedef DatabaseAccessor Accessor;
+public:
+ DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+ int zone_id, const RRClass& rrclass, uint32_t begin,
+ uint32_t end) :
+ accessor_(accessor), zone_(zone), rrclass_(rrclass),
+ begin_(begin), end_(end), finished_(false)
+ {
+ context_ = accessor_->getDiffs(zone_id, begin, end);
+ }
+ virtual ~DatabaseJournalReader() {}
+ virtual ConstRRsetPtr getNextDiff() {
+ if (finished_) {
+ isc_throw(InvalidOperation,
+ "Diff read attempt past the end of sequence on "
+ << accessor_->getDBName());
+ }
+
+ string data[Accessor::COLUMN_COUNT];
+ if (!context_->getNext(data)) {
+ finished_ = true;
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_JOURNALREADER_END).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+ arg(begin_).arg(end_);
+ return (ConstRRsetPtr());
+ }
+
+ try {
+ RRsetPtr rrset(new RRset(Name(data[Accessor::NAME_COLUMN]),
+ rrclass_,
+ RRType(data[Accessor::TYPE_COLUMN]),
+ RRTTL(data[Accessor::TTL_COLUMN])));
+ rrset->addRdata(rdata::createRdata(rrset->getType(), rrclass_,
+ data[Accessor::RDATA_COLUMN]));
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_JOURNALREADER_NEXT).
+ arg(rrset->getName()).arg(rrset->getType()).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName());
+ return (rrset);
+ } catch (const Exception& ex) {
+ LOG_ERROR(logger, DATASRC_DATABASE_JOURNALREADR_BADDATA).
+ arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+ arg(begin_).arg(end_).arg(ex.what());
+ isc_throw(DataSourceError, "Failed to create RRset from diff on "
+ << accessor_->getDBName());
+ }
+ }
+
+private:
+ shared_ptr<Accessor> accessor_;
+ const Name zone_;
+ const RRClass rrclass_;
+ Accessor::IteratorContextPtr context_;
+ const uint32_t begin_;
+ const uint32_t end_;
+ bool finished_;
+};
+
+// The JournalReader factory
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+DatabaseClient::getJournalReader(const isc::dns::Name& zone,
+ uint32_t begin_serial,
+ uint32_t end_serial) const
+{
+ shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+ const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
+ if (!zoneinfo.first) {
+ return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+ ZoneJournalReader::NO_SUCH_ZONE,
+ ZoneJournalReaderPtr()));
+ }
+
+ try {
+ const pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> ret(
+ ZoneJournalReader::SUCCESS,
+ ZoneJournalReaderPtr(new DatabaseJournalReader(jnl_accessor,
+ zone,
+ zoneinfo.second,
+ rrclass_,
+ begin_serial,
+ end_serial)));
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_JOURNALREADER_START).arg(zone).arg(rrclass_).
+ arg(jnl_accessor->getDBName()).arg(begin_serial).arg(end_serial);
+ return (ret);
+ } catch (const NoSuchSerial&) {
+ return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+ ZoneJournalReader::NO_SUCH_VERSION,
+ ZoneJournalReaderPtr()));
+ }
}
}
}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index b3fda6d..81e6241 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -23,6 +23,8 @@
#include <dns/rrclass.h>
#include <dns/rrset.h>
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
#include <datasrc/client.h>
#include <dns/name.h>
@@ -274,6 +276,56 @@ public:
*/
virtual IteratorContextPtr getAllRecords(int id) const = 0;
+ /**
+ * \brief Creates an iterator context for a set of differences.
+ *
+ * Returns an IteratorContextPtr that contains all difference records for
+ * the given zone between two versions of a zone.
+ *
+ * The difference records are the set of records that would appear in an
+ * IXFR serving a request for the difference between two versions of a zone.
+ * The records are returned in the same order as they would be in the IXFR.
+ * This means that if the difference between versions of a zone with SOA
+ * serial numbers of "start" and "end" is required, and the zone contains
+ * the differences between serial number "start" to serial number
+ * "intermediate" and from serial number "intermediate" to serial number
+ * "end", the returned records will be (in order):
+ *
+ * \li SOA for serial "start"
+ * \li Records removed from the zone between versions "start" and
+ * "intermediate" of the zone. The order of these is not guaranteed.
+ * \li SOA for serial "intermediate"
+ * \li Records added to the zone between versions "start" and
+ * "intermediate" of the zone. The order of these is not guaranteed.
+ * \li SOA for serial "intermediate"
+ * \li Records removed from the zone between versions "intermediate" and
+ * "end" of the zone. The order of these is not guaranteed.
+ * \li SOA for serial "end"
+ * \li Records added to the zone between versions "intermediate" and "end"
+ * of the zone. The order of these is not guaranteed.
+ *
+ * Note that there is no requirement that "start" be less than "end". Owing
+ * to serial number arithmetic, it is entirely possible that a later version
+ * of a zone will have a smaller SOA serial number than an earlier version.
+ *
+ * Each call to getNext() on the returned iterator should copy all
+ * column fields of the array that is passed, as defined in the
+ * RecordColumns enum.
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \param start The SOA serial number of the version of the zone from
+ * which the difference sequence should start.
+ * \param end The SOA serial number of the version of the zone at which
+ * the difference sequence should end.
+ *
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr
+ getDiffs(int id, uint32_t start, uint32_t end) const = 0;
+
/// Start a transaction for updating a zone.
///
/// Each derived class version of this method starts a database
@@ -494,12 +546,10 @@ public:
/// is not for the SOA RR; it passes TTL for a diff that deletes an RR
/// while in \c deleteRecordInZone() it's omitted. This is because
/// the stored diffs are expected to be retrieved in the form that
- /// \c getRecordDiffs() is expected to meet. This means if the caller
+ /// \c getDiffs() is expected to meet. This means if the caller
/// wants to use this method with other update operations, it must
/// ensure the additional information is ready when this method is called.
///
- /// \note \c getRecordDiffs() is not yet implemented.
- ///
/// The caller of this method must ensure that the added diffs via
/// this method in a single transaction form an IXFR-style difference
/// sequences: Each difference sequence is a sequence of RRs:
@@ -512,7 +562,7 @@ public:
/// an SOA RR, \c serial must be identical to the serial of that SOA).
/// The underlying derived class implementation may or may not check
/// this condition, but if the caller doesn't meet the condition
- /// a subsequent call to \c getRecordDiffs() will not work as expected.
+ /// a subsequent call to \c getDiffs() will not work as expected.
///
/// Any call to this method must be in a transaction, and, for now,
/// it must be a transaction triggered by \c startUpdateZone() (that is,
@@ -863,22 +913,33 @@ public:
* \exception Anything else the underlying DatabaseConnection might
* want to throw.
* \param name The origin of the zone to iterate.
- * \param adjust_ttl If true, the iterator will treat RRs with the same
- * name and type but different TTL values to be of the
- * same RRset, and will adjust the TTL to the lowest
- * value found. If false, it will consider the RR to
- * belong to a different RRset.
+ * \param separate_rrs If true, the iterator will return each RR as a
+ * new RRset object. If false, the iterator will
+ * combine consecutive RRs with the same name and
+ * type into one RRset. The capitalization of the RRset will
+ * be that of the first RR read, and TTLs will be
+ * adjusted to the lowest one found.
* \return Shared pointer to the iterator (it will never be NULL)
*/
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const;
+ bool separate_rrs = false) const;
/// This implementation internally clones the accessor from the one
/// used in the client and starts a separate transaction using the cloned
/// accessor. The returned updater will be able to work separately from
/// the original client.
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const;
+ bool replace,
+ bool journaling = false) const;
+
+
+ /// This implementation internally clones the accessor from the one
+ /// used in the client for retrieving diffs and iterating over them.
+ /// The returned reader object will be able to work separately from
+ /// the original client.
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const;
private:
/// \brief The RR class that this client handles.
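At the accessor level, the new getDiffs() context is consumed much like the
other iterator contexts; a rough sketch, with 'accessor' and 'zone_id' as
placeholders obtained elsewhere (e.g. via getZone()):

    #include <datasrc/database.h>

    #include <stdint.h>
    #include <string>

    using namespace isc::datasrc;

    void
    walkDiffs(DatabaseAccessor& accessor, int zone_id,
              uint32_t begin_serial, uint32_t end_serial) {
        DatabaseAccessor::IteratorContextPtr ctx =
            accessor.getDiffs(zone_id, begin_serial, end_serial);
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        while (ctx->getNext(columns)) {
            // Each row is one RR of the difference sequence, in IXFR order;
            // NAME_COLUMN, TYPE_COLUMN, TTL_COLUMN and RDATA_COLUMN are
            // filled in (this is how the database journal reader builds
            // its RRsets).
        }
    }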
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 04ad610..b4d0df7 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -630,3 +630,31 @@ database module are shown in the log message.
Debug information. A set of updates to a zone has been successfully
committed to the corresponding database backend. The zone name,
its class and the database name are printed.
+
+% DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source. The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5
+This is a debug message indicating that the program retrieves one
+difference from a zone's difference sequences and successfully converts
+it to an RRset. The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program (successfully)
+reaches the end of a zone's difference sequences. The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+
+% DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 6c0f589..a79ee5b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -729,10 +729,14 @@ private:
Domain::const_iterator dom_iterator_;
const DomainTree& tree_;
const DomainNode* node_;
+ // Only used when separate_rrs_ is true
+ RdataIteratorPtr rdata_iterator_;
+ bool separate_rrs_;
bool ready_;
public:
- MemoryIterator(const DomainTree& tree, const Name& origin) :
+ MemoryIterator(const DomainTree& tree, const Name& origin, bool separate_rrs) :
tree_(tree),
+ separate_rrs_(separate_rrs),
ready_(true)
{
// Find the first node (origin) and preserve the node chain for future
@@ -747,6 +751,9 @@ public:
// Initialize the iterator if there's somewhere to point to
if (node_ != NULL && node_->getData() != DomainPtr()) {
dom_iterator_ = node_->getData()->begin();
+ if (separate_rrs_ && dom_iterator_ != node_->getData()->end()) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
}
}
@@ -766,6 +773,10 @@ public:
// if the map is empty or not
if (node_ != NULL && node_->getData() != NULL) {
dom_iterator_ = node_->getData()->begin();
+ // New RRset, so get a new rdata iterator
+ if (separate_rrs_) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
}
}
if (node_ == NULL) {
@@ -773,12 +784,35 @@ public:
ready_ = false;
return (ConstRRsetPtr());
}
- // The iterator points to the next yet unused RRset now
- ConstRRsetPtr result(dom_iterator_->second);
- // This one is used, move it to the next time for next call
- ++dom_iterator_;
- return (result);
+ if (separate_rrs_) {
+ // For separate rrs, reconstruct a new RRset with just the
+ // 'current' rdata
+ RRsetPtr result(new RRset(dom_iterator_->second->getName(),
+ dom_iterator_->second->getClass(),
+ dom_iterator_->second->getType(),
+ dom_iterator_->second->getTTL()));
+ result->addRdata(rdata_iterator_->getCurrent());
+ rdata_iterator_->next();
+ if (rdata_iterator_->isLast()) {
+ // all used up, next.
+ ++dom_iterator_;
+ // New RRset, so get a new rdata iterator, but only if this
+ // was not the final RRset in the chain
+ if (dom_iterator_ != node_->getData()->end()) {
+ rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+ }
+ }
+ return (result);
+ } else {
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+
+ // This one is used, move it to the next time for next call
+ ++dom_iterator_;
+
+ return (result);
+ }
}
virtual ConstRRsetPtr getSOA() const {
@@ -789,11 +823,7 @@ public:
} // End of anonymous namespace
ZoneIteratorPtr
-InMemoryClient::getIterator(const Name& name, bool) const {
- // note: adjust_ttl argument is ignored, as the RRsets are already
- // individually stored, and hence cannot have different TTLs anymore at
- // this point
-
+InMemoryClient::getIterator(const Name& name, bool separate_rrs) const {
ZoneTable::FindResult result(impl_->zone_table.findZone(name));
if (result.code != result::SUCCESS) {
isc_throw(DataSourceError, "No such zone: " + name.toText());
@@ -811,14 +841,22 @@ InMemoryClient::getIterator(const Name& name, bool) const {
isc_throw(Unexpected, "The zone at " + name.toText() +
" is not InMemoryZoneFinder");
}
- return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name,
+ separate_rrs)));
}
ZoneUpdaterPtr
-InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
}
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+InMemoryClient::getJournalReader(const isc::dns::Name&, uint32_t,
+ uint32_t) const
+{
+ isc_throw(isc::NotImplemented, "Journaling isn't supported for "
+ "in memory data source");
+}
namespace {
// convencience function to add an error message to a list of those
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 1b6c120..b852eb3 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -273,7 +273,7 @@ public:
/// \brief Implementation of the getIterator method
virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
- bool adjust_ttl = true) const;
+ bool separate_rrs = false) const;
/// In-memory data source is read-only, so this derived method will
/// result in a NotImplemented exception.
@@ -284,7 +284,12 @@ public:
/// to update via its updater (this may or may not be a good idea and
/// is subject to further discussions).
virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
- bool replace) const;
+ bool replace, bool journaling = false)
+ const;
+
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+ uint32_t end_serial) const;
private:
// TODO: Do we still need the PImpl if nobody should manipulate this class
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 01b9f41..fb2ffef 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -23,6 +23,7 @@
#include <datasrc/logger.h>
#include <datasrc/data_source.h>
#include <datasrc/factory.h>
+#include <datasrc/database.h>
#include <util/filename.h>
using namespace std;
@@ -54,7 +55,10 @@ enum StatementID {
FIND_PREVIOUS = 10,
ADD_RECORD_DIFF = 11,
GET_RECORD_DIFF = 12, // This is temporary for testing "add diff"
- NUM_STATEMENTS = 13
+ LOW_DIFF_ID = 13,
+ HIGH_DIFF_ID = 14,
+ DIFF_RECS = 15,
+ NUM_STATEMENTS = 16
};
const char* const text_statements[NUM_STATEMENTS] = {
@@ -62,33 +66,48 @@ const char* const text_statements[NUM_STATEMENTS] = {
// specifically chosen to match the enum values in RecordColumns
"SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
"SELECT rdtype, ttl, sigtype, rdata FROM records " // ANY
- "WHERE zone_id=?1 AND name=?2",
+ "WHERE zone_id=?1 AND name=?2",
"SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
- "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+ "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
"BEGIN", // BEGIN
"COMMIT", // COMMIT
"ROLLBACK", // ROLLBACK
"DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
"INSERT INTO records " // ADD_RECORD
- "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
- "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
"DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
- "AND rdtype=?3 AND rdata=?4",
+ "AND rdtype=?3 AND rdata=?4",
"SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
- "WHERE zone_id = ?1 ORDER BY rname, rdtype",
+ "WHERE zone_id = ?1 ORDER BY rname, rdtype",
/*
* This one looks for previous name with NSEC record. It is done by
* using the reversed name. The NSEC is checked because we need to
* skip glue data, which don't have the NSEC.
*/
"SELECT name FROM records " // FIND_PREVIOUS
- "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1",
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1",
"INSERT INTO diffs " // ADD_RECORD_DIFF
- "(zone_id, version, operation, name, rrtype, ttl, rdata) "
- "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
- , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
- "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
+ "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+ "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation",
+
+ // Two statements to select the lowest ID and highest ID in a set of
+ // differences.
+ "SELECT id FROM diffs " // LOW_DIFF_ID
+ "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+ "ORDER BY id ASC LIMIT 1",
+ "SELECT id FROM diffs " // HIGH_DIFF_ID
+ "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+ "ORDER BY id DESC LIMIT 1",
+
+ // In the next statement, note the redundant ID. This is to ensure
+ // that the columns match the column IDs passed to the iterator
+ "SELECT rrtype, ttl, id, rdata, name FROM diffs " // DIFF_RECS
+ "WHERE zone_id=?1 AND id>=?2 and id<=?3 "
+ "ORDER BY id ASC"
};
struct SQLite3Parameters {
@@ -231,23 +250,26 @@ const char* const SCHEMA_LIST[] = {
"dnssec BOOLEAN NOT NULL DEFAULT 0)",
"CREATE INDEX zones_byname ON zones (name)",
"CREATE TABLE records (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
"CREATE INDEX records_byname ON records (name)",
"CREATE INDEX records_byrname ON records (rname)",
"CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
- "hash STRING NOT NULL COLLATE NOCASE, "
- "owner STRING NOT NULL COLLATE NOCASE, "
- "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
"CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
- "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, "
+ "version INTEGER NOT NULL, "
+ "operation INTEGER NOT NULL, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rrtype STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, "
+ "rdata STRING NOT NULL)",
NULL
};
@@ -558,6 +580,9 @@ private:
const std::string name_;
};
+
+// Methods to retrieve the various iterators
+
DatabaseAccessor::IteratorContextPtr
SQLite3Accessor::getRecords(const std::string& name, int id,
bool subdomains) const
@@ -571,6 +596,257 @@ SQLite3Accessor::getAllRecords(int id) const {
return (IteratorContextPtr(new Context(shared_from_this(), id)));
}
+
+/// \brief Difference Iterator
+///
+/// This iterator is used to search through the differences table for the
+/// resource records making up an IXFR between two versions of a zone.
+
+class SQLite3Accessor::DiffContext : public DatabaseAccessor::IteratorContext {
+public:
+
+ /// \brief Constructor
+ ///
+ /// Constructs the iterator for the difference sequence. It is
+ /// passed two parameters, the first and last versions in the difference
+ /// sequence. Note that because of serial number rollover, it may well
+ /// be that the start serial number is greater than the end one.
+ ///
+ /// \param zone_id ID of the zone (in the zone table)
+ /// \param start Serial number of first version in difference sequence
+ /// \param end Serial number of last version in difference sequence
+ ///
+ /// \exception any A number of exceptions can be expected
+ DiffContext(const boost::shared_ptr<const SQLite3Accessor>& accessor,
+ int zone_id, uint32_t start, uint32_t end) :
+ accessor_(accessor),
+ last_status_(SQLITE_ROW)
+ {
+ try {
+ int low_id = findIndex(LOW_DIFF_ID, zone_id, start, DIFF_DELETE);
+ int high_id = findIndex(HIGH_DIFF_ID, zone_id, end, DIFF_ADD);
+
+ // Prepare the statement that will return data values
+ reset(DIFF_RECS);
+ bindInt(DIFF_RECS, 1, zone_id);
+ bindInt(DIFF_RECS, 2, low_id);
+ bindInt(DIFF_RECS, 3, high_id);
+
+ } catch (...) {
+ // Something wrong, clear up everything.
+ accessor_->dbparameters_->finalizeStatements();
+ throw;
+ }
+ }
+
+ /// \brief Destructor
+ virtual ~DiffContext()
+ {}
+
+ /// \brief Get Next Diff Record
+ ///
+ /// Returns the next difference record in the difference sequence.
+ ///
+ /// \param data Array of std::strings COLUMN_COUNT long. The results
+ /// are returned in this.
+ ///
+ /// \return bool true if data is returned, false if not.
+ ///
+ /// \exception any Varied
+ bool getNext(std::string (&data)[COLUMN_COUNT]) {
+
+ if (last_status_ != SQLITE_DONE) {
+ // Last call (if any) didn't reach end of result set, so we
+ // can read another row from it.
+ //
+ // Get a pointer to the statement for brevity (this does not
+ // transfer ownership of the statement to this class, so there is
+ // no need to tidy up after we have finished using it).
+ sqlite3_stmt* stmt =
+ accessor_->dbparameters_->getStatement(DIFF_RECS);
+
+ const int rc(sqlite3_step(stmt));
+ if (rc == SQLITE_ROW) {
+ // Copy the data across to the output array
+ copyColumn(DIFF_RECS, data, TYPE_COLUMN);
+ copyColumn(DIFF_RECS, data, TTL_COLUMN);
+ copyColumn(DIFF_RECS, data, NAME_COLUMN);
+ copyColumn(DIFF_RECS, data, RDATA_COLUMN);
+
+ } else if (rc != SQLITE_DONE) {
+ isc_throw(DataSourceError,
+ "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ last_status_ = rc;
+ }
+ return (last_status_ == SQLITE_ROW);
+ }
+
+private:
+
+ /// \brief Reset prepared statement
+ ///
+ /// Sets up the statement so that new parameters can be attached to it and
+ /// that it can be used to query for another difference sequence.
+ ///
+ /// \param stindex Index of prepared statement to which to bind
+ void reset(int stindex) {
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+ if ((sqlite3_reset(stmt) != SQLITE_OK) ||
+ (sqlite3_clear_bindings(stmt) != SQLITE_OK)) {
+ isc_throw(SQLite3Error, "Could not clear statement bindings in '" <<
+ text_statements[stindex] << "': " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ /// \brief Bind Int
+ ///
+ /// Binds an integer to a specific variable in a prepared statement.
+ ///
+ /// \param stindex Index of prepared statement to which to bind
+ /// \param varindex Index of variable to which to bind
+ /// \param value Value of variable to bind
+ /// \exception SQLite3Error on an error
+ void bindInt(int stindex, int varindex, sqlite3_int64 value) {
+ if (sqlite3_bind_int64(accessor_->dbparameters_->getStatement(stindex),
+ varindex, value) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind value to parameter " <<
+ varindex << " in statement '" <<
+ text_statements[stindex] << "': " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ ///\brief Get Single Value
+ ///
+ /// Executes a prepared statement (which has parameters bound to it)
+ /// for which the result of a single value is expected.
+ ///
+ /// \param stindex Index of prepared statement in statement table.
+ ///
+ /// \return Value of SELECT.
+ ///
+ /// \exception TooMuchData Multiple rows returned when one expected
+ /// \exception TooLittleData Zero rows returned when one expected
+ /// \exception DataSourceError SQLite3-related error
+ int getSingleValue(StatementID stindex) {
+
+ // Get a pointer to the statement for brevity (does not transfer
+ // resources)
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+
+ // Execute the statement. Should be just one result
+ int rc = sqlite3_step(stmt);
+ int result = -1;
+ if (rc == SQLITE_ROW) {
+
+ // Got some data, extract the value
+ result = sqlite3_column_int(stmt, 0);
+ rc = sqlite3_step(stmt);
+ if (rc == SQLITE_DONE) {
+
+ // All OK, exit with the value.
+ return (result);
+
+ } else if (rc == SQLITE_ROW) {
+ isc_throw(TooMuchData, "request to return one value from "
+ "diffs table returned multiple values");
+ }
+ } else if (rc == SQLITE_DONE) {
+
+ // No data in the table. A bare exception with no explanation is
+ // thrown, as it will be replaced by a more informative one by
+ // the caller.
+ isc_throw(TooLittleData, "");
+ }
+
+ // We get here on an error.
+ isc_throw(DataSourceError, "could not get data from diffs table: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+
+ // Keep the compiler happy with a return value.
+ return (result);
+ }
+
+ /// \brief Find index
+ ///
+ /// Executes the prepared statement locating the high or low index in
+ /// the diffs table and returns that index.
+ ///
+ /// \param stmt_id Index of the prepared statement to execute
+ /// \param zone_id ID of the zone for which the index is being sought
+ /// \param serial Zone serial number for which an index is being sought.
+    /// \param diff Operation code selecting record additions or deletions
+ ///
+    /// \return int ID of the row in the diffs table corresponding to the
+ /// statement.
+ ///
+ /// \exception TooLittleData Internal error, no result returned when one
+ /// was expected.
+ /// \exception NoSuchSerial Serial number not found.
+ /// \exception NoDiffsData No data for this zone found in diffs table
+ int findIndex(StatementID stindex, int zone_id, uint32_t serial, int diff) {
+
+ // Set up the statement
+ reset(stindex);
+ bindInt(stindex, 1, zone_id);
+ bindInt(stindex, 2, serial);
+ bindInt(stindex, 3, diff);
+
+ // Execute the statement
+ int result = -1;
+ try {
+ result = getSingleValue(stindex);
+
+ } catch (const TooLittleData&) {
+
+ // No data returned but the SQL query succeeded. Only possibility
+ // is that there is no entry in the differences table for the given
+ // zone and version.
+ isc_throw(NoSuchSerial, "No entry in differences table for " <<
+                      "zone ID " << zone_id << ", serial number " << serial);
+ }
+
+ return (result);
+ }
+
+ /// \brief Copy Column to Output
+ ///
+ /// Copies the textual data in the result set to the specified column
+ /// in the output.
+ ///
+ /// \param stindex Index of prepared statement used to access data
+ /// \param data Array of columns passed to getNext
+ /// \param column Column of output to copy
+ void copyColumn(StatementID stindex, std::string (&data)[COLUMN_COUNT],
+ int column) {
+
+ // Get a pointer to the statement for brevity (does not transfer
+ // resources)
+ sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+ data[column] = convertToPlainChar(sqlite3_column_text(stmt,
+ column),
+ accessor_->dbparameters_->db_);
+ }
+
+ // Attributes
+
+ boost::shared_ptr<const SQLite3Accessor> accessor_; // Accessor object
+ int last_status_; // Last status received from sqlite3_step
+};
+
+// ... and return the iterator
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getDiffs(int id, uint32_t start, uint32_t end) const {
+ return (IteratorContextPtr(new DiffContext(shared_from_this(), id, start,
+ end)));
+}
+
+
+
pair<bool, int>
SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
if (dbparameters_->updating_zone) {
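For readers following the diff, the new DiffContext is only reached through the
generic DatabaseAccessor interface, in the same way the unit tests further down
drive it. The following is a minimal sketch of that calling pattern; dumpDiffs()
and its arguments are illustrative only, and zone_id is assumed to come from an
earlier getZone() call:

    // Sketch only: dumpDiffs() is a hypothetical helper and not part of this
    // branch; "accessor" may be any DatabaseAccessor implementation and
    // "zone_id" is assumed to come from an earlier getZone() call.
    #include <stdint.h>
    #include <iostream>
    #include <string>

    #include <datasrc/database.h>

    using isc::datasrc::DatabaseAccessor;

    void
    dumpDiffs(const DatabaseAccessor& accessor, int zone_id,
              uint32_t start, uint32_t end)
    {
        // For the SQLite3 backend this throws NoSuchSerial if either serial
        // is absent from the diffs table.
        DatabaseAccessor::IteratorContextPtr context =
            accessor.getDiffs(zone_id, start, end);

        std::string data[DatabaseAccessor::COLUMN_COUNT];
        while (context->getNext(data)) {   // false once the sequence ends
            std::cout << data[DatabaseAccessor::NAME_COLUMN] << " "
                      << data[DatabaseAccessor::TTL_COLUMN] << " "
                      << data[DatabaseAccessor::TYPE_COLUMN] << " "
                      << data[DatabaseAccessor::RDATA_COLUMN] << "\n";
        }
    }

getNext() returns false once the requested sequence is exhausted and keeps
returning false on subsequent calls, which is what the diffIteratorSequences
test below checks.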
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 6b5369c..08be824 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -17,6 +17,7 @@
#define __DATASRC_SQLITE3_ACCESSOR_H
#include <datasrc/database.h>
+#include <datasrc/data_source.h>
#include <exceptions/exceptions.h>
@@ -40,10 +41,34 @@ namespace datasrc {
* It might mean corrupt database file, invalid request or that something is
* rotten in the library.
*/
-class SQLite3Error : public Exception {
+class SQLite3Error : public DataSourceError {
public:
SQLite3Error(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Much Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * many rows.
+ */
+class TooMuchData : public DataSourceError {
+public:
+ TooMuchData(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Little Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * few rows (including none).
+ */
+class TooLittleData : public DataSourceError {
+public:
+ TooLittleData(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
};
struct SQLite3Parameters;
@@ -128,6 +153,27 @@ public:
*/
virtual IteratorContextPtr getAllRecords(int id) const;
+ /** \brief Creates an iterator context for a set of differences.
+ *
+ * Implements the getDiffs() method from DatabaseAccessor
+ *
+     * \exception NoSuchSerial if either of the versions does not exist in
+     * the difference table.
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \param start The SOA serial number of the version of the zone from
+ * which the difference sequence should start.
+ * \param end The SOA serial number of the version of the zone at which
+ * the difference sequence should end.
+ *
+ * \return Iterator containing difference records.
+ */
+ virtual IteratorContextPtr
+ getDiffs(int id, uint32_t start, uint32_t end) const;
+
+
virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
bool replace);
@@ -192,14 +238,20 @@ private:
const std::string filename_;
/// \brief The class for which the queries are done
const std::string class_;
+ /// \brief Database name
+ const std::string database_name_;
+
/// \brief Opens the database
void open(const std::string& filename);
/// \brief Closes the database
void close();
- /// \brief SQLite3 implementation of IteratorContext
+
+ /// \brief SQLite3 implementation of IteratorContext for all records
class Context;
friend class Context;
- const std::string database_name_;
+ /// \brief SQLite3 implementation of IteratorContext for differences
+ class DiffContext;
+ friend class DiffContext;
};
/// \brief Creates an instance of the SQlite3 datasource client
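Because SQLite3Error (and the new TooMuchData/TooLittleData exceptions) now
derive from DataSourceError, a caller can treat backend-specific failures and
generic data source failures through one hierarchy while still picking out
NoSuchSerial, which an IXFR-style caller usually wants to handle separately.
A hedged sketch of such a call site; tryGetDiffs() is illustrative and not part
of the branch:

    // Sketch only; tryGetDiffs() is illustrative and not part of this branch.
    #include <stdint.h>
    #include <iostream>
    #include <string>

    #include <datasrc/sqlite3_accessor.h>

    using namespace isc::datasrc;

    void
    tryGetDiffs(const DatabaseAccessor& accessor, int zone_id,
                uint32_t start, uint32_t end)
    {
        try {
            DatabaseAccessor::IteratorContextPtr context =
                accessor.getDiffs(zone_id, start, end);
            std::string data[DatabaseAccessor::COLUMN_COUNT];
            while (context->getNext(data)) {
                // ... process one diff record ...
            }
        } catch (const NoSuchSerial&) {
            // One of the requested versions is not in the diffs table; an
            // IXFR responder would typically fall back to AXFR here.
            std::cerr << "requested diff range is not stored\n";
        } catch (const DataSourceError& ex) {
            // SQLite3Error (and TooMuchData/TooLittleData) now derive from
            // DataSourceError, so low-level failures end up here as well.
            std::cerr << "data source error: " << ex.what() << "\n";
        }
    }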
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 32f52b6..70f2999 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -109,3 +109,4 @@ EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
EXTRA_DIST += testdata/test.sqlite3.nodiffs
EXTRA_DIST += testdata/rwtest.sqlite3
+EXTRA_DIST += testdata/diffs.sqlite3
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
index 5b2c91a..64ad25f 100644
--- a/src/lib/datasrc/tests/client_unittest.cc
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <utility>
+
#include <datasrc/client.h>
#include <dns/name.h>
@@ -32,9 +34,16 @@ public:
virtual FindResult findZone(const isc::dns::Name&) const {
return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
}
- virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool, bool)
+ const
+ {
return (ZoneUpdaterPtr());
}
+ virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const {
+ isc_throw(isc::NotImplemented, "Journaling isn't supported "
+ "in Nop data source");
+ }
};
class ClientTest : public ::testing::Test {
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 1a471bf..920c9a2 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,10 +12,15 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdlib.h>
+
#include <boost/shared_ptr.hpp>
+#include <boost/lexical_cast.hpp>
#include <gtest/gtest.h>
+#include <exceptions/exceptions.h>
+
#include <dns/name.h>
#include <dns/rrttl.h>
#include <dns/rrset.h>
@@ -30,6 +35,7 @@
#include <testutils/dnsmessage_test.h>
#include <map>
+#include <vector>
using namespace isc::datasrc;
using namespace std;
@@ -37,6 +43,7 @@ using namespace std;
// for some systems.
using boost::shared_ptr;
using boost::dynamic_pointer_cast;
+using boost::lexical_cast;
using namespace isc::dns;
namespace {
@@ -255,6 +262,11 @@ public:
"This database datasource can't be iterated");
}
+ virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource doesn't support diffs");
+ }
+
virtual std::string findPreviousName(int, const std::string&) const {
isc_throw(isc::NotImplemented,
"This data source doesn't support DNSSEC");
@@ -264,6 +276,52 @@ private:
};
+/**
+ * Single journal entry in the mock database.
+ *
+ * All the members there are public for simplicity, as it only stores data.
+ * We rely on the implicitly generated copy constructor and assignment
+ * operator. The members can't be const because the assignment operator
+ * is needed when the entries are stored in vectors.
+ */
+struct JournalEntry {
+ JournalEntry(int id, uint32_t serial,
+ DatabaseAccessor::DiffOperation operation,
+ const std::string (&data)[DatabaseAccessor::DIFF_PARAM_COUNT])
+ : id_(id), serial_(serial), operation_(operation)
+ {
+ data_[DatabaseAccessor::DIFF_NAME] = data[DatabaseAccessor::DIFF_NAME];
+ data_[DatabaseAccessor::DIFF_TYPE] = data[DatabaseAccessor::DIFF_TYPE];
+ data_[DatabaseAccessor::DIFF_TTL] = data[DatabaseAccessor::DIFF_TTL];
+ data_[DatabaseAccessor::DIFF_RDATA] =
+ data[DatabaseAccessor::DIFF_RDATA];
+ }
+ JournalEntry(int id, uint32_t serial,
+ DatabaseAccessor::DiffOperation operation,
+ const std::string& name, const std::string& type,
+ const std::string& ttl, const std::string& rdata):
+ id_(id), serial_(serial), operation_(operation)
+ {
+ data_[DatabaseAccessor::DIFF_NAME] = name;
+ data_[DatabaseAccessor::DIFF_TYPE] = type;
+ data_[DatabaseAccessor::DIFF_TTL] = ttl;
+ data_[DatabaseAccessor::DIFF_RDATA] = rdata;
+ }
+ int id_;
+ uint32_t serial_;
+ DatabaseAccessor::DiffOperation operation_;
+ std::string data_[DatabaseAccessor::DIFF_PARAM_COUNT];
+ bool operator==(const JournalEntry& other) const {
+ for (size_t i(0); i < DatabaseAccessor::DIFF_PARAM_COUNT; ++ i) {
+ if (data_[i] != other.data_[i]) {
+ return false;
+ }
+ }
+ // No need to check data here, checked above
+ return (id_ == other.id_ && serial_ == other.serial_ &&
+ operation_ == other.operation_);
+ }
+};
+
/*
* A virtual database accessor that pretends it contains single zone --
* example.org.
@@ -288,6 +346,7 @@ public:
readonly_records_ = &readonly_records_master_;
update_records_ = &update_records_master_;
empty_records_ = &empty_records_master_;
+ journal_entries_ = &journal_entries_master_;
fillData();
}
@@ -296,6 +355,7 @@ public:
cloned_accessor->readonly_records_ = &readonly_records_master_;
cloned_accessor->update_records_ = &update_records_master_;
cloned_accessor->empty_records_ = &empty_records_master_;
+ cloned_accessor->journal_entries_ = &journal_entries_master_;
latest_clone_ = cloned_accessor;
return (cloned_accessor);
}
@@ -495,6 +555,29 @@ private:
}
}
};
+ class MockDiffIteratorContext : public IteratorContext {
+ const vector<JournalEntry> diffs_;
+ vector<JournalEntry>::const_iterator it_;
+ public:
+ MockDiffIteratorContext(const vector<JournalEntry>& diffs) :
+ diffs_(diffs), it_(diffs_.begin())
+ {}
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ if (it_ == diffs_.end()) {
+ return (false);
+ }
+ data[DatabaseAccessor::NAME_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_NAME];
+ data[DatabaseAccessor::TYPE_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_TYPE];
+ data[DatabaseAccessor::TTL_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_TTL];
+ data[DatabaseAccessor::RDATA_COLUMN] =
+ (*it_).data_[DatabaseAccessor::DIFF_RDATA];
+ ++it_;
+ return (true);
+ }
+ };
public:
virtual IteratorContextPtr getAllRecords(int id) const {
if (id == READONLY_ZONE_ID) {
@@ -544,7 +627,13 @@ public:
*update_records_ = *readonly_records_;
}
- return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ if (zone_name == "bad.example.org.") {
+ return (pair<bool, int>(true, -1));
+ } else if (zone_name == "null.example.org.") {
+ return (pair<bool, int>(true, 13));
+ } else {
+ return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ }
}
virtual void commit() {
*readonly_records_ = *update_records_;
@@ -658,6 +747,70 @@ public:
isc_throw(isc::Unexpected, "Unknown zone ID");
}
}
+ virtual void addRecordDiff(int id, uint32_t serial,
+ DiffOperation operation,
+ const std::string (&data)[DIFF_PARAM_COUNT])
+ {
+ if (id == 13) { // The null zone doesn't support journaling
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == -1) { // Bad zone throws
+ isc_throw(DataSourceError, "Test error");
+ } else {
+ journal_entries_->push_back(JournalEntry(id, serial, operation,
+ data));
+ }
+ }
+
+ virtual IteratorContextPtr getDiffs(int id, uint32_t start,
+ uint32_t end) const
+ {
+ vector<JournalEntry> selected_jnl;
+
+ for (vector<JournalEntry>::const_iterator it =
+ journal_entries_->begin();
+ it != journal_entries_->end(); ++it)
+ {
+ // For simplicity we assume this method is called for the
+ // "readonly" zone possibly after making updates on the "writable"
+ // copy and committing them.
+ if (id != READONLY_ZONE_ID) {
+ continue;
+ }
+
+ // Note: the following logic is not 100% accurate in terms of
+ // serial number arithmetic; we prefer brevity for testing.
+ // Skip until we see the starting serial. Once we started
+ // recording this condition is ignored (to support wrap-around
+ // case). Also, it ignores the RR type; it only checks the
+ // versions.
+ if ((*it).serial_ < start && selected_jnl.empty()) {
+ continue;
+ }
+ if ((*it).serial_ > end) { // gone over the end serial. we're done.
+ break;
+ }
+ selected_jnl.push_back(*it);
+ }
+
+ // Check if we've found the requested range. If not, throw.
+ if (selected_jnl.empty() || selected_jnl.front().serial_ != start ||
+ selected_jnl.back().serial_ != end) {
+ isc_throw(NoSuchSerial, "requested diff range is not found");
+ }
+
+ return (IteratorContextPtr(new MockDiffIteratorContext(selected_jnl)));
+ }
+
+ // Check the journal is as expected and clear the journal
+ void checkJournal(const std::vector<JournalEntry> &expected) const {
+ std::vector<JournalEntry> journal;
+ // Clean the journal, but keep local copy to check
+ journal.swap(*journal_entries_);
+ ASSERT_EQ(expected.size(), journal.size());
+ for (size_t i(0); i < expected.size(); ++ i) {
+ EXPECT_TRUE(expected[i] == journal[i]);
+ }
+ }
private:
// The following member variables are storage and/or update work space
@@ -677,6 +830,10 @@ private:
const Domains empty_records_master_;
const Domains* empty_records_;
+ // The journal data
+ std::vector<JournalEntry> journal_entries_master_;
+ std::vector<JournalEntry>* journal_entries_;
+
// used as temporary storage after searchForRecord() and during
// getNextRecord() calls, as well as during the building of the
// fake data
@@ -794,6 +951,10 @@ public:
rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
rrset_->addRdata(rdata::createRdata(rrset_->getType(),
rrset_->getClass(), "192.0.2.2"));
+ soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+ soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
// And its RRSIG. Also different from the configured one.
rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
@@ -810,6 +971,24 @@ public:
* times per test.
*/
void createClient() {
+ // To make sure we always have empty diffs table at the beginning of
+ // each test, we re-install the writable data source here.
+ // Note: this is SQLite3 specific and a waste (though otherwise
+ // harmless) for other types of data sources. If and when we support
+ // more types of data sources in this test framework, we should
+ // probably move this to some specialized templated method specific
+ // to SQLite3 (or for even a longer term we should add an API to
+ // purge the diffs table).
+ const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ "/rwtest.sqlite3 " TEST_DATA_BUILDDIR
+ "/rwtest.sqlite3.copied";
+ if (system(install_cmd) != 0) {
+            // Any exception will do; this is a failure in test setup, but
+            // it's nice to show the failing command, and it shouldn't be
+            // caught.
+ isc_throw(isc::Exception,
+ "Error setting up; command failed: " << install_cmd);
+ }
+
current_accessor_ = new ACCESSOR_TYPE();
is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
client_.reset(new DatabaseClient(qclass_,
@@ -875,6 +1054,48 @@ public:
}
}
+ void checkJournal(const vector<JournalEntry>& expected) {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ mock_accessor->checkJournal(expected);
+ } else {
+ // For other generic databases, retrieve the diff using the
+ // reader class and compare the resulting sequence of RRset.
+ // For simplicity we only consider the case where the expected
+ // sequence is not empty.
+ ASSERT_FALSE(expected.empty());
+ const Name zone_name(expected.front().
+ data_[DatabaseAccessor::DIFF_NAME]);
+ ZoneJournalReaderPtr jnl_reader =
+ client_->getJournalReader(zone_name,
+ expected.front().serial_,
+ expected.back().serial_).second;
+ ASSERT_TRUE(jnl_reader);
+ ConstRRsetPtr rrset;
+ vector<JournalEntry>::const_iterator it = expected.begin();
+ for (rrset = jnl_reader->getNextDiff();
+ rrset && it != expected.end();
+ rrset = jnl_reader->getNextDiff(), ++it) {
+ typedef DatabaseAccessor Accessor;
+ RRsetPtr expected_rrset(
+ new RRset(Name((*it).data_[Accessor::DIFF_NAME]),
+ qclass_,
+ RRType((*it).data_[Accessor::DIFF_TYPE]),
+ RRTTL((*it).data_[Accessor::DIFF_TTL])));
+ expected_rrset->addRdata(
+ rdata::createRdata(expected_rrset->getType(),
+ expected_rrset->getClass(),
+ (*it).data_[Accessor::DIFF_RDATA]));
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+ }
+ // We should have examined all entries of both expected and
+ // actual data.
+ EXPECT_TRUE(it == expected.end());
+ ASSERT_FALSE(rrset);
+ }
+ }
+
// Some tests only work for MockAccessor. We remember whether our accessor
// is of that type.
bool is_mock_;
@@ -895,6 +1116,7 @@ public:
const RRTTL rrttl_; // commonly used RR TTL
RRsetPtr rrset_; // for adding/deleting an RRset
RRsetPtr rrsigset_; // for adding/deleting an RRset
+ RRsetPtr soa_; // for adding/deleting an RRset
// update related objects to be tested
ZoneUpdaterPtr updater_;
@@ -1246,8 +1468,8 @@ TEST_F(MockDatabaseClientTest, ttldiff) {
// Unless we ask for individual RRs in our iterator request. In that case
// every RR should go into its own 'rrset'
-TEST_F(MockDatabaseClientTest, ttldiff_no_adjust_ttl) {
- ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), false));
+TEST_F(MockDatabaseClientTest, ttldiff_separate_rrs) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), true));
// Walk through the full iterator, we should see 1 rrset with name
// ttldiff1.example.org., and two rdatas. Same for ttldiff2
@@ -2703,4 +2925,336 @@ TEST_F(MockDatabaseClientTest, badName) {
DataSourceError);
}
+/*
+ * Test correct use of the updater with a journal.
+ */
+TYPED_TEST(DatabaseClientTest, journal) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ this->updater_->deleteRRset(*this->soa_);
+ this->updater_->deleteRRset(*this->rrset_);
+ this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+ this->soa_->getClass(),
+ "ns1.example.org. "
+ "admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ this->updater_->addRRset(*this->soa_);
+ this->updater_->addRRset(*this->rrset_);
+ ASSERT_NO_THROW(this->updater_->commit());
+ std::vector<JournalEntry> expected;
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "www.example.org.", "A", "3600",
+ "192.0.2.2"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+ DatabaseAccessor::DIFF_ADD,
+ "www.example.org.", "A", "3600",
+ "192.0.2.2"));
+ this->checkJournal(expected);
+}
+
+/*
+ * Push multiple delete-add sequences. Checks it is allowed and all is
+ * saved.
+ */
+TYPED_TEST(DatabaseClientTest, journalMultiple) {
+ std::vector<JournalEntry> expected;
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ std::string soa_rdata = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ for (size_t i(1); i < 100; ++ i) {
+ // Remove the old SOA
+ this->updater_->deleteRRset(*this->soa_);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ soa_rdata));
+ // Create a new SOA
+ soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+ this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+ this->soa_->getClass(),
+ soa_rdata));
+ // Add the new SOA
+ this->updater_->addRRset(*this->soa_);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ soa_rdata));
+ }
+ ASSERT_NO_THROW(this->updater_->commit());
+ // Check the journal contains everything.
+ this->checkJournal(expected);
+}
+
+/*
+ * Test passing a forbidden sequence to it and expect it to throw.
+ *
+ * Note that other test cases (those for add and delete) implicitly check
+ * that the ordering is not enforced when journaling is disabled.
+ *
+ * In this test we don't check with the real databases as this case shouldn't
+ * contain backend specific behavior.
+ */
+TEST_F(MockDatabaseClientTest, journalBadSequence) {
+ std::vector<JournalEntry> expected;
+ {
+ SCOPED_TRACE("Delete A before SOA");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
+ isc::BadValue);
+ // Make sure the journal is empty now
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add before delete");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+ // Make sure the journal is empty now
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add A before SOA");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // But we miss the add SOA here
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), isc::BadValue);
+ // Make sure the journal contains only the first one
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_DELETE,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Commit before add");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Commit at the wrong time
+        EXPECT_THROW(this->updater_->commit(), isc::BadValue);
+        this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Delete two SOAs");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Delete the SOA again
+ EXPECT_THROW(this->updater_->deleteRRset(*this->soa_), isc::BadValue);
+ this->checkJournal(expected);
+ }
+
+ {
+ SCOPED_TRACE("Add two SOAs");
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+ // So far OK
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+ // Still OK
+ EXPECT_NO_THROW(this->updater_->addRRset(*this->soa_));
+ // But this one is added again
+ EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+ expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+ DatabaseAccessor::DIFF_ADD,
+ "example.org.", "SOA", "3600",
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ this->checkJournal(expected);
+ }
+}
+
+/*
+ * Test it rejects to store journals when we request it together with
+ * erasing the whole zone.
+ */
+TYPED_TEST(DatabaseClientTest, journalOnErase) {
+ EXPECT_THROW(this->client_->getUpdater(this->zname_, true, true),
+ isc::BadValue);
+}
+
+/*
+ * Check that exception is propagated when the journal is not implemented.
+ */
+TEST_F(MockDatabaseClientTest, journalNotImplemented) {
+ updater_ = client_->getUpdater(Name("null.example.org"), false, true);
+ EXPECT_THROW(updater_->deleteRRset(*soa_), isc::NotImplemented);
+ soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+ soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419201 7200"));
+ EXPECT_THROW(updater_->addRRset(*soa_), isc::NotImplemented);
+}
+
+/*
+ * Test that different exceptions are propagated.
+ */
+TEST_F(MockDatabaseClientTest, journalException) {
+ updater_ = client_->getUpdater(Name("bad.example.org"), false, true);
+ EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
+}
+
+//
+// Tests for the ZoneJournalReader
+//
+
+// Install a simple, commonly used diff sequence: making an update from one
+// SOA to another. Return the end SOA RRset for the convenience of the caller.
+ConstRRsetPtr
+makeSimpleDiff(DataSourceClient& client, const Name& zname,
+ const RRClass& rrclass, ConstRRsetPtr begin_soa)
+{
+ ZoneUpdaterPtr updater = client.getUpdater(zname, false, true);
+ updater->deleteRRset(*begin_soa);
+ RRsetPtr soa_end(new RRset(zname, rrclass, RRType::SOA(), RRTTL(3600)));
+ soa_end->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
+ "ns1.example.org. admin.example.org. "
+ "1235 3600 1800 2419200 7200"));
+ updater->addRRset(*soa_end);
+ updater->commit();
+
+ return (soa_end);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReader) {
+ // Check the simple case made by makeSimpleDiff.
+ ConstRRsetPtr soa_end = makeSimpleDiff(*this->client_, this->zname_,
+ this->qclass_, this->soa_);
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(this->zname_, 1234, 1235);
+ EXPECT_EQ(ZoneJournalReader::SUCCESS, result.first);
+ ZoneJournalReaderPtr jnl_reader = result.second;
+ ASSERT_TRUE(jnl_reader);
+ ConstRRsetPtr rrset = jnl_reader->getNextDiff();
+ ASSERT_TRUE(rrset);
+ isc::testutils::rrsetCheck(this->soa_, rrset);
+ rrset = jnl_reader->getNextDiff();
+ ASSERT_TRUE(rrset);
+ isc::testutils::rrsetCheck(soa_end, rrset);
+ rrset = jnl_reader->getNextDiff();
+ ASSERT_FALSE(rrset);
+
+ // Once it reaches the end of the sequence, further read attempt will
+ // result in exception.
+ EXPECT_THROW(jnl_reader->getNextDiff(), isc::InvalidOperation);
+}
+
+TYPED_TEST(DatabaseClientTest, readLargeJournal) {
+ // Similar to journalMultiple, but check that at a higher level.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+
+ vector<ConstRRsetPtr> expected;
+ for (size_t i = 0; i < 100; ++i) {
+ // Create the old SOA and remove it, and record it in the expected list
+ RRsetPtr rrset1(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ string soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+ rrset1->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ soa_rdata));
+ this->updater_->deleteRRset(*rrset1);
+ expected.push_back(rrset1);
+
+ // Create a new SOA, add it, and record it.
+ RRsetPtr rrset2(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ soa_rdata = "ns1.example.org. admin.example.org. " +
+ lexical_cast<std::string>(1234 + i + 1) +
+ " 3600 1800 2419200 7200";
+ rrset2->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ soa_rdata));
+ this->updater_->addRRset(*rrset2);
+ expected.push_back(rrset2);
+ }
+ this->updater_->commit();
+
+ ZoneJournalReaderPtr jnl_reader(this->client_->getJournalReader(
+ this->zname_, 1234, 1334).second);
+ ConstRRsetPtr actual;
+ int i = 0;
+ while ((actual = jnl_reader->getNextDiff()) != NULL) {
+ isc::testutils::rrsetCheck(expected.at(i++), actual);
+ }
+ EXPECT_EQ(expected.size(), i); // we should have eaten all expected data
+}
+
+TYPED_TEST(DatabaseClientTest, readJournalForNoRange) {
+ makeSimpleDiff(*this->client_, this->zname_, this->qclass_, this->soa_);
+
+ // The specified range does not exist in the diff storage. The factory
+ // method should result in NO_SUCH_VERSION
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(this->zname_, 1200, 1235);
+ EXPECT_EQ(ZoneJournalReader::NO_SUCH_VERSION, result.first);
+ EXPECT_FALSE(result.second);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReaderForNXZone) {
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ this->client_->getJournalReader(Name("nosuchzone"), 0, 1);
+ EXPECT_EQ(ZoneJournalReader::NO_SUCH_ZONE, result.first);
+ EXPECT_FALSE(result.second);
+}
+
+// A helper function for journalWithBadData. It installs a simple diff
+// from one serial (of 'begin') to another ('begin' + 1), tweaking a specified
+// field of data with some invalid value.
+void
+installBadDiff(MockAccessor& accessor, uint32_t begin,
+ DatabaseAccessor::DiffRecordParams modify_param,
+ const char* const data)
+{
+ string data1[] = {"example.org.", "SOA", "3600", "ns. root. 1 1 1 1 1"};
+ string data2[] = {"example.org.", "SOA", "3600", "ns. root. 2 1 1 1 1"};
+ data1[modify_param] = data;
+ accessor.addRecordDiff(READONLY_ZONE_ID, begin,
+ DatabaseAccessor::DIFF_DELETE, data1);
+ accessor.addRecordDiff(READONLY_ZONE_ID, begin + 1,
+ DatabaseAccessor::DIFF_ADD, data2);
+}
+
+TEST_F(MockDatabaseClientTest, journalWithBadData) {
+ MockAccessor& mock_accessor =
+ dynamic_cast<MockAccessor&>(*current_accessor_);
+
+ // One of the fields from the data source is broken as an RR parameter.
+ // The journal reader should still be constructed, but getNextDiff()
+ // should result in exception.
+ installBadDiff(mock_accessor, 1, DatabaseAccessor::DIFF_NAME,
+ "example..org");
+ installBadDiff(mock_accessor, 3, DatabaseAccessor::DIFF_TYPE,
+ "bad-rrtype");
+ installBadDiff(mock_accessor, 5, DatabaseAccessor::DIFF_TTL,
+ "bad-ttl");
+ installBadDiff(mock_accessor, 7, DatabaseAccessor::DIFF_RDATA,
+ "bad rdata");
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 1, 2).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 3, 4).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 5, 6).
+ second->getNextDiff(), DataSourceError);
+ EXPECT_THROW(this->client_->getJournalReader(this->zname_, 7, 8).
+ second->getNextDiff(), DataSourceError);
+}
+
}
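The journal tests above boil down to one calling pattern: open an updater with
journaling enabled, delete the old SOA, apply the deletions, add the new SOA,
apply the additions, then commit. A minimal sketch of that pattern, with
illustrative zone name, serials and RDATA (applySimpleIXFR() is not part of the
branch, and the "old" RDATA must of course match what the zone actually holds):

    // Sketch only: applySimpleIXFR() is illustrative.
    #include <datasrc/client.h>
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <dns/rrttl.h>
    #include <dns/rrset.h>
    #include <dns/rdata.h>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    applySimpleIXFR(DataSourceClient& client) {
        const Name zname("example.org");
        const RRClass rrclass(RRClass::IN());

        // The third argument enables journaling: every change is recorded in
        // the diffs table and IXFR-style ordering (delete old SOA, deletions,
        // add new SOA, additions) is enforced.
        ZoneUpdaterPtr updater = client.getUpdater(zname, false, true);

        RRsetPtr old_soa(new RRset(zname, rrclass, RRType::SOA(),
                                   RRTTL(3600)));
        old_soa->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
            "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200"));
        updater->deleteRRset(*old_soa);   // the old SOA must be deleted first

        RRsetPtr new_soa(new RRset(zname, rrclass, RRType::SOA(),
                                   RRTTL(3600)));
        new_soa->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
            "ns1.example.org. admin.example.org. 1235 3600 1800 2419200 7200"));
        updater->addRRset(*new_soa);

        updater->commit();   // an incomplete sequence throws isc::BadValue
    }

Breaking that order, or committing an incomplete sequence, is exactly what the
journalBadSequence test above expects to fail with isc::BadValue.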
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 2b854db..a1bd94e 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -177,6 +177,54 @@ TEST_F(InMemoryClientTest, iterator) {
EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
}
+TEST_F(InMemoryClientTest, iterator_separate_rrs) {
+ // Exactly the same tests as for iterator, but now with separate_rrs = true
+ // For the one that returns actual data, the AAAA should now be split up
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr aRRsetAAAA_r1(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA_r1->addRdata(rdata::in::AAAA("2001:db8::1"));
+ RRsetPtr aRRsetAAAA_r2(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA_r2->addRdata(rdata::in::AAAA("2001:db8::2"));
+
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b"), true), DataSourceError);
+ // This zone is not there either, even when there's a zone containing this
+    EXPECT_THROW(memory_client.getIterator(Name("x.a"), true),
+                 DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a"), true));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+
+ ASSERT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ ASSERT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ ASSERT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with full zone, one by one.
+ // It should be in ascending order in case of InMemory data source
+ // (isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"), true);
+ EXPECT_EQ(aRRsetA->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(aRRsetAAAA_r1->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(aRRsetAAAA_r2->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(subRRsetA->toText(), iterator->getNextRRset()->toText());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
TEST_F(InMemoryClientTest, getZoneCount) {
EXPECT_EQ(0, memory_client.getZoneCount());
memory_client.addZone(
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 90b2ac1..61341f6 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -46,6 +46,7 @@ std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
std::string SQLITE_DBFILE_MEMORY = ":memory:";
std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+std::string SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
// The following file must be non existent and must be non"creatable";
// the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -116,6 +117,26 @@ TEST_F(SQLite3AccessorTest, noClass) {
EXPECT_FALSE(accessor->getZone("example.com.").first);
}
+// Simple check to test that the sequence is valid. It gets the next record
+// from the iterator, checks that one is available, then checks the data.
+void checkRR(DatabaseAccessor::IteratorContextPtr& context,
+ std::string name, std::string ttl, std::string type, std::string rdata) {
+
+ // Mark where we are in the text
+ SCOPED_TRACE(name + " " + ttl + " " + type + " " + rdata);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+ // Get next record
+ EXPECT_TRUE(context->getNext(data));
+
+ // ... and check expected values
+ EXPECT_EQ(name, data[DatabaseAccessor::NAME_COLUMN]);
+ EXPECT_EQ(ttl, data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(type, data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(rdata, data[DatabaseAccessor::RDATA_COLUMN]);
+}
+
// This tests the iterator context
TEST_F(SQLite3AccessorTest, iterator) {
// Our test zone is conveniently small, but not empty
@@ -130,80 +151,138 @@ TEST_F(SQLite3AccessorTest, iterator) {
ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
std::string data[DatabaseAccessor::COLUMN_COUNT];
- // Get and check the first and only record
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ checkRR(context, "example.org.", "3600", "MX", "10 mail.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns1.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns2.example.org.");
+ checkRR(context, "example.org.", "3600", "NS", "ns3.example.org.");
+ checkRR(context, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+ checkRR(context, "dname.example.org.", "3600", "DNAME",
+ "dname.example.info.");
+ checkRR(context, "dname2.foo.example.org.", "3600", "DNAME",
+ "dname2.example.info.");
+ checkRR(context, "mail.example.org.", "3600", "A", "192.0.2.10");
+ checkRR(context, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+ checkRR(context, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+ checkRR(context, "www.example.org.", "3600", "A", "192.0.2.1");
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Check there's no other
+ EXPECT_FALSE(context->getNext(data));
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
+}
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns1.example.org. admin.example.org. "
- "1234 3600 1800 2419200 7200",
- data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// This tests the difference iterator context
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// Test that an attempt to create a difference iterator for a serial number
+// that does not exist throws an exception.
+TEST_F(SQLite3AccessorTest, diffIteratorNoRecords) {
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_DIFFS, "IN");
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Get the iterator context. Difference of version 1 does not exist, so
+ // this should throw an exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second, 1, 1234),
+ isc::datasrc::NoSuchSerial);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ // Check that an invalid high version number also throws an exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second, 1231, 2234),
+ NoSuchSerial);
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Check that valid versions - but for the wrong zone, which does not
+    // hold any records - also throw this exception.
+ EXPECT_THROW(accessor->getDiffs(zone_info.second + 42, 1231, 1234),
+ NoSuchSerial);
- // Check there's no other
- EXPECT_FALSE(context->getNext(data));
+}
- // And make sure calling it again won't cause problems.
- EXPECT_FALSE(context->getNext(data));
+// Try to iterate through a valid sets of differences
+TEST_F(SQLite3AccessorTest, diffIteratorSequences) {
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_DIFFS, "IN");
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
+
+
+ // Check the difference sequence 1230-1231 (two adjacent differences)
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context1(accessor->getDiffs(zone_info.second, 1230, 1231));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context1);
+
+ // Change: 1230-1231
+ checkRR(context1, "example.org.", "1800", "SOA",
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+ checkRR(context1, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+ // Check there's no other and that calling it again after no records doesn't
+ // cause problems.
+ EXPECT_FALSE(context1->getNext(data));
+ EXPECT_FALSE(context1->getNext(data));
+
+
+ // Check that the difference sequence 1231-1233 (two separate difference
+ // sequences) is OK.
+ DatabaseAccessor::IteratorContextPtr
+ context2(accessor->getDiffs(zone_info.second, 1231, 1233));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+ // Change 1231-1232
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+ checkRR(context2, "unused.example.org.", "3600", "A", "192.0.2.102");
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+ // Change: 1232-1233
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+ checkRR(context2, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+ checkRR(context2, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+ checkRR(context2, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+
+ // Check there's no other and that calling it again after no records doesn't
+ // cause problems.
+ EXPECT_FALSE(context2->getNext(data));
+ EXPECT_FALSE(context2->getNext(data));
+
+
+ // Check that the difference sequence 4294967280 to 1230 (serial number
+ // rollover) is OK
+ DatabaseAccessor::IteratorContextPtr
+ context3(accessor->getDiffs(zone_info.second, 4294967280U, 1230));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context3);
+
+ // Change 4294967280 to 1230.
+ checkRR(context3, "example.org.", "3600", "SOA",
+ "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+ checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.31");
+ checkRR(context3, "example.org.", "1800", "SOA",
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+ checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.21");
+
+ EXPECT_FALSE(context3->getNext(data));
+ EXPECT_FALSE(context3->getNext(data));
+
+
+ // Check the difference sequence 1233-1231 (versions in wrong order). This
+ // should give an empty difference set.
+ DatabaseAccessor::IteratorContextPtr
+ context4(accessor->getDiffs(zone_info.second, 1233, 1231));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context4);
+
+ EXPECT_FALSE(context4->getNext(data));
+ EXPECT_FALSE(context4->getNext(data));
}
TEST(SQLite3Open, getDBNameExample2) {
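One level above the raw diff rows these accessor tests verify, the new
ZoneJournalReader interface (declared in zone.h below) hands the same data back
as a sequence of RRsets. A rough consumer-side sketch, assuming a client backed
by a diff-capable data source; printDiffs(), the zone name and the serial range
are illustrative:

    // Sketch only: printDiffs() is not part of this branch.
    #include <iostream>
    #include <utility>

    #include <datasrc/client.h>
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrset.h>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    printDiffs(DataSourceClient& client) {
        const std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
            result = client.getJournalReader(Name("example.org"), 1230, 1231);
        if (result.first != ZoneJournalReader::SUCCESS) {
            return;   // NO_SUCH_ZONE or NO_SUCH_VERSION; no reader returned
        }
        ZoneJournalReaderPtr reader = result.second;

        // Each call yields one RR of the IXFR-style sequence, beginning with
        // the old SOA; a null pointer marks the end of the sequence.
        for (ConstRRsetPtr rrset = reader->getNextDiff(); rrset;
             rrset = reader->getNextDiff()) {
            std::cout << rrset->toText();
        }
    }

Reading past the end of the sequence raises isc::InvalidOperation, as the
journalReader test above demonstrates.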
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
index 64ae955..6a35fe3 100644
--- a/src/lib/datasrc/tests/testdata/Makefile.am
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -1,6 +1 @@
CLEANFILES = *.copied
-BUILT_SOURCES = rwtest.sqlite3.copied
-
-# We use install-sh with the -m option to make sure it's writable
-rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
- $(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 b/src/lib/datasrc/tests/testdata/brokendb.sqlite3
index 7aad3af..63f3cc5 100644
Binary files a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 and b/src/lib/datasrc/tests/testdata/brokendb.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs.sqlite3 b/src/lib/datasrc/tests/testdata/diffs.sqlite3
new file mode 100644
index 0000000..3820563
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/diffs.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs_table.sql b/src/lib/datasrc/tests/testdata/diffs_table.sql
new file mode 100644
index 0000000..0e05207
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/diffs_table.sql
@@ -0,0 +1,123 @@
+-- Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+--
+-- Permission to use, copy, modify, and/or distribute this software for any
+-- purpose with or without fee is hereby granted, provided that the above
+-- copyright notice and this permission notice appear in all copies.
+--
+-- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+-- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+-- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+-- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+-- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+-- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+-- PERFORMANCE OF THIS SOFTWARE.
+
+-- \brief Create Differences Table
+--
+-- This is a short-term solution to creating the differences table for testing
+-- purposes.
+--
+-- It is assumed that the database used is a copy of the "example.org.sqlite3"
+-- database in this test directory. The diffs table is created and populated
+-- with a set of RRs that purport to represent differences ending in the
+-- zone as it currently stands.
+--
+-- The file can be executed by the command:
+-- % sqlite3 -init <this-file> <database-file> ".quit"
+--
+-- The SQL statements in this file are executed against the database file;
+-- the ".quit" on the command line then exits SQLite3.
+
+-- Create the diffs table
+DROP TABLE diffs;
+CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ version INTEGER NOT NULL,
+ operation INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rrtype STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdata STRING NOT NULL);
+
+-- Populate it. A dummy zone_id is used for now - this will be updated last of
+-- all.
+
+-- Change from 4294967280 (0xfffffff0) to 1230 to show serial rollover
+-- Update one record in the zone.
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 4294967280, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 4294967280, 1, "www.example.org.", "A", 3600, "192.0.2.31");
+
+-- Records added in version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 0, "example.org.", "SOA", 1800,
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 0, "www.example.org.", "A", 3600, "192.0.2.21");
+
+-- Change 1230 to 1231: Change a parameter of the SOA record
+-- Records removed from version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1230, 1, "example.org.", "SOA", 1800,
+ "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+
+-- Records added in version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+
+-- Change 1231 to 1232: Remove one record, don't add anything.
+-- Records removed from version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1231, 1, "unused.example.org.", "A", 3600, "192.0.2.102");
+
+-- Records added in version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1232, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Change 1232 to 1233: Add two, don't remove anything.
+-- Records removed from version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1232, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Records added in version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "sub.example.org.", "NS", 3600, "ns.sub.example.org.");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 0, "ns.sub.example.org.", "A", 3600, "192.0.2.101");
+
+
+-- Change 1233 to 1234: change addresses of two A records
+-- Records removed from version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "www.example.org.", "A", 3600, "192.0.2.21");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1233, 1, "mail.example.org.", "A", 3600, "192.0.2.210");
+
+-- Records added in version 1234 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "www.example.org.", "A", 3600, "192.0.2.1");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+ VALUES(1, 1234, 0, "mail.example.org.", "A", 3600, "192.0.2.10");
+
+-- Finally, update the zone_id in the diffs table with what is actually
+-- in the zone table.
+UPDATE diffs SET zone_id =
+ (SELECT id FROM ZONES LIMIT 1);
diff --git a/src/lib/datasrc/tests/testdata/example.org.sqlite3 b/src/lib/datasrc/tests/testdata/example.org.sqlite3
index 070012f..60e6e05 100644
Binary files a/src/lib/datasrc/tests/testdata/example.org.sqlite3 and b/src/lib/datasrc/tests/testdata/example.org.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 b/src/lib/datasrc/tests/testdata/example2.com.sqlite3
index 8d3bb34..9da7d0e 100644
Binary files a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 and b/src/lib/datasrc/tests/testdata/example2.com.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
index ce95a1d..ccbb884 100644
Binary files a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test-root.sqlite3 b/src/lib/datasrc/tests/testdata/test-root.sqlite3
index 7cc6195..c1dae47 100644
Binary files a/src/lib/datasrc/tests/testdata/test-root.sqlite3 and b/src/lib/datasrc/tests/testdata/test-root.sqlite3 differ
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index fa1c744..9fcd289 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -438,6 +438,10 @@ public:
/// calls after \c commit() the implementation must throw a
/// \c DataSourceError exception.
///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to add the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In that case isc::BadValue is thrown.
+ ///
/// \todo As noted above we may have to revisit the design details as we
/// gain experiences:
///
@@ -454,6 +458,8 @@ public:
///
/// \exception DataSourceError Called after \c commit(), RRset is invalid
/// (see above), internal data source error
+ /// \exception isc::BadValue Journaling is enabled and the current RRset
+ /// doesn't fit into the IXFR sequence (see above).
/// \exception std::bad_alloc Resource allocation failure
///
/// \param rrset The RRset to be added
@@ -503,6 +509,10 @@ public:
/// calls after \c commit() the implementation must throw a
/// \c DataSourceError exception.
///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to delete the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In that case isc::BadValue is thrown.
+ ///
/// \todo As noted above we may have to revisit the design details as we
/// gain experiences:
///
@@ -520,6 +530,8 @@ public:
///
/// \exception DataSourceError Called after \c commit(), RRset is invalid
/// (see above), internal data source error
+ /// \exception isc::BadValue Journaling is enabled and the current RRset
+ /// doesn't fit into the IXFR sequence (see above).
/// \exception std::bad_alloc Resource allocation failure
///
/// \param rrset The RRset to be deleted
@@ -540,12 +552,106 @@ public:
///
/// \exception DataSourceError Duplicate call of the method,
/// internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the update is not
+    /// a complete IXFR sequence.
virtual void commit() = 0;
};
/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+/// The base class for retrieving differences between two versions of a zone.
+///
+/// On construction, each derived class object will internally set up
+/// retrieving sequences of differences between two specific version of
+/// retrieving sequences of differences between two specific versions of
+/// of a derived class would normally take parameters to identify the zone
+/// and the two versions for which the differences should be retrieved.
+/// See \c DataSourceClient::getJournalReader for more concrete details
+/// used in this API.
+///
+/// Once constructed, an object of this class will act like an iterator
+/// over the sequences. Every time the \c getNextDiff() method is called
+/// it returns one element of the differences in the form of an \c RRset
+/// until it reaches the end of the entire sequences.
+class ZoneJournalReader {
+public:
+ /// Result codes used by a factory method for \c ZoneJournalReader
+ enum Result {
+ SUCCESS, ///< A \c ZoneJournalReader object successfully created
+ NO_SUCH_ZONE, ///< Specified zone does not exist in the data source
+ NO_SUCH_VERSION ///< Specified versions do not exist in the diff storage
+ };
+
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as protected to ensure that this base
+ /// class is never instantiated directly.
+ ZoneJournalReader() {}
+
+public:
+ /// The destructor
+ virtual ~ZoneJournalReader() {}
+
+ /// Return the next difference RR of difference sequences.
+ ///
+ /// In this API, the difference between two versions of a zone is
+ /// conceptually represented as IXFR-style difference sequences:
+ /// Each difference sequence is a sequence of RRs: an older version of
+ /// SOA (to be deleted), zero or more other deleted RRs, the
+ /// post-transaction SOA (to be added), and zero or more other
+ /// added RRs. (Note, however, that the underlying data source
+ /// implementation may or may not represent the difference in a
+ /// straightforward realization of this concept. The mapping between
+ /// the conceptual difference and the actual implementation is hidden
+ /// in each derived class).
+ ///
+ /// This method provides an application with a higher level interface
+ /// to retrieve the difference along with the conceptual model: the
+ /// \c ZoneJournalReader object iterates over the entire sequences
+ /// from the beginning SOA (which is to be deleted) to one of the
+ /// added RRs of the sequence with the ending SOA, and each call to this
+ /// method returns one RR in the form of an \c RRset that contains exactly
+ /// one RDATA in the order of the sequences.
+ ///
+ /// Note that the ordering of the sequences specifies the semantics of
+ /// each difference: add or delete. For example, the first RR is to
+ /// be deleted, and the last RR is to be added. So the return value
+ /// of this method does not explicitly indicate whether the RR is to be
+ /// added or deleted.
+ ///
+ /// This method ensures the returned \c RRset represents an RR, that is,
+ /// it contains exactly one RDATA. However, it does not necessarily
+ /// ensure that the resulting sequences are in IXFR-style form.
+ /// For example, the first RR is supposed to be an SOA, and it should
+ /// normally be the case, but this interface does not necessarily require
+ /// that the derived class implementation ensure this. Normally the
+ /// differences are expected to be stored using this API (via a
+ /// \c ZoneUpdater object), and as long as that is the case and the
+ /// underlying implementation follows the requirement of the API, the
+ /// result of this method should be valid IXFR-style sequences.
+ /// So this API does not mandate the almost redundant check as part of
+ /// the interface. If the application needs to be 100% sure, it
+ /// must check the resulting sequence itself.
+ ///
+ /// Once the object reaches the end of the sequences, this method returns
+ /// \c Null. Any subsequent call will result in an exception of
+ /// class \c InvalidOperation.
+ ///
+ /// \exception InvalidOperation The method is called beyond the end of
+ /// the difference sequences.
+ /// \exception DataSourceError Underlying data is broken and the RR
+ /// cannot be created or other low level data source error.
+ ///
+ /// \return An \c RRset that contains one RDATA corresponding to the
+ /// next difference in the sequences.
+ virtual isc::dns::ConstRRsetPtr getNextDiff() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneJournalReader object.
+typedef boost::shared_ptr<ZoneJournalReader> ZoneJournalReaderPtr;
+
} // end of datasrc
} // end of isc
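
For reference, the journaling contract described in the comments above can be exercised end to end through the Python bindings added later in this commit. The following is a minimal sketch only; the sqlite3 config string, zone name and serial values are illustrative assumptions, not part of this change:

    # Minimal sketch of an IXFR-style update with journaling enabled,
    # using the Python API added in this branch; names, serials and the
    # database path are illustrative only.
    import isc.datasrc
    from isc.dns import Name, RRClass, RRType, RRTTL, RRset, Rdata

    def make_soa(serial):
        soa = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(),
                    RRTTL(3600))
        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
                            'ns1.example.com. admin.example.com. ' +
                            str(serial) + ' 3600 1800 2419200 7200'))
        return soa

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "/tmp/zone.sqlite3"}')
    updater = client.get_updater(Name('example.com'), False, True)
    updater.delete_rrset(make_soa(1234))   # old SOA must come first
    updater.add_rrset(make_soa(1235))      # then the new SOA (and added RRs)
    updater.commit()                       # an incomplete sequence would
                                           # raise isc.datasrc.Error
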
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 7ecd84f..875a957 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,6 +106,12 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
renderer.writeData(numdata_, sizeof(numdata_));
}
+uint32_t
+SOA::getSerial() const {
+ InputBuffer b(numdata_, sizeof(numdata_));
+ return (b.readUint32());
+}
+
string
SOA::toText() const {
InputBuffer b(numdata_, sizeof(numdata_));
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 3f6185e..4c6b6ec 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -34,6 +34,8 @@ public:
SOA(const Name& mname, const Name& rname, uint32_t serial,
uint32_t refresh, uint32_t retry, uint32_t expire,
uint32_t minimum);
+ /// \brief Returns the serial stored in the SOA.
+ uint32_t getSerial() const;
private:
/// Note: this is a prototype version; we may reconsider
/// this representation later.
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 63fe1f7..17498eb 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -74,4 +74,9 @@ TEST_F(Rdata_SOA_Test, toText) {
EXPECT_EQ("ns.example.com. root.example.com. "
"2010012601 3600 300 3600000 1200", rdata_soa.toText());
}
+
+TEST_F(Rdata_SOA_Test, getSerial) {
+ EXPECT_EQ(2010012601, rdata_soa.getSerial());
+}
+
}
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index 433bb7d..b68f3c4 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -126,6 +126,17 @@ public:
isc::Exception(file, line, what) {}
};
+/// \brief A generic exception that is thrown if a function is called
+/// in a prohibited way.
+///
+/// For example, this can happen if a class method is called when the object's
+/// state does not allow that particular method.
+class InvalidOperation : public Exception {
+public:
+ InvalidOperation(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
///
/// \brief A generic exception that is thrown when an unexpected
/// error condition occurs.
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index 603653b..91b7064 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -39,6 +39,7 @@ START_CMD = 'start'
STOP_CMD = 'stop'
STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
STATE_DEAD = 'dead'
STATE_STOPPED = 'stopped'
@@ -99,11 +100,18 @@ class BaseComponent:
but it is vital part of the service (like auth server). If
it fails to start or crashes in less than 10s after the first
startup, the system is brought down. If it crashes later on,
- it is restarted.
+ it is restarted (see below).
* 'dispensable' means the component should be running, but if it
doesn't start or crashes for some reason, the system simply tries
to restart it and keeps running.
+ For components that are restarted, the restarts are not always
+ immediate; if the component has run for more than
+ COMPONENT_RESTART_DELAY (10) seconds, it is restarted right
+ away. If the component has not run that long, the system waits
+ until that time has passed (since the last start) before
+ restarting the component.
+
Note that the __init__ method of child class should have these
parameters:
@@ -111,7 +119,7 @@ class BaseComponent:
The extra parameters are:
- `process` - which program should be started.
- - `address` - the address on message buss, used to talk to the
+ - `address` - the address on message bus, used to talk to the
component.
- `params` - parameters to the program.
@@ -134,6 +142,7 @@ class BaseComponent:
self.__state = STATE_STOPPED
self._kind = kind
self._boss = boss
+ self._original_start_time = None
def start(self):
"""
@@ -149,6 +158,9 @@ class BaseComponent:
logger.info(BIND10_COMPONENT_START, self.name())
self.__state = STATE_RUNNING
self.__start_time = time.time()
+ if self._original_start_time is None:
+ self._original_start_time = self.__start_time
+ self._restart_at = None
try:
self._start_internal()
except Exception as e:
@@ -187,7 +199,12 @@ class BaseComponent:
The exit code is used for logging. It might be None.
- It calles _failed_internal internally.
+ It calls _failed_internal internally.
+
+ Returns True if the process was immediately restarted, returns
+ False if the process was not restarted, either because
+ it is considered a core or needed component, or because
+ the component is to be restarted later.
"""
logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
exit_code if exit_code is not None else "unknown")
@@ -199,14 +216,47 @@ class BaseComponent:
# (including it stopped really soon)
if self._kind == 'core' or \
(self._kind == 'needed' and time.time() - STARTED_OK_TIME <
- self.__start_time):
+ self._original_start_time):
self.__state = STATE_DEAD
logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
self._boss.component_shutdown(1)
+ return False
# This means we want to restart
else:
- logger.warn(BIND10_COMPONENT_RESTART, self.name())
+ # if the component was only running for a short time, don't
+ # restart right away, but set a time at which it wants to be
+ # restarted, and return that it is to be restarted later
+ self.set_restart_time()
+ return self.restart()
+
+ def set_restart_time(self):
+ """Calculates and sets the time this component should be restarted.
+ Currently, it uses a very basic algorithm: start time +
+ COMPONENT_RESTART_DELAY (10 seconds). This algorithm may be improved upon
+ in the future.
+ """
+ self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+ def get_restart_time(self):
+ """Returns the time at which this component should be restarted."""
+ return self._restart_at
+
+ def restart(self, now = None):
+ """Restarts the component if it has a restart_time and if the value
+ of the restart_time is smaller than 'now'.
+
+ If the parameter 'now' is given, its value will be used instead
+ of calling time.time().
+
+ Returns True if the component is restarted, False if not."""
+ if now is None:
+ now = time.time()
+ if self.get_restart_time() is not None and\
+ self.get_restart_time() < now:
self.start()
+ return True
+ else:
+ return False
def running(self):
"""
@@ -283,7 +333,7 @@ class BaseComponent:
"""
pass
- def kill(self, forcefull=False):
+ def kill(self, forceful=False):
"""
Kills the component.
@@ -403,7 +453,7 @@ class Configurator:
* `special` - Some components are started in a special way. If it is
present, it specifies which class from the specials parameter should
be used to create the component. In that case, some of the following
- items might be irrelevant, depending on the special component choosen.
+ items might be irrelevant, depending on the special component chosen.
If it is not there, the basic Component class is used.
* `process` - Name of the executable to start. If it is not present,
it defaults to the identifier of the component.
@@ -460,7 +510,7 @@ class Configurator:
It is not expected that anyone would want to shutdown and then start
the configurator again, so we don't explicitly make sure that would
- work. However, we are not avare of anything that would make it not
+ work. However, we are not aware of anything that would make it not
work either.
"""
if not self._running:
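
The restart() / get_restart_time() pair added above is meant to be driven from the outside (the boss process). A hypothetical boss-side polling loop, sketched here only for illustration and not part of this commit, could look like this:

    # Hypothetical boss-side helper: retry the components whose failed()
    # call returned False until their restart window
    # (start time + COMPONENT_RESTART_DELAY) has passed.
    import time

    def retry_failed_components(failed_components):
        """Return the components that are still waiting to be restarted."""
        still_waiting = []
        now = time.time()
        for component in failed_components:
            # restart() only starts the component once its scheduled
            # restart time is earlier than 'now'
            if not component.restart(now):
                still_waiting.append(component)
        return still_waiting
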
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index bac51ff..9972200 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -57,8 +57,8 @@ class SockCreator(BaseComponent):
"""
return self.__creator.pid() if self.__creator else None
- def kill(self, forcefull=False):
- # We don't really care about forcefull here
+ def kill(self, forceful=False):
+ # We don't really care about forceful here
if self.__creator:
self.__creator.kill()
@@ -113,6 +113,11 @@ class XfrIn(Component):
Component.__init__(self, process, boss, kind, 'Xfrin', None,
boss.start_xfrin)
+class XfrOut(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Xfrout', None,
+ boss.start_xfrout)
+
class SetUID(BaseComponent):
"""
This is a pseudo-component which drops root privileges when started
@@ -130,7 +135,7 @@ class SetUID(BaseComponent):
posix.setuid(self.uid)
def _stop_internal(self): pass
- def kill(self, forcefull=False): pass
+ def kill(self, forceful=False): pass
def name(self):
return "Set UID"
@@ -154,6 +159,7 @@ def get_specials():
'cmdctl': CmdCtl,
# FIXME: Temporary workaround before #1292 is done
'xfrin': XfrIn,
+ 'xfrout': XfrOut,
# TODO: Remove when not needed, workaround before sockcreator works
'setuid': SetUID
}
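
With 'xfrout' registered above, a configurator that sees special: 'xfrout' in the configuration picks the XfrOut class instead of the plain Component. A hedged sketch of that lookup (the boss object and the component name are placeholders, not part of this change):

    # Sketch of the special-class lookup; 'boss' is a placeholder for the
    # real boss object the configurator passes in.
    from isc.bind10.special_component import get_specials

    specials = get_specials()
    component_class = specials['xfrout']   # -> XfrOut after this change
    # component = component_class('b10-xfrout', boss, 'dispensable')
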
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 15fa470..7e18e60 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -221,11 +221,6 @@ class ComponentTests(BossUtils, unittest.TestCase):
"""
Check the component restarted successfully.
- Currently, it is implemented as starting it again right away. This will
- change, it will register itself into the restart schedule in boss. But
- as the integration with boss is not clear yet, we don't know how
- exactly that will happen.
-
Reset the self.__start_called to False before calling the function when
the component should fail.
"""
@@ -237,6 +232,16 @@ class ComponentTests(BossUtils, unittest.TestCase):
# Check it can't be started again
self.assertRaises(ValueError, component.start)
+ def __check_not_restarted(self, component):
+ """
+ Check the component has not (yet) restarted successfully.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertFalse(component.running())
+
def __do_start_stop(self, kind):
"""
This is a body of a test. It creates a component of given kind,
@@ -296,7 +301,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
component.start()
self.__check_started(component)
# Pretend the component died
- component.failed(1)
+ restarted = component.failed(1)
+ # Since it is a core component, it should not be restarted
+ self.assertFalse(restarted)
# It should bring down the whole server
self.__check_dead(component)
@@ -312,7 +319,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
self.__check_started(component)
self._timeskip()
# Pretend the component died some time later
- component.failed(1)
+ restarted = component.failed(1)
+ # Should not be restarted
+ self.assertFalse(restarted)
# Check the component is still dead
self.__check_dead(component)
@@ -328,7 +337,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
component.start()
self.__check_started(component)
# Make it fail right away.
- component.failed(1)
+ restarted = component.failed(1)
+ # Should not have restarted
+ self.assertFalse(restarted)
self.__check_dead(component)
def test_start_fail_needed_later(self):
@@ -344,37 +355,65 @@ class ComponentTests(BossUtils, unittest.TestCase):
# Make it fail later on
self.__start_called = False
self._timeskip()
- component.failed(1)
+ restarted = component.failed(1)
+ # Should have restarted
+ self.assertTrue(restarted)
self.__check_restarted(component)
def test_start_fail_dispensable(self):
"""
- Start and then fail a dispensable component. Should just get restarted.
+ Start and then fail a dispensable component. Should not get restarted.
"""
# Just ordinary startup
- component = self.__create_component('needed')
+ component = self.__create_component('dispensable')
self.__check_startup(component)
component.start()
self.__check_started(component)
# Make it fail right away
- self.__start_called = False
- component.failed(1)
- self.__check_restarted(component)
+ restarted = component.failed(1)
+ # Should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
- def test_start_fail_dispensable(self):
+ def test_start_fail_dispensable_later(self):
"""
Start and then later on fail a dispensable component. Should just get
restarted.
"""
# Just ordinary startup
- component = self.__create_component('needed')
+ component = self.__create_component('dispensable')
self.__check_startup(component)
component.start()
self.__check_started(component)
# Make it fail later on
- self.__start_called = False
self._timeskip()
- component.failed(1)
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable_restart_later(self):
+ """
+ Start and then fail a dispensable component, wait a bit and try to
+ restart. Should get restarted after the wait.
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail immediately
+ restarted = component.failed(1)
+ # should signal that it did not restart
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # try to restart again
+ restarted = component.restart()
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
self.__check_restarted(component)
def test_fail_core(self):
@@ -402,14 +441,56 @@ class ComponentTests(BossUtils, unittest.TestCase):
def test_fail_dispensable(self):
"""
Failure to start a dispensable component. The exception should get
- through, but it should be restarted.
+ through, but it should be restarted after a time skip.
"""
component = self.__create_component('dispensable')
self.__check_startup(component)
component._start_internal = self.__fail_to_start
self.assertRaises(TestError, component.start)
+ # tell it to see if it must restart
+ restarted = component.restart()
+ # should not have restarted yet
+ self.assertFalse(restarted)
+ self.__check_not_restarted(component)
+ self._timeskip()
+ # tell it to see if it must restart and do so, with our vision of time
+ restarted = component.restart()
+ # should have restarted now
+ self.assertTrue(restarted)
+ self.__check_restarted(component)
+
+ def test_component_start_time(self):
+ """
+ Check that original start time is set initially, and remains the same
+ after a restart, while the internal __start_time does change
+ """
+ # Just ordinary startup
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ self.assertIsNone(component._original_start_time)
+ component.start()
+ self.__check_started(component)
+
+ self.assertIsNotNone(component._original_start_time)
+ self.assertIsNotNone(component._BaseComponent__start_time)
+ original_start_time = component._original_start_time
+ start_time = component._BaseComponent__start_time
+ # Not restarted yet, so they should be the same
+ self.assertEqual(original_start_time, start_time)
+
+ self._timeskip()
+ # Make it fail
+ restarted = component.failed(1)
+ # should signal that it restarted
+ self.assertTrue(restarted)
+ # and check if it really did
self.__check_restarted(component)
+ # original start time should not have changed
+ self.assertEqual(original_start_time, component._original_start_time)
+ # but actual start time should
+ self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
def test_bad_kind(self):
"""
Test the component rejects nonsensical kinds. This includes bad
@@ -592,7 +673,7 @@ class TestComponent(BaseComponent):
def _failed_internal(self):
self.log('failed')
- def kill(self, forcefull=False):
+ def kill(self, forceful=False):
self.log('killed')
class FailComponent(BaseComponent):
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index a5b4ca3..fb6d151 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -17,6 +17,7 @@ datasrc_la_SOURCES += client_python.cc client_python.h
datasrc_la_SOURCES += iterator_python.cc iterator_python.h
datasrc_la_SOURCES += finder_python.cc finder_python.h
datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
@@ -30,6 +31,7 @@ EXTRA_DIST = client_inc.cc
EXTRA_DIST += finder_inc.cc
EXTRA_DIST += iterator_inc.cc
EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index 6465bf3..e0c0f06 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -89,7 +89,7 @@ None\n\
";
const char* const DataSourceClient_getIterator_doc = "\
-get_iterator(name, adjust_ttl=True) -> ZoneIterator\n\
+get_iterator(name, separate_rrs=False) -> ZoneIterator\n\
\n\
Returns an iterator to the given zone.\n\
\n\
@@ -111,17 +111,18 @@ anything else.\n\
Parameters:\n\
isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
nearest match as find_zone.\n\
- adjust_ttl If True, the iterator will treat RRs with the same\n\
- name and type but different TTL values to be of the\n\
- same RRset, and will adjust the TTL to the lowest\n\
- value found. If false, it will consider the RR to\n\
- belong to a different RRset.\n\
+ separate_rrs If true, the iterator will return each RR as a\n\
+ new RRset object. If false, the iterator will\n\
+ combine consecutive RRs with the same name and\n\
+ type into one RRset. The capitalization of the RRset will\n\
+ be that of the first RR read, and TTLs will be\n\
+ adjusted to the lowest one found.\n\
\n\
Return Value(s): Pointer to the iterator.\n\
";
const char* const DataSourceClient_getUpdater_doc = "\
-get_updater(name, replace) -> ZoneUpdater\n\
+get_updater(name, replace, journaling=False) -> ZoneUpdater\n\
\n\
Return an updater to make updates to a specific zone.\n\
\n\
@@ -162,6 +163,22 @@ A data source can be \"read only\" or can prohibit partial updates. In\n\
such cases this method will result in an isc.datasrc.NotImplemented exception\n\
unconditionally or when replace is false).\n\
\n\
+If journaling is True, the data source should store a journal of\n\
+changes. These can be used later on by, for example, IXFR-out.\n\
+However, the parameter is a hint only. The data source might be unable\n\
+to store them, in which case they are silently discarded. Or it might\n\
+need to store them no matter what (for example, a git-based data source\n\
+would store the journal implicitly). When journaling is True, the\n\
+following update must be formatted as an IXFR transfer (SOA to be\n\
+removed, a bunch of RRs to be removed, SOA to be added, a bunch of RRs\n\
+to be added, and possibly repeated). However, the updater is not\n\
+required to check that. If journaling is False, the updater must not\n\
+require such ordering and must accept any order of changes.\n\
+\n\
+We don't support erasing the whole zone (by replace being True) and\n\
+saving a journal at the same time. In such a situation,\n\
+isc.datasrc.Error is thrown.\n\
+\n\
Exceptions:\n\
isc.datasrc.NotImplemented The underlying data source does not support\n\
updates.\n\
@@ -170,6 +187,63 @@ Exceptions:\n\
Parameters:\n\
name The zone name to be updated\n\
replace Whether to delete existing RRs before making updates\n\
+ journaling The zone updater should store a journal of the changes.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// pointer -> (removed)
+// Null -> None
+// exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+ (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify (if it exists) the corresponding difference between the specified\n\
+versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if the specified range of differences for\n\
+the zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of a data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method for that type of data source can throw an exception of class\n\
+isc.datasrc.NotImplemented.\n\
\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The data source does not support differences.\n\
+ isc.datasrc.Error Other operational errors at the data source level.\n\
+ SystemError An unexpected error in the backend C++ code. Either a rare\n\
+ system error such as short memory or an implementation bug.\n\
+\n\
+Parameters:\n\
+ zone The name of the zone for which the difference should be\n\
+ retrieved.\n\
+ begin_serial The SOA serial of the beginning version of the\n\
+ differences.\n\
+ end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n\
";
} // unnamed namespace
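
The calling convention documented above (a pair of a result code and a reader, with the reader being None unless the result is SUCCESS) translates into code along these lines. This is a sketch only; the zone name, serials and the database path are illustrative assumptions:

    # Sketch of the documented get_journal_reader() calling convention.
    import isc.datasrc
    from isc.dns import Name
    from isc.datasrc import ZoneJournalReader

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "/tmp/zone.sqlite3"}')
    result, reader = client.get_journal_reader(Name('example.com'),
                                               1234, 1235)
    if result == ZoneJournalReader.SUCCESS:
        rr = reader.get_next_diff()
        while rr is not None:
            print(rr.to_text())        # one RR (single RDATA) per call
            rr = reader.get_next_diff()
    elif result == ZoneJournalReader.NO_SUCH_VERSION:
        pass                           # e.g. fall back to a full transfer
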
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 49235a6..bdf84a3 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -38,6 +38,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include "client_inc.cc"
using namespace std;
@@ -84,26 +85,26 @@ PyObject*
DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
PyObject* name_obj;
- PyObject* adjust_ttl_obj = NULL;
+ PyObject* separate_rrs_obj = NULL;
if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
- &adjust_ttl_obj)) {
+ &separate_rrs_obj)) {
try {
- bool adjust_ttl = true;
- if (adjust_ttl_obj != NULL) {
+ bool separate_rrs = false;
+ if (separate_rrs_obj != NULL) {
// store result in local var so we can explicitly check for
// -1 error return value
- int adjust_ttl_no = PyObject_Not(adjust_ttl_obj);
- if (adjust_ttl_no == 1) {
- adjust_ttl = false;
- } else if (adjust_ttl_no == -1) {
+ int separate_rrs_true = PyObject_IsTrue(separate_rrs_obj);
+ if (separate_rrs_true == 1) {
+ separate_rrs = true;
+ } else if (separate_rrs_true == -1) {
PyErr_SetString(getDataSourceException("Error"),
- "Error getting value of adjust_ttl");
+ "Error getting value of separate_rrs");
return (NULL);
}
}
return (createZoneIteratorObject(
self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
- adjust_ttl),
+ separate_rrs),
po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
@@ -129,14 +130,17 @@ PyObject*
DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
PyObject *name_obj;
- PyObject *replace_obj;
- if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
- PyBool_Check(replace_obj)) {
- bool replace = (replace_obj != Py_False);
+ PyObject *replace_obj = NULL;
+ PyObject *journaling_obj = Py_False;
+ if (PyArg_ParseTuple(args, "O!O|O", &name_type, &name_obj,
+ &replace_obj, &journaling_obj) &&
+ PyBool_Check(replace_obj) && PyBool_Check(journaling_obj)) {
+ const bool replace = (replace_obj != Py_False);
+ const bool journaling = (journaling_obj == Py_True);
try {
ZoneUpdaterPtr updater =
self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
- replace);
+ replace, journaling);
if (!updater) {
return (Py_None);
}
@@ -157,10 +161,56 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
return (NULL);
}
} else {
+ // PyBool_Check doesn't set the error, so we have to set it ourselves.
+ if (replace_obj != NULL && !PyBool_Check(replace_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'replace' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
+ if (!PyBool_Check(journaling_obj)) {
+ PyErr_SetString(PyExc_TypeError, "'journaling' for "
+ "DataSourceClient.get_updater must be boolean");
+ }
return (NULL);
}
}
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ unsigned long begin_obj, end_obj;
+
+ if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+ &begin_obj, &end_obj)) {
+ try {
+ pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+ self->cppobj->getInstance().getJournalReader(
+ PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+ static_cast<uint32_t>(end_obj));
+ PyObject* po_reader;
+ if (result.first == ZoneJournalReader::SUCCESS) {
+ po_reader = createZoneJournalReaderObject(result.second,
+ po_self);
+ } else {
+ po_reader = Py_None;
+ Py_INCREF(po_reader); // this will soon be released
+ }
+ PyObjectContainer container(po_reader);
+ return (Py_BuildValue("(iO)", result.first, container.get()));
+ } catch (const isc::NotImplemented& ex) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ex.what());
+ } catch (const DataSourceError& ex) {
+ PyErr_SetString(getDataSourceException("Error"), ex.what());
+ } catch (const std::exception& ex) {
+ PyErr_SetString(PyExc_SystemError, ex.what());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+ }
+ }
+ return (NULL);
+}
+
// This list contains the actual set of functions we have in
// python. Each entry has
// 1. Python method name
@@ -168,18 +218,21 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
// 3. Argument type
// 4. Documentation
PyMethodDef DataSourceClient_methods[] = {
- { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
- METH_VARARGS, DataSourceClient_findZone_doc },
+ { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+ DataSourceClient_findZone_doc },
{ "get_iterator",
- reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator, METH_VARARGS,
DataSourceClient_getIterator_doc },
- { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ { "get_updater", DataSourceClient_getUpdater,
METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { "get_journal_reader", DataSourceClient_getJournalReader,
+ METH_VARARGS, DataSourceClient_getJournalReader_doc },
{ NULL, NULL, 0, NULL }
};
int
-DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
char* ds_type_str;
char* ds_config_str;
try {
@@ -224,7 +277,8 @@ DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
}
void
-DataSourceClient_destroy(s_DataSourceClient* const self) {
+DataSourceClient_destroy(PyObject* po_self) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
delete self->cppobj;
self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
@@ -243,7 +297,7 @@ PyTypeObject datasourceclient_type = {
"datasrc.DataSourceClient",
sizeof(s_DataSourceClient), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ DataSourceClient_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -274,7 +328,7 @@ PyTypeObject datasourceclient_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ DataSourceClient_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 6ab29d8..1573b81 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -27,6 +27,7 @@
#include "finder_python.h"
#include "iterator_python.h"
#include "updater_python.h"
+#include "journal_reader_python.h"
#include <util/python/pycppwrapper_util.h>
#include <dns/python/pydnspp_common.h>
@@ -192,6 +193,41 @@ initModulePart_ZoneUpdater(PyObject* mod) {
return (true);
}
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+ if (PyType_Ready(&journal_reader_type) < 0) {
+ return (false);
+ }
+ void* p = &journal_reader_type;
+ if (PyModule_AddObject(mod, "ZoneJournalReader",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&journal_reader_type);
+
+ try {
+ installClassVariable(journal_reader_type, "SUCCESS",
+ Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+ installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_ZONE));
+ installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+ Py_BuildValue("I",
+ ZoneJournalReader::NO_SUCH_VERSION));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in ZoneJournalReader initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in ZoneJournalReader initialization");
+ return (false);
+ }
+
+ return (true);
+}
PyObject* po_DataSourceError;
PyObject* po_NotImplemented;
@@ -239,6 +275,11 @@ PyInit_datasrc(void) {
return (NULL);
}
+ if (!initModulePart_ZoneJournalReader(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
try {
po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
NULL);
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific versions of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the entire sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+// ConstRRsetPtr -> RRset
+// Null -> None
+// InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in a straightforward realization of this concept. The mapping\n\
+between the conceptual difference and the actual implementation is\n\
+hidden in each derived class).\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the difference along with the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) to one of the added RRs of the\n\
+sequence with the ending SOA, and each call to this method returns one\n\
+RR in the form of an RRset that contains exactly one RDATA in the order\n\
+of the sequences.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in IXFR-style form. For\n\
+example, the first RR is supposed to be an SOA, and it should normally\n\
+be the case, but this interface does not necessarily require that the\n\
+derived class implementation ensure this. Normally the differences are\n\
+expected to be stored using this API (via a ZoneUpdater object), and\n\
+as long as that is the case and the underlying implementation follows\n\
+the requirement of the API, the result of this method should be\n\
+valid IXFR-style sequences. So this API does not mandate the almost\n\
+redundant check as part of the interface. If the application needs to\n\
+be 100% sure, it must check the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+ ValueError The method is called beyond the end of the\n\
+ difference sequences.\n\
+ isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+ created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+ s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+ ZoneJournalReaderPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneJournalReader cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+ s_ZoneJournalReader* const self =
+ static_cast<s_ZoneJournalReader*>(po_self) ;
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+ s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::InvalidOperation& ex) {
+ PyErr_SetString(PyExc_ValueError, ex.what());
+ return (NULL);
+ } catch (const isc::Exception& isce) {
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+ PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+ { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+ ZoneJournalReader_getNextDiff_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneJournalReader",
+ sizeof(s_ZoneJournalReader), // tp_basicsize
+ 0, // tp_itemsize
+ ZoneJournalReader_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneJournalReader_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneJournalReader_iter, // tp_iter
+ ZoneJournalReader_next, // tp_iternext
+ ZoneJournalReader_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ ZoneJournalReader_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+ PyObject* base_obj)
+{
+ s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+ journal_reader_type.tp_alloc(&journal_reader_type, 0));
+ if (po != NULL) {
+ po->cppobj = source;
+ po->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
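
Because tp_iter and tp_iternext are filled in above, the wrapper should also support the normal Python iteration protocol, so the diff sequence can be consumed with a plain for loop. A hedged sketch, with the zone name, serials and database path as illustrative assumptions:

    # Sketch: get_next_diff() exposed through the iterator protocol.
    import isc.datasrc
    from isc.dns import Name
    from isc.datasrc import ZoneJournalReader

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "/tmp/zone.sqlite3"}')
    result, reader = client.get_journal_reader(Name('example.com'),
                                               1234, 1235)
    if result == ZoneJournalReader.SUCCESS:
        for rrset in reader:           # each item is an RRset with one RDATA
            print(rrset.to_text())
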
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on.
+/// Its refcount is increased, and will be decreased when
+/// this reader is destroyed, making sure that the
+/// base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+ isc::datasrc::ZoneJournalReaderPtr source,
+ PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index fd63741..daa12fc 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -72,6 +72,14 @@ def create(cur):
rdtype STRING NOT NULL COLLATE NOCASE,
rdata STRING NOT NULL)""")
cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+ cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ version INTEGER NOT NULL,
+ operation INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rrtype STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdata STRING NOT NULL)""")
row = [1]
cur.execute("COMMIT TRANSACTION")
return row
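
The new diffs table can be inspected directly with the sqlite3 module, which is essentially what the check_journal() helper in the tests below does. A minimal sketch; the database path is a placeholder:

    # Minimal sketch of reading back journal entries from the new 'diffs'
    # table; the database path is illustrative only.
    import sqlite3

    conn = sqlite3.connect('/tmp/zone.sqlite3')
    cur = conn.cursor()
    cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
    for name, rrtype, ttl, rdata in cur.fetchall():
        print(name, rrtype, ttl, rdata)
    conn.close()
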
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 411b5cc..400abcf 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,7 @@ EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 68e075a..e46c177 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -15,9 +15,11 @@
import isc.log
import isc.datasrc
-from isc.datasrc import ZoneFinder
-import isc.dns
+from isc.datasrc import ZoneFinder, ZoneJournalReader
+from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
import unittest
+import sqlite3
import os
import shutil
import sys
@@ -39,19 +41,6 @@ def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
rrset_list.append(rrset_to_add)
-# helper function, we have no direct rrset comparison atm
-def rrsets_equal(a, b):
- # no accessor for sigs either (so this only checks name, class, type, ttl,
- # and rdata)
- # also, because of the fake data in rrsigs, if the type is rrsig, the
- # rdata is not checked
- return a.get_name() == b.get_name() and\
- a.get_class() == b.get_class() and\
- a.get_type() == b.get_type() and \
- a.get_ttl() == b.get_ttl() and\
- (a.get_type() == isc.dns.RRType.RRSIG() or
- sorted(a.get_rdata()) == sorted(b.get_rdata()))
-
# returns true if rrset is in expected_rrsets
# will remove the rrset from expected_rrsets if found
def check_for_rrset(expected_rrsets, rrset):
@@ -61,6 +50,13 @@ def check_for_rrset(expected_rrsets, rrset):
return True
return False
+def create_soa(serial):
+ soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ 'ns1.example.org. admin.example.org. ' +
+ str(serial) + ' 3600 1800 2419200 7200'))
+ return soa
+
class DataSrcClient(unittest.TestCase):
def test_(self):
@@ -82,13 +78,12 @@ class DataSrcClient(unittest.TestCase):
isc.datasrc.DataSourceClient, "memory",
"{ \"foo\": 1 }")
- @unittest.skip("This test may fail depending on sqlite3 library behavior")
def test_iterate(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
# for RRSIGS, the TTL's are currently modified. This test should
# start failing when we fix that.
- rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), False)
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), True)
# we do not know the order in which they are returned by the iterator
# but we do want to check them, so we put all records into one list
@@ -115,7 +110,11 @@ class DataSrcClient(unittest.TestCase):
"256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
"N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
"TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
- "5fs0dE/xLztL/CzZ",
+ "5fs0dE/xLztL/CzZ"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
"257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
"KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
"ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
@@ -127,8 +126,16 @@ class DataSrcClient(unittest.TestCase):
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
[
- "dns01.example.com.",
- "dns02.example.com.",
+ "dns01.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns02.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
"dns03.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
@@ -139,15 +146,19 @@ class DataSrcClient(unittest.TestCase):
# For RRSIGS, we can't add the fake data through the API, so we
# simply pass no rdata at all (which is skipped by the check later)
- # Since we passed adjust_ttl = False to get_iterator, we get several
+ # Since we passed separate_rrs = True to get_iterator, we get several
# sets of RRSIGs, one for each TTL
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
[
"master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
@@ -191,26 +202,26 @@ class DataSrcClient(unittest.TestCase):
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
- # Without the adjust_ttl argument, it should return 55 RRsets
+ # Without the separate_rrs argument, it should return 55 RRsets
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"))
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
self.assertEqual(55, len(list(rrets)))
- # same test, but now with explicit True argument for adjust_ttl
+ # same test, but now with explicit False argument for separate_rrs
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
- rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
self.assertEqual(55, len(list(rrets)))
# Count should be 71 if we request individual rrsets for differing ttls
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
- rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
- self.assertEqual(71, len(list(rrets)))
+ self.assertEqual(84, len(list(rrets)))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
@@ -565,6 +576,230 @@ class DataSrcUpdater(unittest.TestCase):
self.assertEqual(None, iterator.get_soa())
self.assertEqual(None, iterator.get_next_rrset())
+class JournalWrite(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+
+ def tearDown(self):
+ self.dsc = None
+ self.updater = None
+
+ def check_journal(self, expected_list):
+ # This assumes sqlite3 DB and directly fetches stored data from
+ # the DB file. It should be generalized using ZoneJournalReader
+ # once it's supported.
+ conn = sqlite3.connect(WRITE_ZONE_DB_FILE)
+ cur = conn.cursor()
+ cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+ actual_list = cur.fetchall()
+ self.assertEqual(len(expected_list), len(actual_list))
+ for (expected, actual) in zip(expected_list, actual_list):
+ self.assertEqual(expected, actual)
+ conn.close()
+
+ def create_a(self, address):
+ a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+ RRTTL(3600))
+ a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return (a_rr)
+
+ def test_journal_write(self):
+ # This is a straightforward port of the C++ 'journal' test
+ # Note: we add/delete 'out of zone' data (example.org in the
+ # example.com zone) for convenience.
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.delete_rrset(self.create_a('192.0.2.2'))
+ self.updater.add_rrset(create_soa(1235))
+ self.updater.add_rrset(self.create_a('192.0.2.2'))
+ self.updater.commit()
+
+ expected = []
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1234 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ "1235 3600 1800 2419200 7200"))
+ expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+ self.check_journal(expected)
+
+ def test_journal_write_multiple(self):
+ # This is a straightforward port of the C++ 'journalMultiple' test
+ expected = []
+ for i in range(1, 100):
+ self.updater.delete_rrset(create_soa(1234 + i - 1))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i - 1) + " 3600 1800 2419200 7200"))
+ self.updater.add_rrset(create_soa(1234 + i))
+ expected.append(("example.org.", "SOA", 3600,
+ "ns1.example.org. admin.example.org. " +
+ str(1234 + i) + " 3600 1800 2419200 7200"))
+ self.updater.commit()
+ self.check_journal(expected)
+
+ def test_journal_write_bad_sequence(self):
+ # This is a straightforward port of the C++ 'journalBadSequence' test
+
+ # Delete A before SOA
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ self.create_a('192.0.2.1'))
+ # Add before delete
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1234))
+ # Add A before SOA
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ self.create_a('192.0.2.1'))
+ # Commit before add
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.commit)
+ # Delete two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+ create_soa(1235))
+ # Add two SOAs
+ self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+ self.updater.delete_rrset(create_soa(1234))
+ self.updater.add_rrset(create_soa(1235))
+ self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+ create_soa(1236))
+
+ def test_journal_write_onerase(self):
+ self.updater = None
+ self.assertRaises(isc.datasrc.Error, self.dsc.get_updater,
+ Name("example.com"), True, True)
+
+ def test_journal_write_badparam(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ self.assertRaises(TypeError, dsc.get_updater, 0, False, True)
+ self.assertRaises(TypeError, dsc.get_updater, Name('example.com'),
+ False, 0)
+ self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
+ 1, True)
+
+class JournalRead(unittest.TestCase):
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ self.zname = Name('example.com')
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+ self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+ WRITE_ZONE_DB_CONFIG)
+ self.reader = None
+
+ def tearDown(self):
+ # Some tests leave the reader in the middle of a sequence, holding
+ # the lock. Since the unittest framework keeps each test object
+ # until the end of the entire test run, we need to make sure the reader
+ # is released at the end of each test. The client shouldn't do harm
+ # but we clean it up, too, just in case.
+ self.dsc = None
+ self.reader = None
+
+ def make_simple_diff(self, begin_soa):
+ updater = self.dsc.get_updater(self.zname, False, True)
+ updater.delete_rrset(begin_soa)
+ updater.add_rrset(create_soa(1235))
+ updater.commit()
+
+ def test_journal_reader(self):
+ # This is a straightforward port of the C++ 'journalReader' test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+ 1235)
+ self.assertEqual(ZoneJournalReader.SUCCESS, result)
+ self.assertNotEqual(None, self.reader)
+ rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+ rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+ self.assertEqual(None, self.reader.get_next_diff())
+ self.assertRaises(ValueError, self.reader.get_next_diff)
+
+ def test_journal_reader_with_large_serial(self):
+ # similar to the previous one, but use a very large serial to check
+ # if the python wrapper code has unexpected integer overflow
+ self.make_simple_diff(create_soa(4294967295))
+ result, self.reader = self.dsc.get_journal_reader(self.zname,
+ 4294967295, 1235)
+ self.assertNotEqual(None, self.reader)
+ # dump to text and compare them in case create_soa happens to have
+ # an overflow bug
+ self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+ 'admin.example.org. 4294967295 3600 1800 ' + \
+ '2419200 7200\n',
+ self.reader.get_next_diff().to_text())
+
+ def test_journal_reader_large_journal(self):
+ # This is a straightforward port of the C++ 'readLargeJournal' test.
+ # In this test we use the ZoneJournalReader object as a Python
+ # iterator.
+ updater = self.dsc.get_updater(self.zname, False, True)
+ expected = []
+ for i in range(0, 100):
+ rrset = create_soa(1234 + i)
+ updater.delete_rrset(rrset)
+ expected.append(rrset)
+
+ rrset = create_soa(1234 + i + 1)
+ updater.add_rrset(rrset)
+ expected.append(rrset)
+
+ updater.commit()
+ _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+ self.assertNotEqual(None, self.reader)
+ i = 0
+ for rr in self.reader:
+ self.assertNotEqual(len(expected), i)
+ rrsets_equal(expected[i], rr)
+ i += 1
+ self.assertEqual(len(expected), i)
+
+ def test_journal_reader_no_range(self):
+ # This is a straightforward port of the C++ 'readJournalForNoRange'
+ # test
+ self.make_simple_diff(create_soa(1234))
+ result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+ 1235)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_no_zone(self):
+ # This is a straightforward port of the C++ 'journalReaderForNXZone'
+ # test
+ result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+ 0, 1)
+ self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+ self.assertEqual(None, self.reader)
+
+ def test_journal_reader_bad_params(self):
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ 'example.com.', 0, 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 'must be int', 1)
+ self.assertRaises(TypeError, self.dsc.get_journal_reader,
+ self.zname, 0, 'must be int')
+
+ def test_journal_reader_direct_construct(self):
+ # ZoneJournalReader can only be constructed via a factory
+ self.assertRaises(TypeError, ZoneJournalReader)
+
+ def test_journal_reader_old_schema(self):
+ # The database doesn't have a "diffs" table.
+ dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
+ client = isc.datasrc.DataSourceClient("sqlite3",
+ "{ \"database_file\": \"" + \
+ dbfile + "\" }")
+ self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
+ self.zname, 0, 1)
+
if __name__ == "__main__":
isc.log.init("bind10")
isc.log.resetUnitTestRootLogger()
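For reference, a minimal sketch of how the journal reader API exercised by
the tests above could be used on its own (a sketch only; ZoneJournalReader is
assumed to be exported from isc.datasrc as in the tests, and the database
path is hypothetical):

    import isc.datasrc
    from isc.dns import Name

    config = '{ "database_file": "/tmp/example.com.sqlite3" }'
    client = isc.datasrc.DataSourceClient("sqlite3", config)
    # request the diff sequence between two SOA serials
    result, reader = client.get_journal_reader(Name("example.com"), 1234, 1235)
    if result == isc.datasrc.ZoneJournalReader.SUCCESS:
        # the reader works as a Python iterator over the stored diffs
        for rrset in reader:
            print(rrset.to_text())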
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs differ
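The check_journal() helper in the tests above peeks directly into the sqlite3
schema. A standalone sketch of the same inspection, assuming a database that
uses the 'diffs' table exercised by the tests (the file path is hypothetical):

    import sqlite3

    conn = sqlite3.connect('/tmp/example.com.sqlite3')
    cur = conn.cursor()
    # the 'diffs' table stores the journal: one row per added/deleted RR
    cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
    for name, rrtype, ttl, rdata in cur.fetchall():
        print(name, rrtype, ttl, rdata)
    conn.close()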
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index c7112b3..2e4a28f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -303,7 +303,8 @@ public:
extern PyTypeObject logger_type;
int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* name;
if (!PyArg_ParseTuple(args, "s", &name)) {
return (-1);
@@ -323,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
}
void
-Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
delete self->logger_;
self->logger_ = NULL;
Py_TYPE(self)->tp_free(self);
@@ -351,7 +354,8 @@ severityToText(const Severity& severity) {
}
PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("s",
severityToText(
@@ -368,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
try {
return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
}
@@ -383,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
}
PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
const char* severity;
int dbgLevel = 0;
if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -425,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
}
PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
}
PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
}
PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
}
PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
}
PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
int level = MIN_DEBUG_LEVEL;
if (!PyArg_ParseTuple(args, "|i", &level)) {
return (NULL);
@@ -470,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
string
objectToStr(PyObject* object, bool convert) {
- PyObject* cleanup(NULL);
+ PyObjectContainer objstr_container;
if (convert) {
- object = cleanup = PyObject_Str(object);
- if (object == NULL) {
+ PyObject* text_obj = PyObject_Str(object);
+ if (text_obj == NULL) {
+ // PyObject_Str could fail for various reasons, including because
+ // the object cannot be converted to a string. We exit with
+ // InternalError to preserve the PyErr set in PyObject_Str.
throw InternalError();
}
- }
- const char* value;
- PyObject* tuple(Py_BuildValue("(O)", object));
- if (tuple == NULL) {
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- throw InternalError();
+ objstr_container.reset(text_obj);
+ object = objstr_container.get();
}
- if (!PyArg_ParseTuple(tuple, "s", &value)) {
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
+ PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+ const char* value;
+ if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
throw InternalError();
}
- string result(value);
- Py_DECREF(tuple);
- if (cleanup != NULL) {
- Py_DECREF(cleanup);
- }
- return (result);
+ return (string(value));
}
// Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
PyObject*
Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
try {
- Py_ssize_t number(PyObject_Length(args));
+ const Py_ssize_t number(PyObject_Length(args));
if (number < 0) {
return (NULL);
}
// Which argument is the first to format?
- size_t start(1);
- if (dbgLevel) {
- start ++;
- }
-
+ const size_t start = dbgLevel ? 2 : 1;
if (number < start) {
return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
"logging call, at least %zu needed and %zd "
@@ -524,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
}
// Extract the fixed arguments
- PyObject *midO(PySequence_GetItem(args, start - 1));
- if (midO == NULL) {
- return (NULL);
- }
- string mid(objectToStr(midO, false));
long dbg(0);
if (dbgLevel) {
- PyObject *dbgO(PySequence_GetItem(args, 0));
- if (dbgO == NULL) {
- return (NULL);
- }
- dbg = PyLong_AsLong(dbgO);
+ PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+ dbg = PyLong_AsLong(dbg_container.get());
if (PyErr_Occurred()) {
return (NULL);
}
@@ -544,16 +533,16 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// We create the logging message right now. If we fail to convert a
// parameter to string, at least the part that we already did will
// be output
+ PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+ const string mid(objectToStr(msgid_container.get(), false));
Logger::Formatter formatter(function(dbg, mid.c_str()));
// Now process the rest of parameters, convert each to string and put
// into the formatter. It will print itself in the end.
for (size_t i(start); i < number; ++ i) {
- PyObject* param(PySequence_GetItem(args, i));
- if (param == NULL) {
- return (NULL);
- }
- formatter = formatter.arg(objectToStr(param, true));
+ PyObjectContainer param_container(PySequence_GetItem(args, i));
+ formatter = formatter.arg(objectToStr(param_container.get(),
+ true));
}
Py_RETURN_NONE;
}
@@ -573,72 +562,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
// Now map the functions into the performOutput. I wish C++ could do
// functional programming.
PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
args, true));
}
PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
args, false));
}
PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
args, false));
}
PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
args, false));
}
PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+ LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
args, false));
}
PyMethodDef loggerMethods[] = {
- { "get_effective_severity",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
- METH_NOARGS, "Returns the effective logging severity as string" },
- { "get_effective_debug_level",
- reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
- METH_NOARGS, "Returns the current debug level." },
- { "set_severity",
- reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+ { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+ "Returns the effective logging severity as string" },
+ { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+ "Returns the current debug level." },
+ { "set_severity", Logger_setSeverity, METH_VARARGS,
"Sets the severity of a logger. The parameters are severity as a "
"string and, optionally, a debug level (integer in range 0-99). "
"The severity may be NULL, in which case an inherited value is taken."
},
- { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
- METH_VARARGS, "Returns if the logger would log debug message now. "
+ { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+ "Returns if the logger would log debug message now. "
"You can provide a desired debug level." },
- { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
- METH_NOARGS, "Returns if the logger would log info message now." },
- { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
- METH_NOARGS, "Returns if the logger would log warn message now." },
- { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
- METH_NOARGS, "Returns if the logger would log error message now." },
- { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
- METH_NOARGS, "Returns if the logger would log fatal message now." },
- { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+ { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+ "Returns if the logger would log info message now." },
+ { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+ "Returns if the logger would log warn message now." },
+ { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+ "Returns if the logger would log error message now." },
+ { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+ "Returns if the logger would log fatal message now." },
+ { "debug", Logger_debug, METH_VARARGS,
"Logs a debug-severity message. It takes the debug level, message ID "
"and any number of stringifiable arguments to the message." },
- { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+ { "info", Logger_info, METH_VARARGS,
"Logs a info-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+ { "warn", Logger_warn, METH_VARARGS,
"Logs a warn-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+ { "error", Logger_error, METH_VARARGS,
"Logs a error-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
- { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+ { "fatal", Logger_fatal, METH_VARARGS,
"Logs a fatal-severity message. It taskes the message ID and any "
"number of stringifiable arguments to the message." },
{ NULL, NULL, 0, NULL }
@@ -649,7 +640,7 @@ PyTypeObject logger_type = {
"isc.log.Logger",
sizeof(LoggerWrapper), // tp_basicsize
0, // tp_itemsize
- reinterpret_cast<destructor>(Logger_destroy), // tp_dealloc
+ Logger_destroy, // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -681,7 +672,7 @@ PyTypeObject logger_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- reinterpret_cast<initproc>(Logger_init), // tp_init
+ Logger_init, // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -718,21 +709,21 @@ PyInit_log(void) {
return (NULL);
}
- if (PyType_Ready(&logger_type) < 0) {
- return (NULL);
- }
-
- if (PyModule_AddObject(mod, "Logger",
- static_cast<PyObject*>(static_cast<void*>(
- &logger_type))) < 0) {
- return (NULL);
- }
-
- // Add in the definitions of the standard debug levels. These can then
- // be referred to in Python through the constants log.DBGLVL_XXX.
+ // Finalize logger class and add in the definitions of the standard debug
+ // levels. These can then be referred to in Python through the constants
+ // log.DBGLVL_XXX.
// N.B. These should be kept in sync with the constants defined in
// log_dbglevels.h.
try {
+ if (PyType_Ready(&logger_type) < 0) {
+ throw InternalError();
+ }
+ void* p = &logger_type;
+ if (PyModule_AddObject(mod, "Logger",
+ static_cast<PyObject*>(p)) < 0) {
+ throw InternalError();
+ }
+
installClassVariable(logger_type, "DBGLVL_START_SHUT",
Py_BuildValue("I", DBGLVL_START_SHUT));
installClassVariable(logger_type, "DBGLVL_COMMAND",
@@ -747,15 +738,20 @@ PyInit_log(void) {
Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+ } catch (const InternalError&) {
+ Py_DECREF(mod);
+ return (NULL);
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in Log initialization: " +
std::string(ex.what());
PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ Py_DECREF(mod);
return (NULL);
} catch (...) {
PyErr_SetString(PyExc_SystemError,
"Unexpected failure in Log initialization");
+ Py_DECREF(mod);
return (NULL);
}
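On the Python side, the Logger methods whose bindings are reworked above are
used roughly as follows (a sketch based on the method docstrings; the message
ID name is hypothetical, created the same way the tests below do):

    import isc.log

    isc.log.init("example")
    MSG_ID = isc.log.create_message('EXAMPLE_STARTED', 'started with %1')
    logger = isc.log.Logger("example-module")
    logger.set_severity("DEBUG", 50)
    if logger.is_debug_enabled(30):
        # debug() takes the debug level, the message ID and its arguments
        logger.debug(30, MSG_ID, "some argument")
    logger.info(MSG_ID, "some argument")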
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 8deaeae..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
import isc.log
import unittest
import json
+import sys
import bind10_config
from isc.config.ccsession import path_search
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
def setUp(self):
isc.log.init("root", "DEBUG", 50)
self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+ self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
# Checks defaults of the logger
def defaults(self, logger):
@@ -169,5 +171,34 @@ class Logger(unittest.TestCase):
logger = isc.log.Logger("child")
self.assertEqual(logger.DBGLVL_COMMAND, 10)
+ def test_param_reference(self):
+ """
+ Check whether passing a parameter to a logger causes a reference leak.
+ """
+ class LogParam:
+ def __str__(self):
+ return 'LogParam'
+ logger = isc.log.Logger("child")
+ param = LogParam()
+ orig_msgrefcnt = sys.getrefcount(param)
+ orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+ logger.info(self.TEST_MSG, param)
+ self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ # intentionally pass an invalid type for debug level. It will
+ # result in TypeError. The passed object still shouldn't leak a
+ # reference.
+ self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+ self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+ def test_bad_parameter(self):
+ # a log parameter cannot be converted to a string object.
+ class LogParam:
+ def __str__(self):
+ raise ValueError("LogParam can't be converted to string")
+ logger = isc.log.Logger("child")
+ self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
if __name__ == '__main__':
unittest.main()
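The reference-leak checks added above rely on sys.getrefcount() reporting the
same value for an object before and after the call under test. The same
pattern can be wrapped in a small helper (a generic sketch, not part of the
patch):

    import sys

    def assert_no_refleak(testcase, func, *args):
        # record the reference count of every argument, invoke the function,
        # then verify the counts are unchanged
        before = [sys.getrefcount(a) for a in args]
        func(*args)
        after = [sys.getrefcount(a) for a in args]
        testcase.assertEqual(before, after)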
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 6b91c87..af79b7c 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,6 +21,7 @@ import threading
import time
import errno
from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
from isc.net import addr
import isc
from isc.log_messages.notify_out_messages import *
@@ -31,7 +32,7 @@ logger = isc.log.Logger("notify_out")
# we can't import we should not start anyway, and logging an error
# is a bad idea since the logging system is most likely not
# initialized yet. see trac ticket #1103
-from pydnspp import *
+from isc.dns import *
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
_MAX_NOTIFY_NUM = 30
@@ -51,6 +52,24 @@ _BAD_REPLY_PACKET = 5
SOCK_DATA = b's'
+# borrowed from xfrin.py @ #1298. We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text() + '/' + str(zone_class)
+
+class NotifyOutDataSourceError(Exception):
+ """An exception raised when data source error happens within notify out.
+
+ This exception is expected to be caught within the notify_out module.
+
+ """
+ pass
+
class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
@@ -123,16 +142,20 @@ class NotifyOut:
self._nonblock_event = threading.Event()
def _init_notify_out(self, datasrc_file):
- '''Get all the zones name and its notify target's address
+ '''Get all the zone names and their notify targets' addresses.
+
TODO, currently the zones are obtained by going through the zone
table in the database. There should be a better way to get them
and also the setting 'also_notify', and there should be one
- mechanism to cover the changed datasrc.'''
+ mechanism to cover the changed datasrc.
+
+ '''
self._db_file = datasrc_file
for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
zone_id = (zone_name, zone_class)
self._notify_infos[zone_id] = ZoneNotifyInfo(zone_name, zone_class)
- slaves = self._get_notify_slaves_from_ns(zone_name)
+ slaves = self._get_notify_slaves_from_ns(Name(zone_name),
+ RRClass(zone_class))
for item in slaves:
self._notify_infos[zone_id].notify_slaves.append((item, 53))
@@ -234,7 +257,7 @@ class NotifyOut:
def _get_rdata_data(self, rr):
return rr[7].strip()
- def _get_notify_slaves_from_ns(self, zone_name):
+ def _get_notify_slaves_from_ns(self, zone_name, zone_class):
'''Get all NS records, then remove the primary master from ns rrset,
then use the name in NS record rdata part to get the a/aaaa records
in the same zone. the targets listed in a/aaaa record rdata are treated
@@ -242,28 +265,56 @@ class NotifyOut:
Note: this is the simplest way to get the address of slaves,
but not correct, it can't handle the delegation slaves, or the CNAME
and DNAME logic.
- TODO. the function should be provided by one library.'''
- ns_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'NS', self._db_file)
- soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- ns_rr_name = []
- for ns in ns_rrset:
- ns_rr_name.append(self._get_rdata_data(ns))
-
- if len(soa_rrset) > 0:
- sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
- if sname in ns_rr_name:
- ns_rr_name.remove(sname)
-
- addr_list = []
- for rr_name in ns_rr_name:
- a_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'A', self._db_file)
- aaaa_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'AAAA', self._db_file)
- for rr in a_rrset:
- addr_list.append(self._get_rdata_data(rr))
- for rr in aaaa_rrset:
- addr_list.append(self._get_rdata_data(rr))
-
- return addr_list
+ TODO. the function should be provided by one library.
+
+ '''
+ # Prepare data source client. This should eventually be moved to
+ # an earlier stage of initialization and also support multiple
+ # data sources.
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ try:
+ result, finder = DataSourceClient('sqlite3',
+ datasrc_config).find_zone(
+ zone_name)
+ except isc.datasrc.Error as ex:
+ logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
+ return []
+ if result is not DataSourceClient.SUCCESS:
+ logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
+ format_zone_str(zone_name, zone_class))
+ return []
+
+ result, ns_rrset = finder.find(zone_name, RRType.NS(), None,
+ finder.FIND_DEFAULT)
+ if result is not finder.SUCCESS or ns_rrset is None:
+ logger.warn(NOTIFY_OUT_ZONE_NO_NS,
+ format_zone_str(zone_name, zone_class))
+ return []
+ result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+ finder.FIND_DEFAULT)
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
+ format_zone_str(zone_name, zone_class))
+ return [] # broken zone anyway, stop here.
+ soa_mname = Name(soa_rrset.get_rdata()[0].to_text().split(' ')[0])
+
+ addrs = []
+ for ns_rdata in ns_rrset.get_rdata():
+ ns_name = Name(ns_rdata.to_text())
+ if soa_mname == ns_name:
+ continue
+ result, rrset = finder.find(ns_name, RRType.A(), None,
+ finder.FIND_DEFAULT)
+ if result is finder.SUCCESS and rrset is not None:
+ addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+ result, rrset = finder.find(ns_name, RRType.AAAA(), None,
+ finder.FIND_DEFAULT)
+ if result is finder.SUCCESS and rrset is not None:
+ addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
+
+ return addrs
def _prepare_select_info(self):
'''
@@ -404,8 +455,9 @@ class NotifyOut:
self._nonblock_event.set()
def _send_notify_message_udp(self, zone_notify_info, addrinfo):
- msg, qid = self._create_notify_message(zone_notify_info.zone_name,
- zone_notify_info.zone_class)
+ msg, qid = self._create_notify_message(
+ Name(zone_notify_info.zone_name),
+ RRClass(zone_notify_info.zone_class))
render = MessageRenderer()
render.set_length_limit(512)
msg.to_wire(render)
@@ -426,17 +478,6 @@ class NotifyOut:
return True
- def _create_rrset_from_db_record(self, record, zone_class):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
- This function should be updated first. TODO, the function is copied from xfrout, there
- should be library for creating one rrset. '''
- rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
- rdata_ = Rdata(rrtype_, RRClass(zone_class), " ".join(record[sqlite3_ds.RR_RDATA_INDEX:]))
- rrset_ = RRset(Name(record[sqlite3_ds.RR_NAME_INDEX]), RRClass(zone_class), \
- rrtype_, RRTTL( int(record[sqlite3_ds.RR_TTL_INDEX])))
- rrset_.add_rdata(rdata_)
- return rrset_
-
def _create_notify_message(self, zone_name, zone_class):
msg = Message(Message.RENDER)
qid = random.randint(0, 0xFFFF)
@@ -444,14 +485,36 @@ class NotifyOut:
msg.set_opcode(Opcode.NOTIFY())
msg.set_rcode(Rcode.NOERROR())
msg.set_header_flag(Message.HEADERFLAG_AA)
- question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
- msg.add_question(question)
- # Add soa record to answer section
- soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
- rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+ msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
+ zone_class))
return msg, qid
+ def _get_zone_soa(self, zone_name, zone_class):
+ # We create (and soon drop) the data source client here because
+ # clients should be thread specific. We could let the main thread
+ # loop (_dispatcher) create and retain the client in order to avoid
+ # the overhead when we generalize the interface (and we may also
+ # revisit the design of notify_out more substantially anyway).
+ datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+ result, finder = DataSourceClient('sqlite3',
+ datasrc_config).find_zone(zone_name)
+ if result is not DataSourceClient.SUCCESS:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() + ' not found')
+
+ result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+ finder.FIND_DEFAULT)
+ if result is not finder.SUCCESS or soa_rrset is None or \
+ soa_rrset.get_rdata_count() != 1:
+ raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+ zone_name.to_text() + '/' +
+ zone_class.to_text() +
+ ' is broken: no valid SOA found')
+
+ return soa_rrset
+
def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
'''Parse the notify reply message.
rcode will not be checked here. If we get the response
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index 570f51e..b77a60c 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -81,3 +81,24 @@ programming error, since all exceptions should have been caught
explicitly. Please file a bug report. Since there was a response,
no more notifies will be sent to this server for this notification
event.
+
+% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message. This can be either a
+configuration error or an installation setup failure.
+
+% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found
+notify_out attempted to get slave information for a zone, but the zone
+wasn't found in the expected data source. This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this process, so it indicates either a critical inconsistency in the
+data source or a software bug.
+
+% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR. A notify message won't be sent for such a zone.
+
+% NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs. A notify message won't
+be sent for such a zone.
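The reworked _get_notify_slaves_from_ns() that these messages belong to
follows the general data source lookup pattern used throughout these changes:
create a client, locate the zone, then query its finder. A condensed sketch
of that pattern (the database path is hypothetical):

    from isc.datasrc import DataSourceClient
    from isc.dns import Name, RRType

    config = '{ "database_file": "/tmp/zones.sqlite3" }'
    result, finder = DataSourceClient('sqlite3', config).find_zone(
        Name('example.com'))
    if result == DataSourceClient.SUCCESS:
        result, ns_rrset = finder.find(Name('example.com'), RRType.NS(),
                                       None, finder.FIND_DEFAULT)
        if result == finder.SUCCESS and ns_rrset is not None:
            for rdata in ns_rrset.get_rdata():
                print(rdata.to_text())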
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 00c2eee..6b62b90 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -1,12 +1,20 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = notify_out_test.py
EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST += testdata/test.sqlite3 testdata/brokentest.sqlite3
+# The rest of the files are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/example.net
+EXTRA_DIST += testdata/nons.example testdata/nosoa.example
+EXTRA_DIST += testdata/multisoa.example
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,5 +28,6 @@ endif
echo Running test: $$pytest ; \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 83f6d1a..d64c203 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -19,9 +19,11 @@ import os
import tempfile
import time
import socket
-from isc.datasrc import sqlite3_ds
from isc.notify import notify_out, SOCK_DATA
import isc.log
+from isc.dns import *
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
# our fake socket, where we can read and insert messages
class MockSocket():
@@ -92,10 +94,8 @@ class TestZoneNotifyInfo(unittest.TestCase):
class TestNotifyOut(unittest.TestCase):
def setUp(self):
- self._db_file = tempfile.NamedTemporaryFile(delete=False)
- sqlite3_ds.load(self._db_file.name, 'example.net.', self._example_net_data_reader)
- sqlite3_ds.load(self._db_file.name, 'example.com.', self._example_com_data_reader)
- self._notify = notify_out.NotifyOut(self._db_file.name)
+ self._db_file = TESTDATA_SRCDIR + '/test.sqlite3'
+ self._notify = notify_out.NotifyOut(self._db_file)
self._notify._notify_infos[('example.com.', 'IN')] = MockZoneNotifyInfo('example.com.', 'IN')
self._notify._notify_infos[('example.com.', 'CH')] = MockZoneNotifyInfo('example.com.', 'CH')
self._notify._notify_infos[('example.net.', 'IN')] = MockZoneNotifyInfo('example.net.', 'IN')
@@ -110,10 +110,6 @@ class TestNotifyOut(unittest.TestCase):
com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
- def tearDown(self):
- self._db_file.close()
- os.unlink(self._db_file.name)
-
def test_send_notify(self):
notify_out._MAX_NOTIFY_NUM = 2
@@ -309,39 +305,9 @@ class TestNotifyOut(unittest.TestCase):
self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
self.assertNotEqual(cur_tgt, example_net_info._notify_current)
-
- def _example_net_data_reader(self):
- zone_data = [
- ('example.net.', '1000', 'IN', 'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
- ('example.net.', '1000', 'IN', 'NS', 'a.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'b.dns.example.net.'),
- ('example.net.', '1000', 'IN', 'NS', 'c.dns.example.net.'),
- ('a.dns.example.net.', '1000', 'IN', 'A', '1.1.1.1'),
- ('a.dns.example.net.', '1000', 'IN', 'AAAA', '2:2::2:2'),
- ('b.dns.example.net.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.net.', '1000', 'IN', 'AAAA', '5:5::5:5'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '6.6.6.6'),
- ('c.dns.example.net.', '1000', 'IN', 'A', '7.7.7.7'),
- ('c.dns.example.net.', '1000', 'IN', 'AAAA', '8:8::8:8')]
- for item in zone_data:
- yield item
-
- def _example_com_data_reader(self):
- zone_data = [
- ('example.com.', '1000', 'IN', 'SOA', 'a.dns.example.com. mail.example.com. 1 1 1 1 1'),
- ('example.com.', '1000', 'IN', 'NS', 'a.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'b.dns.example.com.'),
- ('example.com.', '1000', 'IN', 'NS', 'c.dns.example.com.'),
- ('a.dns.example.com.', '1000', 'IN', 'A', '1.1.1.1'),
- ('b.dns.example.com.', '1000', 'IN', 'A', '3.3.3.3'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '4:4::4:4'),
- ('b.dns.example.com.', '1000', 'IN', 'AAAA', '5:5::5:5')]
- for item in zone_data:
- yield item
-
def test_get_notify_slaves_from_ns(self):
- records = self._notify._get_notify_slaves_from_ns('example.net.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
+ RRClass.IN())
self.assertEqual(6, len(records))
self.assertEqual('8:8::8:8', records[5])
self.assertEqual('7.7.7.7', records[4])
@@ -350,14 +316,32 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
- records = self._notify._get_notify_slaves_from_ns('example.com.')
+ records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
+ RRClass.IN())
self.assertEqual(3, len(records))
self.assertEqual('5:5::5:5', records[2])
self.assertEqual('4:4::4:4', records[1])
self.assertEqual('3.3.3.3', records[0])
+ def test_get_notify_slaves_from_ns_unusual(self):
+ self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nons.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosoa.example'), RRClass.IN()))
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('multisoa.example'), RRClass.IN()))
+
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('nosuchzone.example'), RRClass.IN()))
+
+ # This will cause failure in getting access to the data source.
+ self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
+ self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+ Name('example.com'), RRClass.IN()))
+
def test_init_notify_out(self):
- self._notify._init_notify_out(self._db_file.name)
+ self._notify._init_notify_out(self._db_file)
self.assertListEqual([('3.3.3.3', 53), ('4:4::4:4', 53), ('5:5::5:5', 53)],
self._notify._notify_infos[('example.com.', 'IN')].notify_slaves)
@@ -417,6 +401,5 @@ class TestNotifyOut(unittest.TestCase):
if __name__== "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
-
-
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
new file mode 100644
index 0000000..61e766c
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/example.com b/src/lib/python/isc/notify/tests/testdata/example.com
new file mode 100644
index 0000000..5d59819
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.com
@@ -0,0 +1,10 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com. 1000 IN NS a.dns.example.com.
+example.com. 1000 IN NS b.dns.example.com.
+example.com. 1000 IN NS c.dns.example.com.
+a.dns.example.com. 1000 IN A 1.1.1.1
+b.dns.example.com. 1000 IN A 3.3.3.3
+b.dns.example.com. 1000 IN AAAA 4:4::4:4
+b.dns.example.com. 1000 IN AAAA 5:5::5:5
diff --git a/src/lib/python/isc/notify/tests/testdata/example.net b/src/lib/python/isc/notify/tests/testdata/example.net
new file mode 100644
index 0000000..001d2d9
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.net
@@ -0,0 +1,14 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.net. 1000 IN SOA a.dns.example.net. mail.example.net. 1 1 1 1 1
+example.net. 1000 IN NS a.dns.example.net.
+example.net. 1000 IN NS b.dns.example.net.
+example.net. 1000 IN NS c.dns.example.net.
+a.dns.example.net. 1000 IN A 1.1.1.1
+a.dns.example.net. 1000 IN AAAA 2:2::2:2
+b.dns.example.net. 1000 IN A 3.3.3.3
+b.dns.example.net. 1000 IN AAAA 4:4::4:4
+b.dns.example.net. 1000 IN AAAA 5:5::5:5
+c.dns.example.net. 1000 IN A 6.6.6.6
+c.dns.example.net. 1000 IN A 7.7.7.7
+c.dns.example.net. 1000 IN AAAA 8:8::8:8
diff --git a/src/lib/python/isc/notify/tests/testdata/multisoa.example b/src/lib/python/isc/notify/tests/testdata/multisoa.example
new file mode 100644
index 0000000..eca2fbd
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/multisoa.example
@@ -0,0 +1,5 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's provided
+;; for reference purposes only.
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 1 1 1 1 1
+multisoa.example. 1000 IN SOA a.dns.multisoa.example. mail.multisoa.example. 2 2 2 2 2
+multisoa.example. 1000 IN NS a.dns.multisoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/nons.example b/src/lib/python/isc/notify/tests/testdata/nons.example
new file mode 100644
index 0000000..c1fc1b8
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nons.example
@@ -0,0 +1,3 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's provided
+;; for reference purposes only.
+nons.example. 1000 IN SOA a.dns.nons.example. mail.nons.example. 1 1 1 1 1
diff --git a/src/lib/python/isc/notify/tests/testdata/nosoa.example b/src/lib/python/isc/notify/tests/testdata/nosoa.example
new file mode 100644
index 0000000..18e87e1
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nosoa.example
@@ -0,0 +1,7 @@
+;; This is the source of a zone stored in brokentest.sqlite3. It's provided
+;; for reference purposes only.
+;; (the SOA below was removed from the copy stored in the database)
+nosoa.example. 1000 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+nosoa.example. 1000 IN NS a.dns.nosoa.example.
+nosoa.example. 1000 IN NS b.dns.nosoa.example.
+nosoa.example. 1000 IN NS c.dns.nosoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..e3cadb0
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..5479d83 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py rrset_utils.py
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..8c22d92
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+ '''Compare two RRsets, return True if equal, otherwise False
+
+ We provide this function as part of test utils as we have no direct
+ RRset comparison at the moment. There's no accessor for sigs either
+ (so this only checks name, class, type, ttl, and rdata).
+ Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+ '''
+ return a.get_name() == b.get_name() and \
+ a.get_class() == b.get_class() and \
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and \
+ (a.get_type() == RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are shortcut utilities to create an RRset of a specific
+# RR type with one RDATA. Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ return rrset
+
+def create_aaaa(name, address, ttl=3600):
+ rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+ return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+ rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+ return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+ '''For convenience we use a default name often used as a zone name'''
+
+ rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+ rdata_str = 'master.example.com. admin.example.com. ' + \
+ str(serial) + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+ return rrset
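A short usage sketch of these helpers in a test (assuming the module is
importable as isc.testutils.rrset_utils; the records themselves are made up):

    from isc.dns import Name
    from isc.testutils.rrset_utils import create_soa, create_a, rrsets_equal

    soa1 = create_soa(2011112901)
    soa2 = create_soa(2011112901)
    assert rrsets_equal(soa1, soa2)      # same name/class/type/ttl/rdata

    www = create_a(Name('www.example.com'), '192.0.2.1')
    assert not rrsets_equal(soa1, www)   # different owner name and type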
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index a2d9a7d..38b7f39 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -59,7 +59,7 @@ class Diff:
the changes to underlying data source right away, but keeps them for
a while.
"""
- def __init__(self, ds_client, zone, replace=False):
+ def __init__(self, ds_client, zone, replace=False, journaling=False):
"""
Initializes the diff to a ready state. It checks the zone exists
in the datasource and if not, NoSuchZone is raised. This also creates
@@ -67,13 +67,25 @@ class Diff:
The ds_client is the datasource client containing the zone. Zone is
isc.dns.Name object representing the name of the zone (its apex).
- If replace is true, the content of the whole zone is wiped out before
+ If replace is True, the content of the whole zone is wiped out before
applying the diff.
+ If journaling is True, the history of subsequent updates will be
+ recorded along with the updates themselves, as long as the underlying
+ data source supports journaling. If the data source allows
+ incoming updates but does not support journaling, the Diff object
+ will still apply the diffs, just with journaling disabled.
+
You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
exceptions.
"""
- self.__updater = ds_client.get_updater(zone, replace)
+ try:
+ self.__updater = ds_client.get_updater(zone, replace, journaling)
+ except isc.datasrc.NotImplemented as ex:
+ if not journaling:
+ raise ex
+ self.__updater = ds_client.get_updater(zone, replace, False)
+ logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
if self.__updater is None:
# The no such zone case
raise NoSuchZone("Zone " + str(zone) +
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
index be943c8..203e31f 100644
--- a/src/lib/python/isc/xfrin/libxfrin_messages.mes
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -19,3 +19,13 @@
The xfrin module received an update containing multiple rdata changes for the
same RRset. But the TTLs of these don't match each other. As we combine them
together, the later one gets overwritten to the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled. At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR). Unless the
+overhead of the full transfer is an issue, this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
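With the new 'journaling' parameter of Diff described above, a caller that
wants the update history recorded simply passes journaling=True; if the data
source cannot journal, the Diff falls back as this message explains. A usage
sketch (the data source client is assumed to be created elsewhere):

    from isc.dns import Name
    from isc.xfrin.diff import Diff

    # ds_client: an isc.datasrc.DataSourceClient for the zone's database
    diff = Diff(ds_client, Name('example.org'), replace=False, journaling=True)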
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 9fab890..9944404 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,6 +15,7 @@
import isc.log
import unittest
+import isc.datasrc
from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
from isc.xfrin.diff import Diff, NoSuchZone
@@ -127,7 +128,7 @@ class DiffTest(unittest.TestCase):
"""
return self.__rrclass
- def get_updater(self, zone_name, replace):
+ def get_updater(self, zone_name, replace, journaling=False):
"""
This one pretends this is the data source client and serves
getting an updater.
@@ -138,11 +139,20 @@ class DiffTest(unittest.TestCase):
# The diff should not delete the old data.
self.assertEqual(self.__should_replace, replace)
self.__updater_requested = True
- # Pretend this zone doesn't exist
if zone_name == Name('none.example.org.'):
+ # Pretend this zone doesn't exist
return None
+
+ # If journaling is enabled, record the fact; for a special zone
+ # pretend that we don't support journaling.
+ if journaling:
+ if zone_name == Name('nodiff.example.org'):
+ raise isc.datasrc.NotImplemented('journaling not supported')
+ self.__journaling_enabled = True
else:
- return self
+ self.__journaling_enabled = False
+
+ return self
def test_create(self):
"""
@@ -152,6 +162,8 @@ class DiffTest(unittest.TestCase):
diff = Diff(self, Name('example.org.'))
self.assertTrue(self.__updater_requested)
self.assertEqual([], diff.get_buffer())
+ # By default journaling is disabled
+ self.assertFalse(self.__journaling_enabled)
def test_create_nonexist(self):
"""
@@ -161,6 +173,14 @@ class DiffTest(unittest.TestCase):
self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
self.assertTrue(self.__updater_requested)
+ def test_create_withjournal(self):
+ Diff(self, Name('example.org'), False, True)
+ self.assertTrue(self.__journaling_enabled)
+
+ def test_create_nojournal(self):
+ Diff(self, Name('nodiff.example.org'), False, True)
+ self.assertFalse(self.__journaling_enabled)
+
def __data_common(self, diff, method, operation):
"""
Common part of test for test_add and test_delete.
diff --git a/tests/tools/badpacket/badpacket.cc b/tests/tools/badpacket/badpacket.cc
index 86bbc47..be393d5 100644
--- a/tests/tools/badpacket/badpacket.cc
+++ b/tests/tools/badpacket/badpacket.cc
@@ -18,6 +18,7 @@
#include <config.h>
#include <exceptions/exceptions.h>
+#include <log/logger_support.h>
#include "command_options.h"
#include "scan.h"
@@ -44,6 +45,7 @@ using namespace isc::badpacket;
/// \brief Main Program
int main(int argc, char* argv[]) {
+ isc::log::initLogger("badpacket");
try {
// Parse command
More information about the bind10-changes
mailing list