BIND 10 trac1687, updated. 8958f15ce8c14539aaa8b3e6fa11cc58f7f90b41 [trac1687]Merge branch 'master' into trac1687
BIND 10 source code commits
bind10-changes at lists.isc.org
Fri Jun 15 15:15:47 UTC 2012
The branch, trac1687 has been updated
via 8958f15ce8c14539aaa8b3e6fa11cc58f7f90b41 (commit)
via 0a4607b2a469aa91bc60ccfab2212eb10a96c5a2 (commit)
via ba4c6615bbe0060d847f57f1f0f269811559450a (commit)
via 9253e25378df231b5153a0b35e7fcb025463dd7d (commit)
via dbc27ca22d408ac333583edfce29c7651268f8f3 (commit)
via ef5d56d8dbbf4a5e01d6256828d599d51fc74cf9 (commit)
via dcafee23dccbdf2f4b77ffc57d4a485ab4334455 (commit)
via 82706fc52eca0eef8ef9af269ce55b68ff79dc6a (commit)
via d84cc75031f45e6a11c06775443f860d46282567 (commit)
via 58abcc59fb63e4f06f8b5b1406b8f1c4674fe814 (commit)
via 7dcf7f71a999d1e20185fef0e55d4c1447aea24b (commit)
via bf59ff6ae4f426559e3fec86fcc4fe15ae98f485 (commit)
via da15f58fb83ca24cc09812dec9c02b272d730caa (commit)
via b66ca2c6cc969be893e40184de90e12a33602b67 (commit)
via 1fa35a8bca4eb2f8a515516a69c79354dd355e69 (commit)
via 52a26b21697e8986400ab4af46429e3d00c895c5 (commit)
via c7da3e0c3ab187eb6f4293eea34f65deef0cd249 (commit)
via 43feaa8658342eff0c2ddc9b33ef185a6826411e (commit)
via f5abe6318c5b84383cde4f74e79a6de0cfca87e4 (commit)
via 5077b7bfbcd4e9ef7ce16cd3dca603e2582fd945 (commit)
via 82d8a2a27293589bdde01e1385f3e9b05538ab4f (commit)
via 06a3082082feea8f608272753a24a3f014c494c8 (commit)
via cda7cbf264721ada0ab3c959fa3a994cd9597364 (commit)
via 6f6c79d890e93dfed8960ae5b99275db5b94d3fc (commit)
via 3430cd41631b026665fe7b01bd12740e2fee9d9b (commit)
via 31408c8c110f7bbd95bbbea411cc8aee30eaf6b6 (commit)
via 27ab58d1ad4771ec5e71d41e43fb836310bd4d4b (commit)
via 926983df95ba480878a289f34a95449bb8ebde21 (commit)
via eddd2459c8298b1693e3b38c0d6ebebdb237102f (commit)
via 57c8937e293f8849ac962c419027347b3b1afdb3 (commit)
via 202bac3be117c0c93029b7cca008d3d2c6b854a7 (commit)
via 1a5ac34c532df175935d88d0c4a117ed36ca311b (commit)
via a565ca253ed9ab4ffe1413ccd776d53ca212fc82 (commit)
via 3a21a6c5b9f01618ad2b8d16ecaad8e2cbe880ce (commit)
via 4818355e249442e4ffec1f01ace80f9f3544dd00 (commit)
via 185154738e35c88a1d8cf155a21bd6f23c04ac9d (commit)
via 6f0bcc61de660384c2e6c1951cf3d60b45789698 (commit)
via 3eaafc4154640f7c6ebf026fc4592f05a0872aa2 (commit)
via 450385079016a4f99105ecaf85f6d22aa90762b7 (commit)
via 4efe9acc28ed253db206ab91df2382358947a094 (commit)
via 07d1252a747959053f499d03e40e590c21ea0bd5 (commit)
via 4cd359475e5426fca0393246badb07ea686e3283 (commit)
via 3800f59cbac8ac2c3adff1387485d2bf4de0f369 (commit)
via 8479ff275b5a09dd94c8c9311219c0385ff685f8 (commit)
via 45eb75402443584059a2a061f904653b88774d69 (commit)
via d6b2edc832ea90d2470e953a7b4e034c795ff303 (commit)
via 950d0e317681530727eab631b830280e4890eb96 (commit)
via 29ce263555c5e12a0357fd4bc8b57d5eab070610 (commit)
via c4c3340828ab1232f95562217ca1f98044c20273 (commit)
via 6adafccee8a94c34a4d542a2e42fce0a861dbf40 (commit)
via eb9d328b4bb20f8fe04771e20b045827eb3a8667 (commit)
via abf827ef79067d74865bc4e6da349788e71fca7d (commit)
via 6f914bb2c388eb4dd3e5c55297f8988ab9529b3f (commit)
via a11c2a4ee291fe85a64c059cc94622c05de3c2fa (commit)
via a64c77257ca5c110e408f846286da909e8548b9c (commit)
via d17813bd0f896fcbd879a8b08d40c7e396dc2d42 (commit)
via f055c638d53954bcda8783a9471584a55551bb69 (commit)
via 32b494b4f1c01d1694fab23b4b172185bdb21769 (commit)
via df4eddc681aba28e6a85048a5858f7fc652a3727 (commit)
via ef10033b844f147caa9e91c5dae8a33db0241630 (commit)
via 6aa01f0ed3541cbb35510c76a584c6dd50f7401f (commit)
via 380b3e8ec02ef45555c0113ee19329fe80539f71 (commit)
via 6ae6562d99f3917bfaa379a6d2466417e57c4d6e (commit)
via 7e0dc8b5a57c8be2f27817bba1ca50ffd4a1462b (commit)
via 0235da81624be30ea2aeadaf1c93fde1ba1bd057 (commit)
via 8cecc1ec807f782e4fde4f5a21463c304bd07223 (commit)
via 469b4078b27d456cbbce856edfd8b57b437c317b (commit)
via 7817f9484fa5acae4d82c39ac88d010be62d1225 (commit)
via 400bb38f52c2c272c18edd77305310f6c55c4c37 (commit)
via fb2001dc826b485c3d86ab3eff689c3f13c5b710 (commit)
via 18cd6a77d6a673679aaec4717267a06bcce2995a (commit)
via 81239a632d1190e9b836287a41d62df05bf535c8 (commit)
via 0dbe89e24e6757013e58ab3e0e7c5f5b7afb23f2 (commit)
via d8655b3c7a7f98c3f8974fa4c8856f2a6ac056d7 (commit)
via 8e4cd528b8c10c707616b0dedbb3cc61573da7a6 (commit)
via 88cbe7028f70266945fe6b53a46e235b4c9eca83 (commit)
via 115179c9044864a193f2b3c78f0826dd03ed8cad (commit)
via 85fcef34522d4b51557583680a168372da7193f7 (commit)
via 44ff85906acca45a6e94197b8c3eb9c306b94408 (commit)
via be602dc1ab240a660795b66c20350f85e77c4efb (commit)
via ce911b68e171ddced13c91ced5e7a1d417ad62ca (commit)
via fa56b67ff11e54e7eed6331c4470d37bcbc2c5bb (commit)
via 6fc0bdf0ad2a49f5e98c1d78aadf7334c954a5a3 (commit)
via 091abb45296a925f106addc4a237d905b6d2d945 (commit)
via 5496d551ef264130bb017a99906410fa27045cd9 (commit)
via 652738d33dd4a62c676b3bcb26996d646253191c (commit)
via 56951dcff8366dd0bccf09ae233ea90f8c90dbca (commit)
via f84e7baaff3d36c7f5bf8fe5fa068ad65d162527 (commit)
via 86a2f1b40b794f7a9a5973b82d92013764943864 (commit)
via d05e135bd5ea4cbdaf9bda3c287739f23af5018e (commit)
via 5f5ebca253e112f149d67b171ce1c86cbd3d4a1a (commit)
via f79c74e2f7c58b90c2193b068aa27a2b92947909 (commit)
via ef08af28d34cb6f26189af519cb4f5daddf39373 (commit)
via 27e2490c86e63350cb46fd11ac5f2c77415fa25c (commit)
via 37883d293de17711407fd435d69cd6b79c2713d1 (commit)
via 0bd5a88b651787288ceed2c9914870a6ab125f89 (commit)
via 5b0f09471e155d0a8985b2d320c1a5f47667e2d7 (commit)
via ca3f9d967c0806859979ac5508939eb785d90e1d (commit)
via 2d96fb33890ca6e6abe991d694d9408f5264fc57 (commit)
via 6cf1a5f2d046ea30dab739bd907b90dcde04160e (commit)
via 342a2b3bcb1b4f3fe2afdc5e0c2dccaa6f79a575 (commit)
via 513458b740d4ff7290d38b84156c7c5276414e0c (commit)
via cbd9b3e4bc13de855d675fc972c1e0e5ea40025e (commit)
via be57f66a715103218b56b77536807d5c5714cbab (commit)
via 9b2f2c4d4f87cf7672a3aa8efc25fb73400dce3a (commit)
via fd3ab0734141e732049a939e11cc7353fdce98c0 (commit)
via 0ed47de0a1900e15b7c70bfd67fc907ce85ff2cb (commit)
via 6ae5787bbe982a751ed058e7c4ccf77d760627dd (commit)
via 3da2b4dd513ce1108f30a2901bdbab9049845d7e (commit)
via f661e074e4b4b09acc21ffc90155a48e64c11de4 (commit)
via 65ffa0d23485dd6101cbf41d4cc1bd2fd89afee6 (commit)
via 21cdc5cc3a1c5d38a9421349c020d1b1377dfdb3 (commit)
via 453e7ac4d32888e5aca1363048afb4ada7611279 (commit)
via 1592922fd2504f5172fd40e29514372b965e926d (commit)
via dc2fb18aebbf5d72e7e40517c21cc43d5c7e5b0f (commit)
via 2bedd44b4a58222083dc72a85bed101804ee7858 (commit)
via 45ea2c37d6f00d7f0cfe2e342a0a2907c032dc54 (commit)
via 68b33af1da81765fe4f390ca3b6d9a8959736e3c (commit)
via 4ddcc3bd007c6d131b8fb6e0380550a1b8be85b4 (commit)
via 9ede3992a795cecab1644795895f17391c22fe11 (commit)
via e654de7dea5adaa224dd56dc06201a9f902248d3 (commit)
via c6a9d7749573a23bfff82784b124f0602a29f786 (commit)
via 53dfacddb52280654910206f96325c531a480ce0 (commit)
via d16a4ec35b968f8d7af36872a3df62bf7f8fc3d1 (commit)
via 04b0e46c0bfee7922b271d61f8070cd35d4505c9 (commit)
via f378dbd066722c41f39718cf26ec0282645896e8 (commit)
via 1a0a81eb61c625be4681c5b600e07ad9adfc68d9 (commit)
via 81784141da4a239663f094e56a6a77643c8f81cb (commit)
via 5c430ad5edbeb6e799ad5a80ce7f01e6d15abfc3 (commit)
via f9ec0f7c7b3d60c19b258f2c0d95edb7d4a3e8a1 (commit)
via e33d599164b3d1de0617da52ae2b1153238e11d1 (commit)
via 47965690b14b7567183ee041e5ba302b287141e3 (commit)
via 38292a61825ac30e3a589d8b0922c42ad3437055 (commit)
via 36b9d41e18ceda7768fedfc557346b51105b566d (commit)
via 31d7b2369b79150c8d8db697e489216aca50d2fc (commit)
via 93d4730a6135e573efdcd4fe0780119e5d343093 (commit)
via b4c8cff4f3822d78c4c16ae35a2077596a3888ef (commit)
via 1c8b4bfefebe843d295022c5cce0d60007a56269 (commit)
via b56e5b30bdb16a5553c9c45dbe3dca73bc4613d0 (commit)
via c28fd5d9f54d75c77ffb8d1ef3fdd363952ee694 (commit)
via 52abb6303cfee736542f11e4046bb3c89b58ae63 (commit)
via c8fa08e6366e4ce0df1e932c68efef634eba159b (commit)
via 7130da883f823ce837c10cbf6e216a15e1996e5d (commit)
via 739ce253a1702927a5991ff36672df92f9063e65 (commit)
via f1af1f3ba715af99d1fbade8eea193deabb405a5 (commit)
via d95ff3f155636aa484dbba780ed5264335387905 (commit)
via 15a17147855caf469e55b6fc3d3827f496bd0025 (commit)
via 6f8dbd0f299c6c0f97cf24e87116fa32f43c8a0f (commit)
via 41708801415792402d7f83c57aea50dae0550622 (commit)
via ed9fbd276086a6b494f7fd5b573e23f0e467007a (commit)
via 1fdb0ee73a22d8002cbfefe66d70cf22d5d784d8 (commit)
via adca81c006608c1e97b900380ce48834876c2908 (commit)
via ad8d445dd0ba208107eb239405166c5c2070bd8b (commit)
via 170c72d67a741e958611e7267826679b0629286a (commit)
via 128271198f60e6b1f045d6080ef4040760b6ec58 (commit)
via 57e92cc0aff7f42885c0e100116ec85e9eb21dc7 (commit)
via 2f4b6ea6438119eaee2c06c6d97bb66825a174a6 (commit)
via f71a0c646d703351f989c2705e237fa77705c504 (commit)
via 9e812734e020c04ef076f6b2fc38d3f872a0c361 (commit)
via d3e3aa8bb4c6d409c2af01a72270369eb00cc03a (commit)
via 3e0f47f73941c7c1249ee7fb79a880770d7fb76a (commit)
via fa9af9770151197c95bd8366f608bb7b56751243 (commit)
via 23e49e12b90f0360ad02e5d97dd40db50a52b79d (commit)
via 0010be465624ebd2c72aa41e9a56935e76f3da5c (commit)
via 1bf6cdf0b6217de0bfd33116881d8ca252617d13 (commit)
via 0fd36bf98ca106464ef9de56aadc2d6388cb4c83 (commit)
via e0d617df81281066d044c4137beeb3e32921c0d5 (commit)
via 6719c7e0be950cbeba5612b280d859992bda1d02 (commit)
via eab5b84db6c5a0db2e7cc9c53b5433797905a599 (commit)
via 1b9d39ec504ce53be4a9e9ef731a7a4295669800 (commit)
via 5122d4095a0865413c3f80083ef78a54bc3f7ea6 (commit)
via edadf68c0de62bc2dbdce97ff8372f36fd8bec8d (commit)
via 1a56963435c6a771d76c4ab44814564150ba8247 (commit)
via 55330acd18cb29963fc23be6eb59d5a56ab64f9a (commit)
via 11af4fd1af04140c9e64aff58efc5f6661a41cf0 (commit)
via bff3d4786c50018143abe2316c9e837f24c52e81 (commit)
via 91164ce5585a7d659f73ba86da2ffcae6db2f5e6 (commit)
via 7987b09972865cf558dbff4489a198162ec8e3c3 (commit)
via 4d25eba05071e631333c26580ae495c50c6ae895 (commit)
via 1c0e6f80265bb516f5f965eea9b0edac781cbdea (commit)
via 52edf764154ed08161a4683d6b7b7be5a232face (commit)
via cc84b0a02de65a71d80fac9b6f15533e9b0c6037 (commit)
via f68857bea3699eb23c72ddac04a20aa8f42bd551 (commit)
via 07851523e66fe1e045e953d505b97e2cf7f499fc (commit)
via 6306feefed1a1fc462bfff708a47c5fffb351a51 (commit)
via 8d55fd24318cd72d0a624304dfb7c107813e43cf (commit)
via a0bf4ce38c190728f172ee9e2a335248a0ed7a1d (commit)
via e63d7f5a896e05c9a5ee143dbca5fbc7ce9b8950 (commit)
via 7d6667db23ab1724f9e30b148456742a46bfc0e1 (commit)
via 286ae1b0d49368baee845ac2d55ab5369e659951 (commit)
via 25af754fb60cdb8e538eeb2111694bd3cf2bf89e (commit)
via 968d486cb7c77cec9f47a3b155938c5c2cac6747 (commit)
via ff1617fd13a786d41439b7d653e798352e3789b7 (commit)
via bc9715b080230f432420a7796d8ec2c5029e889e (commit)
via db1b2905e965e85882dc7c36903839b0ffbec3f0 (commit)
via 18aa5359405c164ecda151b43efd125028d3a1ef (commit)
via 71fcb404d2cf321b39e898ed21c18a27435a6d34 (commit)
via 07c46c2fd50771f47e70436c3d5e7be801091b93 (commit)
via a318db63500413168e1723119de2eef07c969290 (commit)
via 923f3c0b2a848903c0da0866a7943779bc8bd863 (commit)
via fa2c987ffe91e103a839c4a51ed2a26a1ea23691 (commit)
via b9bde80941d4500821c093b6b28e9752b67b7e3f (commit)
via 6b23d728601ee0972f06c803cce44cccad1aadc6 (commit)
via 6e845d8827c0450396f553cedf2a26448ee35fbf (commit)
via c6544e26eec66ec1c07e52c040a2d4638c941bd6 (commit)
via 25789340a677b38451ce4edb4aa2eb389d69d266 (commit)
via 2b2439252c79f6d1bc6ca1dc38def8ff6f2bf8c7 (commit)
via ebbfe90a30f5ed7bb3929f23d0eecb01b05e163e (commit)
via 100ee5216c3f6402ee08756f35c7af01fa363100 (commit)
via 7a70191fed37bff9ceb0fed6992fdc6c96ff257f (commit)
via 2f88c7ad45869921177bf4dcc477dbdd87c8cda6 (commit)
via 9cd5b8d800ba00b9e0d3d07609853e1221550a2f (commit)
via 088760558f2321ea60b393b37fba0a672ce9f943 (commit)
via 0a3638e4bae9b7ace20bf59f5aa1ee1e1fe50d1a (commit)
via 6643603e8bc40fed0bab6d6a4ca6b6d585be6001 (commit)
via 689b286631a39e2c3273982c8ab5d527f7d6211b (commit)
via 56c895c19be423403c0ddb84f25ea958af9cc371 (commit)
via 0cdd1593225256d36486590cfca23ab35ae15879 (commit)
via a6a3d1b172cfd5ba9db4ddb3b8ffcbf673d8e31f (commit)
via 60c3dcef1055d54231708188639056248258e716 (commit)
via d6859a67235140aeb14a2224198b85e4d65ed71d (commit)
via 162e15111002d1e857222eb309018818b637fbad (commit)
via 6296009e6fe929f80676b6f21ed838361e49358c (commit)
via 0beb60c748a95def56529ccda897fadaaaf926af (commit)
via 61977ad7fa96c33c4b6d45b11b0e31530d5a491b (commit)
via 0e48c8a489e035440dc9f37fab485f9ecf3da8ed (commit)
via 0bfaa798f73e2519b9ce53576f01e4fac2cae842 (commit)
via 57a94c94601e7b6598e2cb73c3c9d252bd675d57 (commit)
via b432bc4116df51b0356d9e3e88943e777ae4137c (commit)
via 57dc3faeae674c5d332dc4929154ce41bf2de891 (commit)
via 6c4729c0a6329a83faa095f10639c9f391bd33ba (commit)
via e5a8ffb5a895a5e51e2d5c3ea4ba00e035746d9f (commit)
via 99e857e87975a05b97942a5b98efed4b425c1e66 (commit)
via 0db71e6a18ea01c52c6fe9280c15ddfc7aebd163 (commit)
via 1f66bb6dd40f0119b985b3a76a224bf6490e24c8 (commit)
via cbd1f113584000f6f6b12e9c155987130d95abf2 (commit)
via 929f452fc947d38dd02f1a66d4ae0c588162fdfe (commit)
via 1e0e7c3c3a0190c1700499671a70d242ad0e4f14 (commit)
via 1f12f3cd5bc0b3b39545680c4dd772303c62d43f (commit)
via 908b273b78455fc4893d3e9d3dd9c4ca42e934c6 (commit)
via 3e8b461db077bf467280cec8c6c2a03dd0df0c6c (commit)
via 949d3d2f07686be8228f3fdb44a756e280dfbd85 (commit)
via e04531f57054b71fa83ad6bc8467e31df311d39c (commit)
via 30987308b9623d0cbd4aa010c14e5238dd09e023 (commit)
via 6b3132f71b397e4925b80ed1dc1e3cb834be1294 (commit)
via 865669e98402f25c41f35e3957bdb9190513bf6f (commit)
via 58c0a8ef0152662b581b7a13fecf92ed0b0021e8 (commit)
via 8c38fe10458178dee5a0045ae2bfa3da986d6a0c (commit)
via e007ac3e1fb0595497757b3590c3023c825965d4 (commit)
via fcb63453e3d835fdca05146482d001035cdbd6af (commit)
via cd373315919dd5a20440d27d49c3c0e47ebb3f19 (commit)
via 277be10c905a1b592cc8ddb2ded6f066c619a0f2 (commit)
via 8eb4eb3e63b8f19155ae8f021436c881d75706c6 (commit)
via e72bc121a7f7e4a85f889a6c1c4eec5bafd51325 (commit)
via 075eef9a1fe8c61c69532ce8b9dc05eda102f08d (commit)
via 2e95197a4b47847b4ec85e83bdb1fb5edf685cd0 (commit)
via 7c1277b0a6750ab51611971adf77a9c2740511cc (commit)
via 994827bfe0a9b77d3bf724768a2403965cb8ad57 (commit)
via 39ebe9d1fd4d04690162c92ebb640f36fa2586ca (commit)
via 9227b2de28f57892f5570ca0f30f0f6c651922be (commit)
via 1d5bd47c50ca03b87972977bb3bdd8d798148698 (commit)
via 328f8781d99bdf1c78abbee743b85244ac55f6b8 (commit)
via 16fc37014cf72d8ca5b3f9461504be2c69a37702 (commit)
via d575fbaa4083182e8d6602bb042934451ae06c65 (commit)
via bd5494fa3a4aded378c5e2167f6c9e1b7eaf2035 (commit)
via 8b555fcb29378dcb0db476bf8f53804acae8fe77 (commit)
via 93fb5cab3e4a0752e2471931e171937392c71ae0 (commit)
via caa5bb9a9b852631d1548460daef2b93cf48c64c (commit)
via 227d01775bdb4dab84c97e5858c0812e2aa845c2 (commit)
via 30198a34b9e464ffa2947955e7dd70a191e39682 (commit)
via c0a328b2d94321966d5ebe97dd8f2058d6d76176 (commit)
via b45a54e8ccf0c947b7f3979ed1083260c47be453 (commit)
via 8c455b3a39a588e11a86523056caa9c379b60a67 (commit)
via 31b153df2003831950bf3fcd6a4da782dc579eb1 (commit)
via 10b434be10f3fa900760180feb030907484c9b6f (commit)
via db2fa0cd177d28c49a9c99aabe44bd9c47c6b78d (commit)
via 09e8c9e8c55571196fbd297fda46e95400aa0990 (commit)
via 2bf7dad904d16760987d3a0b27c02bb8a2b50e55 (commit)
via 876dd74d15bcaa619602bcb60fc769d541923048 (commit)
via 17f327dc2a155c493dfe265cb6fac7b09a7d8836 (commit)
via 96391341079418eeb6c3dd6f0da85239d19f1749 (commit)
via 9427bd4115b59e4498d13dc0a83fc2953b104266 (commit)
via 588bffd5077a9e6cc741100cdc721b444d80c4ca (commit)
via 106ec83cb515ddcc84834ba336dc39679377a8fd (commit)
from d4b87aa5746e3039b1ce78e0f5e9d87506159881 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 8958f15ce8c14539aaa8b3e6fa11cc58f7f90b41
Merge: d4b87aa 0a4607b
Author: Jeremy C. Reed <jreed at ISC.org>
Date: Fri Jun 15 10:15:17 2012 -0500
[trac1687]Merge branch 'master' into trac1687
merge in master; fix one conflict
-----------------------------------------------------------------------
Summary of changes:
AUTHORS | 21 +
ChangeLog | 24 +
Makefile.am | 4 +
compatcheck/Makefile.am | 17 +-
configure.ac | 3 +
doc/Doxyfile | 2 +-
doc/guide/Makefile.am | 10 +-
doc/guide/bind10-guide.xml | 4 +-
src/bin/auth/auth_srv.cc | 5 +-
src/bin/bind10/bind10_src.py.in | 24 +
src/bin/bind10/tests/Makefile.am | 1 +
src/bin/bind10/tests/bind10_test.py.in | 35 +
src/bin/cmdctl/tests/Makefile.am | 1 +
src/bin/dbutil/tests/Makefile.am | 1 +
src/bin/ddns/ddns.py.in | 462 ++++++-
src/bin/ddns/ddns.spec | 24 +-
src/bin/ddns/ddns_messages.mes | 139 ++
src/bin/ddns/tests/Makefile.am | 1 +
src/bin/ddns/tests/ddns_test.py | 940 ++++++++++++-
src/bin/dhcp4/Makefile.am | 6 +
src/bin/dhcp4/tests/Makefile.am | 6 +
src/bin/dhcp6/Makefile.am | 6 +
src/bin/dhcp6/tests/Makefile.am | 7 +
src/bin/stats/tests/Makefile.am | 1 +
src/bin/xfrin/xfrin.py.in | 52 +-
src/bin/xfrin/xfrin_messages.mes | 6 -
src/lib/asiolink/io_endpoint.h | 4 +-
src/lib/datasrc/Makefile.am | 19 +-
src/lib/datasrc/client_list.cc | 162 +++
src/lib/datasrc/client_list.h | 289 ++++
src/lib/datasrc/database.cc | 9 +-
src/lib/datasrc/memory_datasrc.cc | 2 +-
src/lib/datasrc/static.zone.pre | 12 +
src/lib/datasrc/static_datasrc_link.cc | 62 +
src/lib/datasrc/tests/Makefile.am | 2 +
src/lib/datasrc/tests/client_list_unittest.cc | 475 +++++++
src/lib/datasrc/tests/database_unittest.cc | 56 +-
src/lib/datasrc/tests/factory_unittest.cc | 56 +
src/lib/datasrc/tests/testdata/static.zone | 2 +
src/lib/dhcp/Makefile.am | 12 +
src/lib/dhcp/iface_mgr.cc | 8 +
src/lib/dhcp/option.cc | 8 +
src/lib/dhcp/option.h | 9 +
src/lib/dhcp/pkt4.cc | 5 +
src/lib/dhcp/pkt4.h | 47 +
src/lib/dhcp/pkt6.cc | 6 +
src/lib/dhcp/pkt6.h | 47 +
src/lib/dhcp/tests/Makefile.am | 11 +-
src/lib/dhcp/tests/option_unittest.cc | 29 +-
src/lib/dhcp/tests/pkt4_unittest.cc | 32 +-
src/lib/dhcp/tests/pkt6_unittest.cc | 27 +
src/lib/dns/labelsequence.h | 4 +-
src/lib/dns/message.cc | 6 +-
src/lib/dns/python/message_python.cc | 245 ++--
src/lib/dns/python/name_python.cc | 4 +-
src/lib/dns/python/pydnspp_common.h | 5 +
src/lib/dns/python/rdata_python.cc | 1 +
src/lib/dns/python/rrclass_python.cc | 4 +-
src/lib/dns/python/tests/message_python_test.py | 15 +
src/lib/dns/rdata.cc | 2 +-
src/lib/dns/rdata/any_255/tsig_250.cc | 33 +-
src/lib/dns/rdata/ch_3/a_1.cc | 2 +-
src/lib/dns/rdata/generic/dlv_32769.cc | 2 +-
src/lib/dns/rdata/generic/dnskey_48.cc | 2 +-
src/lib/dns/rdata/generic/ds_43.cc | 2 +-
src/lib/dns/rdata/generic/hinfo_13.cc | 2 +-
src/lib/dns/rdata/generic/nsec3_50.cc | 2 +-
src/lib/dns/rdata/generic/nsec3param_51.cc | 2 +-
src/lib/dns/rdata/generic/nsec_47.cc | 2 +-
src/lib/dns/rdata/generic/opt_41.cc | 2 +-
src/lib/dns/rdata/generic/ptr_12.cc | 2 +-
src/lib/dns/rdata/generic/rrsig_46.cc | 2 +-
src/lib/dns/rdata/generic/soa_6.cc | 2 +-
src/lib/dns/rdata/generic/sshfp_44.cc | 2 +-
src/lib/dns/rdata/hs_4/a_1.cc | 2 +-
src/lib/dns/rdata/in_1/a_1.cc | 2 +-
src/lib/dns/rdata/in_1/aaaa_28.cc | 2 +-
src/lib/dns/rdata/in_1/dhcid_49.cc | 2 +-
src/lib/dns/rdata/in_1/srv_33.cc | 12 +-
src/lib/dns/rrclass.cc | 2 +-
src/lib/dns/rrparamregistry-placeholder.cc | 8 +-
src/lib/dns/rrttl.cc | 2 +-
src/lib/dns/rrtype.cc | 2 +-
src/lib/dns/tests/message_unittest.cc | 2 +
src/lib/log/Makefile.am | 1 +
src/lib/log/compiler/message.cc | 10 +-
src/lib/log/logger.cc | 7 +
src/lib/log/logger.h | 23 +
src/lib/log/logger_impl.cc | 35 +-
src/lib/log/logger_impl.h | 18 +-
src/lib/log/logger_manager.cc | 8 +
src/lib/log/logger_unittest_support.cc | 3 +
src/lib/log/message_dictionary.cc | 6 +-
src/lib/log/message_exception.h | 9 +
src/lib/log/tests/.gitignore | 4 +
src/lib/log/tests/Makefile.am | 19 +
.../lib/log/tests/log_test_messages.mes | 24 +-
src/lib/log/tests/logger_example.cc | 10 +-
src/lib/log/tests/logger_lock_test.cc | 64 +
src/lib/log/tests/logger_lock_test.sh.in | 46 +
src/lib/log/tests/logger_unittest.cc | 66 +
src/lib/log/tests/message_dictionary_unittest.cc | 11 +-
src/lib/log/tests/run_initializer_unittests.cc | 1 +
src/lib/log/tests/run_unittests.cc | 1 +
src/lib/python/isc/bind10/tests/Makefile.am | 1 +
src/lib/python/isc/config/tests/Makefile.am | 1 +
src/lib/python/isc/ddns/libddns_messages.mes | 76 +-
src/lib/python/isc/ddns/session.py | 603 ++++++++-
src/lib/python/isc/ddns/tests/session_tests.py | 1377 ++++++++++++++++----
src/lib/python/isc/ddns/tests/zone_config_tests.py | 25 +-
src/lib/python/isc/ddns/zone_config.py | 11 +-
src/lib/python/isc/log/tests/Makefile.am | 3 +
src/lib/python/isc/server_common/Makefile.am | 3 +-
src/lib/python/isc/server_common/auth_command.py | 90 ++
src/lib/python/isc/server_common/dns_tcp.py | 280 ++++
.../__init__.py => server_common/logger.py} | 9 +-
.../isc/server_common/server_common_messages.mes | 29 +
src/lib/python/isc/server_common/tests/Makefile.am | 3 +-
.../python/isc/server_common/tests/dns_tcp_test.py | 246 ++++
src/lib/python/isc/server_common/tsig_keyring.py | 2 +-
src/lib/python/isc/xfrin/diff.py | 220 +++-
src/lib/python/isc/xfrin/tests/Makefile.am | 1 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 577 +++++++-
src/lib/resolve/tests/recursive_query_unittest.cc | 56 +-
src/lib/testutils/socket_request.h | 2 +-
src/lib/testutils/testdata/rwtest.sqlite3 | Bin 17408 -> 17408 bytes
src/lib/util/Makefile.am | 4 +
src/lib/util/buffer.h | 4 +-
src/lib/util/interprocess_sync.h | 149 +++
src/lib/util/interprocess_sync_file.cc | 130 ++
src/lib/util/interprocess_sync_file.h | 91 ++
.../resource.cc => interprocess_sync_null.cc} | 35 +-
src/lib/util/interprocess_sync_null.h | 64 +
src/lib/util/tests/Makefile.am | 2 +
.../util/tests/interprocess_sync_file_unittest.cc | 174 +++
.../util/tests/interprocess_sync_null_unittest.cc | 76 ++
src/lib/util/tests/run_unittests.cc | 2 +
tests/tools/perfdhcp/Makefile.am | 19 +
tests/tools/perfdhcp/command_options.cc | 2 +-
tests/tools/perfdhcp/command_options.h | 2 +-
tests/tools/perfdhcp/localized_option.h | 123 ++
tests/tools/perfdhcp/perf_pkt4.cc | 62 +
tests/tools/perfdhcp/perf_pkt4.h | 113 ++
tests/tools/perfdhcp/perf_pkt6.cc | 64 +
tests/tools/perfdhcp/perf_pkt6.h | 113 ++
tests/tools/perfdhcp/pkt_transform.cc | 222 ++++
tests/tools/perfdhcp/pkt_transform.h | 139 ++
tests/tools/perfdhcp/tests/Makefile.am | 14 +
.../perfdhcp/tests/command_options_unittest.cc | 2 +-
.../perfdhcp/tests/localized_option_unittest.cc | 48 +
tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc | 384 ++++++
tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc | 327 +++++
152 files changed, 9184 insertions(+), 736 deletions(-)
create mode 100644 src/lib/datasrc/client_list.cc
create mode 100644 src/lib/datasrc/client_list.h
create mode 100644 src/lib/datasrc/static.zone.pre
create mode 100644 src/lib/datasrc/static_datasrc_link.cc
create mode 100644 src/lib/datasrc/tests/client_list_unittest.cc
create mode 100644 src/lib/datasrc/tests/testdata/static.zone
copy tests/system/ixfr/clean_ns.sh => src/lib/log/tests/log_test_messages.mes (55%)
create mode 100644 src/lib/log/tests/logger_lock_test.cc
create mode 100755 src/lib/log/tests/logger_lock_test.sh.in
create mode 100644 src/lib/python/isc/server_common/auth_command.py
create mode 100644 src/lib/python/isc/server_common/dns_tcp.py
copy src/lib/python/isc/{testutils/__init__.py => server_common/logger.py} (79%)
create mode 100644 src/lib/python/isc/server_common/tests/dns_tcp_test.py
create mode 100644 src/lib/util/interprocess_sync.h
create mode 100644 src/lib/util/interprocess_sync_file.cc
create mode 100644 src/lib/util/interprocess_sync_file.h
copy src/lib/util/{unittests/resource.cc => interprocess_sync_null.cc} (69%)
create mode 100644 src/lib/util/interprocess_sync_null.h
create mode 100644 src/lib/util/tests/interprocess_sync_file_unittest.cc
create mode 100644 src/lib/util/tests/interprocess_sync_null_unittest.cc
create mode 100644 tests/tools/perfdhcp/localized_option.h
create mode 100644 tests/tools/perfdhcp/perf_pkt4.cc
create mode 100644 tests/tools/perfdhcp/perf_pkt4.h
create mode 100644 tests/tools/perfdhcp/perf_pkt6.cc
create mode 100644 tests/tools/perfdhcp/perf_pkt6.h
create mode 100644 tests/tools/perfdhcp/pkt_transform.cc
create mode 100644 tests/tools/perfdhcp/pkt_transform.h
create mode 100644 tests/tools/perfdhcp/tests/localized_option_unittest.cc
create mode 100644 tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc
create mode 100644 tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc
-----------------------------------------------------------------------
diff --git a/AUTHORS b/AUTHORS
index e69de29..67cb090 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -0,0 +1,21 @@
+Chen Zhengzhang
+Dmitriy Volodin
+Evan Hunt
+Haidong Wang
+Haikuo Zhang
+Han Feng
+Jelte Jansen
+Jeremy C. Reed
+Xie Jiagui
+Jin Jian
+JINMEI Tatuya
+Kazunori Fujiwara
+Michael Graff
+Michal Vaner
+Mukund Sivaraman
+Naoki Kambe
+Shane Kerr
+Shen Tingting
+Stephen Morris
+Yoshitaka Aharen
+Zhang Likun
diff --git a/ChangeLog b/ChangeLog
index c849b55..9408e3d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+445. [bug]* jinmei
+ The pre-install check for older SQLite3 DB now refers to the DB
+ file with the prefix of DESTDIR. This ensures that 'make install'
+ with specific DESTDIR works regardless of the version of the DB
+ file installed in the default path.
+ (Trac #1982, git 380b3e8ec02ef45555c0113ee19329fe80539f71)
+
+444. [bug] jinmei
+ libdatasrc: fixed ZoneFinder for database-based data sources so
+ that it handles type DS query correctly, i.e., treating it as
+ authoritative data even on a delegation point.
+ (Trac #1912, git 7130da883f823ce837c10cbf6e216a15e1996e5d)
+
+443. [func]* muks
+ The logger now uses a lockfile named `logger_lockfile' that is
+ created in the local state directory to mutually separate
+ individual logging operations from various processes. This is
+ done so that log messages from different processes don't mix
+ together in the middle of lines. The `logger_lockfile` is created
+ with file permission mode 0660. BIND 10's local state directory
+ should be writable and perhaps have g+s mode bit so that the
+ `logger_lockfile` can be opened by a group of processes.
+ (Trac #1704, git ad8d445dd0ba208107eb239405166c5c2070bd8b)
+
442. [func] tomek
b10-dhcp4, b10-dhcp6: Both DHCP servers now accept -p parameter
that can be used to specify listening port number. This capability
diff --git a/Makefile.am b/Makefile.am
index 6871f82..c7f7425 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -16,6 +16,8 @@ DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations
# Use same --with-gtest flag if set
DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG)
+dist_doc_DATA = AUTHORS COPYING ChangeLog README
+
clean-cpp-coverage:
@if [ $(USE_LCOV) = yes ] ; then \
$(LCOV) --directory . --zerocounters; \
@@ -409,3 +411,5 @@ EXTRA_DIST += ext/coroutine/coroutine.h
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = dns++.pc
+
+CLEANFILES = $(abs_top_builddir)/logger_lockfile
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
index 15ef017..6cc4036 100644
--- a/compatcheck/Makefile.am
+++ b/compatcheck/Makefile.am
@@ -1,12 +1,17 @@
-# We're going to abuse install-data-local for a pre-install check.
-# This is to be considered a short term hack and is expected to be removed
-# in a near future version.
+# We're going to abuse install-data-local for a pre-install check. This may
+# not be the cleanest way to do this type of job, but that's the least ugly
+# one we've found.
+#
+# Note also that if any test needs to examine some file that has possibly
+# been installed before (e.g., older DB or configuration file), it should be
+# referenced with the prefix of DESTDIR. Otherwise
+# 'make DESTDIR=/somewhere install' may not work.
install-data-local:
- if test -e $(localstatedir)/$(PACKAGE)/zone.sqlite3; then \
+ if test -e $(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3; then \
$(SHELL) $(top_builddir)/src/bin/dbutil/run_dbutil.sh --check \
- $(localstatedir)/$(PACKAGE)/zone.sqlite3 || \
+ $(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3 || \
(echo "\nSQLite3 DB file schema version is old. " \
"Please run: " \
"$(abs_top_builddir)/src/bin/dbutil/run_dbutil.sh --upgrade " \
- "$(localstatedir)/$(PACKAGE)/zone.sqlite3"; exit 1) \
+ "$(DESTDIR)$(localstatedir)/$(PACKAGE)/zone.sqlite3"; exit 1) \
fi
diff --git a/configure.ac b/configure.ac
index bfdbfe5..574d502 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1209,6 +1209,7 @@ AC_OUTPUT([doc/version.ent
src/lib/log/tests/destination_test.sh
src/lib/log/tests/init_logger_test.sh
src/lib/log/tests/local_file_test.sh
+ src/lib/log/tests/logger_lock_test.sh
src/lib/log/tests/severity_test.sh
src/lib/log/tests/tempdir.h
src/lib/util/python/mkpywrapper.py
@@ -1257,6 +1258,7 @@ AC_OUTPUT([doc/version.ent
chmod +x src/lib/log/tests/destination_test.sh
chmod +x src/lib/log/tests/init_logger_test.sh
chmod +x src/lib/log/tests/local_file_test.sh
+ chmod +x src/lib/log/tests/logger_lock_test.sh
chmod +x src/lib/log/tests/severity_test.sh
chmod +x src/lib/util/python/mkpywrapper.py
chmod +x src/lib/util/python/gen_wiredata.py
@@ -1314,6 +1316,7 @@ Developer:
Google Tests: $gtest_path
C++ Code Coverage: $USE_LCOV
Python Code Coverage: $USE_PYCOVERAGE
+ Logger checks: $enable_logger_checks
Generate Manuals: $enable_man
END
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 8730ae4..6d91bf2 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -579,7 +579,7 @@ INPUT = ../src/lib/exceptions ../src/lib/cc \
../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
../src/bin/sockcreator/ ../src/lib/util/ ../src/lib/util/io/ \
../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6 ../src/lib/dhcp \
- ../src/bin/dhcp4 devel
+ ../src/bin/dhcp4 ../tests/tools/perfdhcp devel
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index a9903ff..94dfeb0 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -2,13 +2,15 @@
if ENABLE_MAN
# generated documentation
-DOCS = bind10-messages.html bind10-guide.html bind10-guide.txt
+HTMLDOCS = bind10-guide.html bind10-messages.html
+DOCS = bind10-guide.txt
-doc_DATA = $(DOCS) bind10-guide.css
+dist_doc_DATA = $(DOCS)
+dist_html_DATA = $(HTMLDOCS) bind10-guide.css
# TODO: okay to include the generated bind10-messages.xml in dist tarfile too?
-EXTRA_DIST = bind10-guide.xml bind10-messages.xml $(doc_DATA)
-CLEANFILES = $(DOCS) bind10-messages.xml
+EXTRA_DIST = bind10-guide.xml bind10-messages.xml
+CLEANFILES = $(HTMLDOCS) $(DOCS) bind10-messages.xml
bind10-guide.html: bind10-guide.xml
xsltproc --novalid --xinclude --nonet \
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 7839ea7..30cc9a6 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -131,7 +131,9 @@
and <command>b10-zonemgr</command> components require the
libpython3 library and the Python _sqlite3.so module
(which is included with Python).
- The Python module needs to be built for the corresponding Python 3.
+ The <command>b10-stats-httpd</command> component uses the
+ Python pyexpat.so module.
+ The Python modules need to be built for the corresponding Python 3.
</para>
<!-- TODO: this will change ... -->
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 2bf43ff..2a47c38 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -390,8 +390,9 @@ private:
AuthSrv* server_;
};
-AuthSrv::AuthSrv(const bool use_cache, AbstractXfroutClient& xfrout_client,
- BaseSocketSessionForwarder& ddns_forwarder)
+AuthSrv::AuthSrv(const bool use_cache,
+ isc::xfr::AbstractXfroutClient& xfrout_client,
+ isc::util::io::BaseSocketSessionForwarder& ddns_forwarder)
{
impl_ = new AuthSrvImpl(use_cache, xfrout_client, ddns_forwarder);
checkin_ = new ConfigChecker(this);
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 37b845d..08e16c6 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -64,6 +64,7 @@ import posix
import copy
from bind10_config import LIBEXECPATH
+import bind10_config
import isc.cc
import isc.util.process
import isc.net.parse
@@ -1122,6 +1123,28 @@ def unlink_pid_file(pid_file):
if error.errno is not errno.ENOENT:
raise
+def remove_lock_files():
+ """
+ Remove various lock files which were created by code such as in the
+ logger. This function should be called after BIND 10 shutdown.
+ """
+
+ lockfiles = ["logger_lockfile"]
+
+ lpath = bind10_config.DATA_PATH
+ if "B10_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_FROM_BUILD"]
+ if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+ lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
+ if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"]
+
+ for f in lockfiles:
+ fname = lpath + '/' + f
+ if os.path.isfile(fname):
+ os.unlink(fname)
+
+ return
def main():
global options
@@ -1201,6 +1224,7 @@ def main():
finally:
# Clean up the filesystem
unlink_pid_file(options.pid_file)
+ remove_lock_files()
if boss_of_bind is not None:
boss_of_bind.remove_socket_srv()
sys.exit(boss_of_bind.exitcode)
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index d54ee56..a5e3fab 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -23,6 +23,7 @@ endif
chmod +x $(abs_builddir)/$$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 84a9da9..3b80fb5 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -1463,6 +1463,41 @@ class SocketSrvTest(unittest.TestCase):
self.assertEqual({}, self.__boss._unix_sockets)
self.assertTrue(sock.closed)
+class TestFunctions(unittest.TestCase):
+ def setUp(self):
+ self.lockfile_testpath = \
+ "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test"
+ self.assertFalse(os.path.exists(self.lockfile_testpath))
+ os.mkdir(self.lockfile_testpath)
+ self.assertTrue(os.path.isdir(self.lockfile_testpath))
+
+ def tearDown(self):
+ os.rmdir(self.lockfile_testpath)
+ self.assertFalse(os.path.isdir(self.lockfile_testpath))
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@"
+
+ def test_remove_lock_files(self):
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath
+
+ # create lockfiles for the testcase
+ lockfiles = ["logger_lockfile"]
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.exists(fname))
+ open(fname, "w").close()
+ self.assertTrue(os.path.isfile(fname))
+
+ # first call should clear up all the lockfiles
+ bind10_src.remove_lock_files()
+
+ # check if the lockfiles exist
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.isfile(fname))
+
+ # second call should not assert anyway
+ bind10_src.remove_lock_files()
+
if __name__ == '__main__':
# store os.environ for test_unchanged_environment
original_os_environ = copy.deepcopy(os.environ)
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 89d89ea..b5b65f6 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -22,5 +22,6 @@ endif
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/dbutil/tests/Makefile.am b/src/bin/dbutil/tests/Makefile.am
index c03b262..b4231b3 100644
--- a/src/bin/dbutil/tests/Makefile.am
+++ b/src/bin/dbutil/tests/Makefile.am
@@ -3,4 +3,5 @@ SUBDIRS = . testdata
# Tests of the update script.
check-local:
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(SHELL) $(abs_builddir)/dbutil_test.sh
diff --git a/src/bin/ddns/ddns.py.in b/src/bin/ddns/ddns.py.in
index de69100..76ffca0 100755
--- a/src/bin/ddns/ddns.py.in
+++ b/src/bin/ddns/ddns.py.in
@@ -18,13 +18,24 @@
import sys; sys.path.append ('@@PYTHONPATH@@')
import isc
+from isc.acl.dns import REQUEST_LOADER
import bind10_config
from isc.dns import *
+import isc.ddns.session
+from isc.ddns.zone_config import ZoneConfig
+from isc.ddns.logger import ClientFormatter, ZoneFormatter
from isc.config.ccsession import *
-from isc.cc import SessionError, SessionTimeout
+from isc.config.module_spec import ModuleSpecError
+from isc.cc import SessionError, SessionTimeout, ProtocolError
import isc.util.process
import isc.util.cio.socketsession
+from isc.notify.notify_out import ZONE_NEW_DATA_READY_CMD
+import isc.server_common.tsig_keyring
+from isc.server_common.dns_tcp import DNSTCPContext
+from isc.datasrc import DataSourceClient
+from isc.server_common.auth_command import auth_loadzone_command
import select
+import time
import errno
from isc.log_messages.ddns_messages import *
@@ -39,26 +50,39 @@ isc.log.init("b10-ddns")
logger = isc.log.Logger("ddns")
TRACE_BASIC = logger.DBGLVL_TRACE_BASIC
+# Well known path settings. We need to define
+# SPECFILE_LOCATION: ddns configuration spec file
+# SOCKET_FILE: Unix domain socket file to communicate with b10-auth
+# AUTH_SPECFILE_LOCATION: b10-auth configuration spec file (tentatively
+# necessary for sqlite3-only-and-older-datasrc-API stuff). This should be
+# gone once we migrate to the new API and start using generalized config.
+#
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "ddns" + os.sep + "ddns.spec"
+ SPECFILE_PATH = os.environ["B10_FROM_SOURCE"] + "/src/bin/ddns"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "ddns.spec"
- SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
- .replace("${prefix}", PREFIX)
+ SPECFILE_PATH = "@datadir@/@PACKAGE@".replace("${datarootdir}", DATAROOTDIR)
+ SPECFILE_PATH = SPECFILE_PATH.replace("${prefix}", PREFIX)
-SOCKET_FILE = bind10_config.DATA_PATH + '/ddns_socket'
if "B10_FROM_BUILD" in os.environ:
if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
- SOCKET_FILE = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"] + \
- "/ddns_socket"
+ SOCKET_FILE_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
else:
- SOCKET_FILE = os.environ["B10_FROM_BUILD"] + "/ddns_socket"
+ SOCKET_FILE_PATH = os.environ["B10_FROM_BUILD"]
+else:
+ SOCKET_FILE_PATH = bind10_config.DATA_PATH
+
+SPECFILE_LOCATION = SPECFILE_PATH + "/ddns.spec"
+SOCKET_FILE = SOCKET_FILE_PATH + '/ddns_socket'
+
+# Cooperating or dependency modules
+AUTH_MODULE_NAME = 'Auth'
+XFROUT_MODULE_NAME = 'Xfrout'
+ZONEMGR_MODULE_NAME = 'Zonemgr'
isc.util.process.rename()
@@ -93,7 +117,55 @@ def clear_socket():
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
+def get_datasrc_client(cc_session):
+ '''Return data source client for update requests.
+
+ This is supposed to have a very short lifetime and should soon be replaced
+ with generic data source configuration framework. Based on that
+ observation we simply hardcode everything except the SQLite3 database file,
+ which will be retrieved from the auth server configuration (this behavior
+ will also be deprecated). When something goes wrong with it this function
+ still returns a dummy client so that the caller doesn't have to bother
+ to handle the error (which would also have to be replaced anyway).
+ The caller will subsequently call its find_zone method via an update
+ session object, which will result in an exception, and then result in
+ a SERVFAIL response.
+
+ Once we are ready for introducing the general framework, the whole
+ function will simply be removed.
+
+ '''
+ HARDCODED_DATASRC_CLASS = RRClass.IN()
+ file, is_default = cc_session.get_remote_config_value("Auth",
+ "database_file")
+ # See xfrout.py:get_db_file() for this trick:
+ if is_default and "B10_FROM_BUILD" in os.environ:
+ file = os.environ["B10_FROM_BUILD"] + "/bind10_zones.sqlite3"
+ datasrc_config = '{ "database_file": "' + file + '"}'
+ try:
+ return (HARDCODED_DATASRC_CLASS,
+ DataSourceClient('sqlite3', datasrc_config), file)
+ except isc.datasrc.Error as ex:
+ class DummyDataSourceClient:
+ def __init__(self, ex):
+ self.__ex = ex
+ def find_zone(self, zone_name):
+ raise isc.datasrc.Error(self.__ex)
+ return (HARDCODED_DATASRC_CLASS, DummyDataSourceClient(ex), file)
+
+def add_pause(sec):
+ '''Pause a specified period for inter module synchronization.
+
+ This is a trivial wrapper of time.sleep, but defined as a separate function
+ so tests can customize it.
+ '''
+ time.sleep(sec)
+
class DDNSServer:
+ # The number of TCP clients that can be handled by the server at the same
+ # time (this should be configurable parameter).
+ TCP_CLIENTS = 10
+
def __init__(self, cc_session=None):
'''
Initialize the DDNS Server.
@@ -110,8 +182,32 @@ class DDNSServer:
self.config_handler,
self.command_handler)
+ # Initialize configuration with defaults. Right now 'zones' is the
+ # only configuration, so we simply directly set it here.
self._config_data = self._cc.get_full_config()
+ self._zone_config = self.__update_zone_config(
+ self._cc.get_default_value('zones'))
self._cc.start()
+
+ # Internal attributes derived from other modules. They will be
+ # initialized via dd_remote_xxx below and will be kept updated
+ # through their callbacks. They are defined as 'protected' so tests
+ # can examine them; but they are essentially private to the class.
+ #
+ # Datasource client used for handling update requests: when set,
+ # should a tuple of RRClass and DataSourceClient. Constructed and
+ # maintained based on auth configuration.
+ self._datasrc_info = None
+ # A set of secondary zones, retrieved from zonemgr configuration.
+ self._secondary_zones = None
+
+ # Get necessary configurations from remote modules.
+ for mod in [(AUTH_MODULE_NAME, self.__auth_config_handler),
+ (ZONEMGR_MODULE_NAME, self.__zonemgr_config_handler)]:
+ self.__add_remote_module(mod[0], mod[1])
+ # This should succeed as long as cfgmgr is up.
+ isc.server_common.tsig_keyring.init_keyring(self._cc)
+
self._shutdown = False
# List of the session receivers where we get the requests
self._socksession_receivers = {}
@@ -120,12 +216,54 @@ class DDNSServer:
self._listen_socket.bind(SOCKET_FILE)
self._listen_socket.listen(16)
+ # Create reusable resources
+ self.__request_msg = Message(Message.PARSE)
+ self.__response_renderer = MessageRenderer()
+
+ # The following attribute(s) are essentially private, but defined as
+ # "protected" so that test code can customize/inspect them.
+ # They should not be overridden/referenced for any other purposes.
+ #
+ # DDNS Protocol handling class.
+ self._UpdateSessionClass = isc.ddns.session.UpdateSession
+ # Outstanding TCP context: fileno=>(context_obj, dst)
+ self._tcp_ctxs = {}
+
+ class InternalError(Exception):
+ '''Exception for internal errors in an update session.
+
+ This exception is expected to be caught within the server class,
+        only used for controlling the code flow.
+
+ '''
+ pass
+
def config_handler(self, new_config):
'''Update config data.'''
- # TODO: Handle exceptions and turn them to an error response
- # (once we have any configuration)
- answer = create_answer(0)
- return answer
+ try:
+ if 'zones' in new_config:
+ self._zone_config = \
+ self.__update_zone_config(new_config['zones'])
+ return create_answer(0)
+ except Exception as ex:
+ # We catch any exception here. That includes any syntax error
+ # against the configuration spec. The config interface is too
+ # complicated and it's not clear how much validation is performed
+ # there, so, while assuming it's unlikely to happen, we act
+ # proactively.
+ logger.error(DDNS_CONFIG_HANDLER_ERROR, ex)
+ return create_answer(1, "Failed to handle new configuration: " +
+ str(ex))
+
+ def __update_zone_config(self, new_zones_config):
+ '''Handle zones configuration update.'''
+ new_zones = {}
+ for zone_config in new_zones_config:
+ origin = Name(zone_config['origin'])
+ rrclass = RRClass(zone_config['class'])
+ update_acl = zone_config['update_acl']
+ new_zones[(origin, rrclass)] = REQUEST_LOADER.load(update_acl)
+ return new_zones
def command_handler(self, cmd, args):
'''
@@ -141,6 +279,88 @@ class DDNSServer:
answer = create_answer(1, "Unknown command: " + str(cmd))
return answer
+ def __add_remote_module(self, mod_name, callback):
+ '''Register interest in other module's config with a callback.'''
+
+ # Due to startup timing, add_remote_config can fail. We could make it
+ # more sophisticated, but for now we simply retry a few times, each
+        # separated by a short period (3 times and 1 sec, arbitrarily chosen,
+ # and hardcoded for now). In practice this should be more than
+ # sufficient, but if it turns out to be a bigger problem we can
+ # consider more elegant solutions.
+ for n_try in range(0, 3):
+ try:
+ # by_name() version can fail with ModuleSpecError in getting
+ # the module spec because cfgmgr returns a "successful" answer
+ # with empty data if it cannot find the specified module.
+ # This seems to be a deviant behavior (see Trac #2039), but
+ # we need to deal with it.
+ self._cc.add_remote_config_by_name(mod_name, callback)
+ return
+ except (ModuleSpecError, ModuleCCSessionError) as ex:
+ logger.warn(DDNS_GET_REMOTE_CONFIG_FAIL, mod_name, n_try + 1,
+ ex)
+ last_ex = ex
+ add_pause(1)
+ raise last_ex
+
+ def __auth_config_handler(self, new_config, module_config):
+ logger.info(DDNS_RECEIVED_AUTH_UPDATE)
+
+ # If we've got the config before and the new config doesn't update
+ # the DB file, there's nothing we should do with it.
+ # Note: there seems to be a bug either in bindctl or cfgmgr, and
+ # new_config can contain 'database_file' even if it's not really
+ # updated. We still perform the check so we can avoid redundant
+ # resetting when the bug is fixed. The redundant reset itself is not
+ # good, but such configuration update should not happen so often and
+ # it should be acceptable in practice.
+ if self._datasrc_info is not None and \
+ not 'database_file' in new_config:
+ return
+ rrclass, client, db_file = get_datasrc_client(self._cc)
+ self._datasrc_info = (rrclass, client)
+ logger.info(DDNS_AUTH_DBFILE_UPDATE, db_file)
+
+ def __zonemgr_config_handler(self, new_config, module_config):
+ logger.info(DDNS_RECEIVED_ZONEMGR_UPDATE)
+
+ # If we've got the config before and the new config doesn't update
+ # the secondary zone list, there's nothing we should do with it.
+ # (Same note as that for auth's config applies)
+ if self._secondary_zones is not None and \
+ not 'secondary_zones' in new_config:
+ return
+
+ # Get the latest secondary zones. Use get_remote_config_value() so
+ # it can work for both the initial default case and updates.
+ sec_zones, _ = self._cc.get_remote_config_value(ZONEMGR_MODULE_NAME,
+ 'secondary_zones')
+ new_secondary_zones = set()
+ try:
+ # Parse the new config and build a new list of secondary zones.
+ # Unfortunately, in the current implementation, even an observer
+ # module needs to perform full validation. This should be changed
+ # so that only post-validation (done by the main module) config is
+ # delivered to observer modules, but until it's supported we need
+ # to protect ourselves.
+ for zone_spec in sec_zones:
+ zname = Name(zone_spec['name'])
+ # class has the default value in case it's unspecified.
+ # ideally this should be merged within the config module, but
+                # the current implementation doesn't ensure that, so we need to
+                # substitute it ourselves.
+ if 'class' in zone_spec:
+ zclass = RRClass(zone_spec['class'])
+ else:
+ zclass = RRClass(module_config.get_default_value(
+ 'secondary_zones/class'))
+ new_secondary_zones.add((zname, zclass))
+ self._secondary_zones = new_secondary_zones
+ logger.info(DDNS_SECONDARY_ZONES_UPDATE, len(self._secondary_zones))
+ except Exception as ex:
+ logger.error(DDNS_SECONDARY_ZONES_UPDATE_FAIL, ex)
+
def trigger_shutdown(self):
'''Initiate a shutdown sequence.
@@ -168,10 +388,10 @@ class DDNSServer:
Accept another connection and create the session receiver.
"""
try:
- sock = self._listen_socket.accept()
+ (sock, remote_addr) = self._listen_socket.accept()
fileno = sock.fileno()
logger.debug(TRACE_BASIC, DDNS_NEW_CONN, fileno,
- sock.getpeername())
+ remote_addr if remote_addr else '<anonymous address>')
receiver = isc.util.cio.socketsession.SocketSessionReceiver(sock)
self._socksession_receivers[fileno] = (sock, receiver)
except (socket.error, isc.util.cio.socketsession.SocketSessionError) \
@@ -180,7 +400,30 @@ class DDNSServer:
# continue with the rest
logger.error(DDNS_ACCEPT_FAILURE, e)
- def handle_request(self, request):
+ def __check_request_tsig(self, msg, req_data):
+ '''TSIG checker for update requests.
+
+ This is a helper method for handle_request() below. It examines
+ the given update request message to see if it contains a TSIG RR,
+ and verifies the signature if it does. It returs the TSIG context
+ used for the verification, or None if the request doesn't contain
+ a TSIG. If the verification fails it simply raises an exception
+ as handle_request() assumes it should succeed.
+
+ '''
+ tsig_record = msg.get_tsig_record()
+ if tsig_record is None:
+ return None
+ tsig_ctx = TSIGContext(tsig_record.get_name(),
+ tsig_record.get_rdata().get_algorithm(),
+ isc.server_common.tsig_keyring.get_keyring())
+ tsig_error = tsig_ctx.verify(tsig_record, req_data)
+ if tsig_error != TSIGError.NOERROR:
+ raise self.InternalError("Failed to verify request's TSIG: " +
+ str(tsig_error))
+ return tsig_ctx
+
+ def handle_request(self, req_session):
"""
This is the place where the actual DDNS processing is done. Other
methods are either subroutines of this method or methods doing the
@@ -190,27 +433,179 @@ class DDNSServer:
It is called with the request being session as received from
SocketSessionReceiver, i.e. tuple
(socket, local_address, remote_address, data).
+
+ In general, this method doesn't propagate exceptions outside the
+        method. Most protocol or system errors will result in an error
+ response to the update client or dropping the update request.
+ The update session class should also ensure this. Critical exceptions
+ such as memory allocation failure will be propagated, however, and
+ will subsequently terminate the server process.
+
+ Return: True if a response to the request is successfully sent;
+ False otherwise. The return value wouldn't be useful for the server
+ itself; it's provided mainly for testing purposes.
+
"""
- # TODO: Implement the magic
+ # give tuple elements intuitive names
+ (sock, local_addr, remote_addr, req_data) = req_session
+
+ # The session sender (b10-auth) should have made sure that this is
+ # a validly formed DNS message of OPCODE being UPDATE, and if it's
+ # TSIG signed, its key is known to the system and the signature is
+        # valid. Messages that don't meet these should have been responded to
+ # or dropped by the sender, so if such error is detected we treat it
+ # as an internal error and don't bother to respond.
+ try:
+ self.__request_msg.clear(Message.PARSE)
+ # specify PRESERVE_ORDER as we need to handle each RR separately.
+ self.__request_msg.from_wire(req_data, Message.PRESERVE_ORDER)
+ if self.__request_msg.get_opcode() != Opcode.UPDATE():
+ raise self.InternalError('Update request has unexpected '
+ 'opcode: ' +
+ str(self.__request_msg.get_opcode()))
+ tsig_ctx = self.__check_request_tsig(self.__request_msg, req_data)
+ except Exception as ex:
+ logger.error(DDNS_REQUEST_PARSE_FAIL, ex)
+ return False
+
+ # Let an update session object handle the request. Note: things around
+ # ZoneConfig will soon be substantially revised. For now we don't
+ # bother to generalize it.
+ zone_cfg = ZoneConfig(self._secondary_zones, self._datasrc_info[0],
+ self._datasrc_info[1], self._zone_config)
+ update_session = self._UpdateSessionClass(self.__request_msg,
+ remote_addr, zone_cfg)
+ result, zname, zclass = update_session.handle()
+
+ # If the request should be dropped, we're done; otherwise, send the
+ # response generated by the session object.
+ if result == isc.ddns.session.UPDATE_DROP:
+ return False
+ msg = update_session.get_message()
+ self.__response_renderer.clear()
+ if tsig_ctx is not None:
+ msg.to_wire(self.__response_renderer, tsig_ctx)
+ else:
+ msg.to_wire(self.__response_renderer)
- # TODO: Don't propagate most of the exceptions (like datasrc errors),
- # just drop the packet.
- pass
+ ret = self.__send_response(sock, self.__response_renderer.get_data(),
+ remote_addr)
+ if result == isc.ddns.session.UPDATE_SUCCESS:
+ self.__notify_auth(zname, zclass)
+ self.__notify_xfrout(zname, zclass)
+ return ret
+
+ def __send_response(self, sock, data, dest):
+ '''Send DDNS response to the client.
+
+ Right now, this is a straightforward subroutine of handle_request(),
+        but is intended to be extended eventually so that it can handle more
+        complicated operations for TCP (which requires asynchronous write).
+ Further, when we support multiple requests over a single TCP
+ connection, this method may even be shared by multiple methods.
+
+ Parameters:
+ sock: (python socket) the socket to which the response should be sent.
+ data: (binary) the response data
+        dest: (python socket address) the destination address to which the response
+ should be sent.
+
+        Return: True if the send operation succeeds; otherwise False.
+
+ '''
+ try:
+ if sock.proto == socket.IPPROTO_UDP:
+ sock.sendto(data, dest)
+ else:
+ tcp_ctx = DNSTCPContext(sock)
+ send_result = tcp_ctx.send(data)
+ if send_result == DNSTCPContext.SENDING:
+ self._tcp_ctxs[sock.fileno()] = (tcp_ctx, dest)
+ elif send_result == DNSTCPContext.CLOSED:
+ raise socket.error("socket error in TCP send")
+ else:
+ tcp_ctx.close()
+ except socket.error as ex:
+ logger.warn(DDNS_RESPONSE_SOCKET_ERROR, ClientFormatter(dest), ex)
+ return False
+
+ return True
+
+ def __notify_auth(self, zname, zclass):
+ '''Notify auth of the update, if necessary.'''
+ msg = auth_loadzone_command(self._cc, zname, zclass)
+ if msg is not None:
+ self.__notify_update(AUTH_MODULE_NAME, msg, zname, zclass)
+
+ def __notify_xfrout(self, zname, zclass):
+ '''Notify xfrout of the update.'''
+ param = {'zone_name': zname.to_text(), 'zone_class': zclass.to_text()}
+ msg = create_command(ZONE_NEW_DATA_READY_CMD, param)
+ self.__notify_update(XFROUT_MODULE_NAME, msg, zname, zclass)
+
+ def __notify_update(self, modname, msg, zname, zclass):
+ '''Notify other module of the update.
+
+ Note that we use blocking communication here. While the internal
+ communication bus is generally expected to be pretty responsive and
+ error free, notable delay can still occur, and in worse cases timeouts
+ or connection reset can happen. In these cases, even if the trouble
+ is temporary, the update service will be suspended for a while.
+ For a longer term we'll need to switch to asynchronous communication,
+ but for now we rely on the blocking operation.
+
+ Note also that we directly refer to the "protected" member of
+ ccsession (_cc._session) rather than creating a separate channel.
+ It's probably not the best practice, but hopefully we can introduce
+ a cleaner way when we support asynchronous communication.
+ At the moment we prefer the brevity with the use of internal channel
+ of the cc session.
+
+ '''
+ try:
+ seq = self._cc._session.group_sendmsg(msg, modname)
+ answer, _ = self._cc._session.group_recvmsg(False, seq)
+ rcode, error_msg = parse_answer(answer)
+ except (SessionTimeout, SessionError, ProtocolError) as ex:
+ rcode = 1
+ error_msg = str(ex)
+ if rcode == 0:
+ logger.debug(TRACE_BASIC, DDNS_UPDATE_NOTIFY, modname,
+ ZoneFormatter(zname, zclass))
+ else:
+ logger.error(DDNS_UPDATE_NOTIFY_FAIL, modname,
+ ZoneFormatter(zname, zclass), error_msg)
def handle_session(self, fileno):
- """
- Handle incoming session on the socket with given fileno.
+ """Handle incoming session on the socket with given fileno.
+
+ Return True if a response (whether positive or negative) has been
+ sent; otherwise False. The return value isn't expected to be used
+ for other purposes than testing.
+
"""
logger.debug(TRACE_BASIC, DDNS_SESSION, fileno)
- (socket, receiver) = self._socksession_receivers[fileno]
+ (session_socket, receiver) = self._socksession_receivers[fileno]
try:
- self.handle_request(receiver.pop())
+ req_session = receiver.pop()
+ (sock, remote_addr) = (req_session[0], req_session[2])
+
+ # If this is a TCP client, check the quota, and immediately reject
+ # it if we cannot accept more.
+ if sock.proto == socket.IPPROTO_TCP and \
+ len(self._tcp_ctxs) >= self.TCP_CLIENTS:
+ logger.warn(DDNS_REQUEST_TCP_QUOTA,
+ ClientFormatter(remote_addr), len(self._tcp_ctxs))
+ sock.close()
+ return False
+ return self.handle_request(req_session)
except isc.util.cio.socketsession.SocketSessionError as se:
# No matter why this failed, the connection is in unknown, possibly
# broken state. So, we close the socket and remove the receiver.
del self._socksession_receivers[fileno]
- socket.close()
+ session_socket.close()
logger.warn(DDNS_DROP_CONN, fileno, se)
+ return False
def run(self):
'''
@@ -231,8 +626,8 @@ class DDNSServer:
try:
(reads, writes, exceptions) = \
select.select([cc_fileno, listen_fileno] +
- list(self._socksession_receivers.keys()), [],
- [])
+ list(self._socksession_receivers.keys()),
+ list(self._tcp_ctxs.keys()), [])
except select.error as se:
# In case it is just interrupted, we continue like nothing
# happened
@@ -247,6 +642,15 @@ class DDNSServer:
self.accept()
else:
self.handle_session(fileno)
+ for fileno in writes:
+ ctx = self._tcp_ctxs[fileno]
+ result = ctx[0].send_ready()
+ if result != DNSTCPContext.SENDING:
+ if result == DNSTCPContext.CLOSED:
+ logger.warn(DDNS_RESPONSE_TCP_SOCKET_ERROR,
+ ClientFormatter(ctx[1]))
+ ctx[0].close()
+ del self._tcp_ctxs[fileno]
self.shutdown_cleanup()
logger.info(DDNS_STOPPED)
@@ -305,7 +709,7 @@ def main(ddns_server=None):
logger.info(DDNS_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(DDNS_CC_SESSION_ERROR, str(e))
- except ModuleCCSessionError as e:
+ except (ModuleSpecError, ModuleCCSessionError) as e:
logger.error(DDNS_MODULECC_SESSION_ERROR, str(e))
except DDNSConfigError as e:
logger.error(DDNS_CONFIG_ERROR, str(e))
diff --git a/src/bin/ddns/ddns.spec b/src/bin/ddns/ddns.spec
index 55dab5c..70611e6 100644
--- a/src/bin/ddns/ddns.spec
+++ b/src/bin/ddns/ddns.spec
@@ -4,22 +4,36 @@
"config_data": [
{
"item_name": "zones",
- "item_type": "named_set",
+ "item_type": "list",
"item_optional": false,
- "item_default": {},
- "named_set_item_spec": {
+ "item_default": [],
+ "list_item_spec": {
"item_name": "entry",
"item_type": "map",
"item_optional": true,
"item_default": {
- "update_acl": [{"action": "ACCEPT", "from": "127.0.0.1"},
- {"action": "ACCEPT", "from": "::1"}]
+ "origin": "",
+ "class": "IN",
+ "update_acl": []
},
"map_item_spec": [
{
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
"item_name": "update_acl",
"item_type": "list",
"item_optional": false,
+ "item_default": [],
"list_item_spec": {
"item_name": "acl_element",
"item_type": "any",
diff --git a/src/bin/ddns/ddns_messages.mes b/src/bin/ddns/ddns_messages.mes
index 996e663..61311bc 100644
--- a/src/bin/ddns/ddns_messages.mes
+++ b/src/bin/ddns/ddns_messages.mes
@@ -25,6 +25,12 @@ There was a low-level error when we tried to accept an incoming connection
connections we already have, but this connection is dropped. The reason
is logged.
+% DDNS_AUTH_DBFILE_UPDATE updated auth DB file to %1
+b10-ddns was notified of updates to the SQLite3 DB file that b10-auth
+uses for the underlying data source and on which b10-ddns needs to
+make updates. b10-ddns then updated its internal setup so further
+updates would be made on the new DB.
+
% DDNS_CC_SESSION_ERROR error reading from cc channel: %1
There was a problem reading from the command and control channel. The
most likely cause is that the msgq process is not running.
@@ -38,6 +44,14 @@ configuration manager b10-cfgmgr is not running.
The ddns process encountered an error when installing the configuration at
startup time. Details of the error are included in the log message.
+% DDNS_CONFIG_HANDLER_ERROR failed to update ddns configuration: %1
+An update to b10-ddns configuration was delivered but an error was
+found while applying them. None of the delivered updates were applied
+to the running b10-ddns system, and the server will keep running with
+the existing configuration. If this happened in the initial
+configuration setup, the server will be running with the default
+configurations.
+
% DDNS_DROP_CONN dropping connection on file descriptor %1 because of error %2
There was an error on a connection with the b10-auth server (or whatever
connects to the ddns daemon). This might be OK, for example when the
@@ -45,6 +59,29 @@ authoritative server shuts down, the connection would get closed. It also
can mean the system is busy and can't keep up or that the other side got
confused and sent bad data.
+% DDNS_GET_REMOTE_CONFIG_FAIL failed to get %1 module configuration %2 times: %3
+b10-ddns tried to get configuration of some remote modules for its
+operation, but it failed. The most likely cause of this is that the
+remote module has not fully started up and b10-ddns couldn't get the
+configuration in a timely fashion. b10-ddns attempts to retry it a
+few times, imposing a short delay, hoping it eventually succeeds if
+it's just a timing issue. The number of total failed attempts is also
+logged. If it reaches an internal threshold b10-ddns considers it a
+fatal error and terminates. Even in that case, if b10-ddns is
+configured as a "dispensable" component (which is the default), the
+parent bind10 process will restart it, and there will be another
+chance of getting the remote configuration successfully. These are
+not the optimal behavior, but it's believed to be sufficient in
+practice (there would normally be no failure in the first place). If
+it really causes an operational trouble other than having a few of
+these log messages, please submit a bug report; there can be several
+ways to make it more sophisticated. Another, less likely reason for
+having this error is because the remote modules are not actually
+configured to run. If that's the case fixing the configuration should
+solve the problem - either by making sure the remote module will run
+or by not running b10-ddns (without these remote modules b10-ddns is
+not functional, so there's no point in running it in this case).
+
% DDNS_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
There was a problem in the lower level module handling configuration and
control commands. This could happen for various reasons, but the most likely
@@ -58,14 +95,93 @@ requests from it. The file descriptor number and the address where the request
comes from is logged. The connection is over a unix domain socket and is likely
coming from a b10-auth process.
+% DDNS_RECEIVED_AUTH_UPDATE received configuration updates from auth server
+b10-ddns is notified of updates to b10-auth configuration
+(including a report of the initial configuration) that b10-ddns might
+be interested in.
+
% DDNS_RECEIVED_SHUTDOWN_COMMAND shutdown command received
The ddns process received a shutdown command from the command channel
and will now shut down.
+% DDNS_RECEIVED_ZONEMGR_UPDATE received configuration updates from zonemgr
+b10-ddns is notified of updates to b10-zonemgr's configuration
+(including a report of the initial configuration). It may possibly
+contain changes to the secondary zones, in which case b10-ddns will
+update its internal copy of that configuration.
+
+% DDNS_REQUEST_PARSE_FAIL failed to parse update request: %1
+b10-ddns received an update request via b10-auth, but the received
+data failed to pass minimum validation: it was either broken wire
+format data for a valid DNS message (e.g. it's shorter than the
+fixed-length header), or the opcode is not update, or TSIG is included
+in the request but it fails to validate. Since b10-auth should have
+performed this level of checks, such an error shouldn't be detected at
+this stage and should rather be considered an internal bug. This
+event is therefore logged at the error level, and the request is
+simply dropped. Additional information of the error is also logged.
+
+% DDNS_REQUEST_TCP_QUOTA reject TCP update client %1 (%2 running)
+b10-ddns received a new update request from a client over TCP, but
+the number of TCP clients being handled by the server already reached
+the configured quota, so the latest client was rejected by closing
+the connection. The administrator may want to check the status of
+b10-ddns, and if this happens even if the server is not very busy,
+the quota may have to be increased. Or, if it's more likely to be
+malicious or simply bogus clients that somehow keep the TCP connection
+open for a long period, maybe they should be rejected with an
+appropriate ACL configuration or some lower layer filtering. The
+number of existing TCP clients is shown in the log, which should be
+identical to the current quota.
+
+% DDNS_RESPONSE_SOCKET_ERROR failed to send update response to %1: %2
+A network I/O error happened while sending an update response. The
+client's address that caused the error and error details are also
+logged.
+
+% DDNS_RESPONSE_TCP_SOCKET_ERROR failed to complete sending update response to %1 over TCP
+b10-ddns had tried to send an update response over TCP, and it hadn't
+been completed at that time, and a followup attempt to complete the
+send operation failed due to some network I/O error. While a network
+error can happen any time, this event is quite unexpected for two
+reasons. First, since the size of a response to an update request
+should be generally small, it's unlikely that the initial attempt
+didn't fail but wasn't completed. Second, since the first attempt
+succeeded and the TCP connection had been established in the first
+place, it's more likely for the subsequent attempt to succeed. In any
+case, there may be nothing that can be done to fix it at the server
+side, but the administrator may want to check the general reachability
+with the client address.
+
% DDNS_RUNNING ddns server is running and listening for updates
The ddns process has successfully started and is now ready to receive commands
and updates.
+% DDNS_SECONDARY_ZONES_UPDATE updated secondary zone list (%1 zones are listed)
+b10-ddns has successfully updated the internal copy of secondary zones
+obtained from b10-zonemgr, based on the latest update to zonemgr's
+configuration. The number of newly configured (unique) secondary
+zones is logged.
+
+% DDNS_SECONDARY_ZONES_UPDATE_FAIL failed to update secondary zone list: %1
+An error message. b10-ddns was notified of updates to a list of
+secondary zones from b10-zonemgr and tried to update its own internal
+copy of the list, but it failed. This can happen if the configuration
+contains an error, and b10-zonemgr should also reject that update.
+Unfortunately, in the current implementation there is no way to ensure
+that both zonemgr and ddns have consistent information when an update
+contains an error; further, as of this writing zonemgr has a bug that
+it could partially update the list of secondary zones if part of the
+list has an error (see Trac ticket #2038). b10-ddns still keeps
+running with the previous configuration, but it's strongly advisable
+to check log messages from zonemgr, and if it indicates there can be
+inconsistent state, it's better to restart the entire BIND 10 system
+(just restarting b10-ddns wouldn't be enough, because zonemgr can have
+partially updated configuration due to bug #2038). The log message
+contains an error description, but it's intentionally kept simple as
+it's primarily a matter of zonemgr. To know the details of the error,
+log messages of zonemgr should be consulted.
+
% DDNS_SESSION session arrived on file descriptor %1
A debug message, informing there's some activity on the given file descriptor.
It will be either a request or the file descriptor will be closed. See
@@ -88,3 +204,26 @@ process will now shut down.
The b10-ddns process encountered an uncaught exception and will now shut
down. This is indicative of a programming error and should not happen under
normal circumstances. The exception type and message are printed.
+
+% DDNS_UPDATE_NOTIFY notified %1 of updates to %2
+Debug message. b10-ddns has made updates to a zone based on an update
+request and has successfully notified an external module of the updates.
+The notified module will use that information for updating its own
+state or any necessary protocol action such as zone reloading or sending
+notify messages to secondary servers.
+
+% DDNS_UPDATE_NOTIFY_FAIL failed to notify %1 of updates to %2: %3
+b10-ddns has made updates to a zone based on an update request and
+tried to notify an external module of the updates, but the
+notification failed. Severity of this effect depends on the type of
+the module. If it's b10-xfrout, this means DNS notify messages won't
+be sent to secondary servers of the zone. It's suboptimal, but not
+necessarily critical as the secondary servers will try to check the
+zone's status periodically. If it's b10-auth and the notification was
+needed to have it reload the corresponding zone, it's more serious
+because b10-auth won't be able to serve the new version of the zone
+unless some explicit recovery action is taken. So the administrator
+needs to examine this message and take an appropriate action. In
+either case, this notification is generally expected to succeed; so
+the fact it fails itself means there's something wrong in the BIND 10
+system, and it would be advisable to check other log messages.
diff --git a/src/bin/ddns/tests/Makefile.am b/src/bin/ddns/tests/Makefile.am
index cd1082f..5c824d4 100644
--- a/src/bin/ddns/tests/Makefile.am
+++ b/src/bin/ddns/tests/Makefile.am
@@ -25,5 +25,6 @@ endif
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/ddns:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
+ TESTDATA_PATH=$(abs_top_srcdir)/src/lib/testutils/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/ddns/tests/ddns_test.py b/src/bin/ddns/tests/ddns_test.py
index 395aacc..b0b34ca 100755
--- a/src/bin/ddns/tests/ddns_test.py
+++ b/src/bin/ddns/tests/ddns_test.py
@@ -15,28 +15,117 @@
'''Tests for the DDNS module'''
-import unittest
-import isc
+from isc.ddns.session import *
+from isc.dns import *
+from isc.acl.acl import ACCEPT
+import isc.util.cio.socketsession
+from isc.cc.session import SessionTimeout, SessionError, ProtocolError
+from isc.datasrc import DataSourceClient
+from isc.config import module_spec_from_file
+from isc.config.config_data import ConfigData
+from isc.config.ccsession import create_answer, ModuleCCSessionError
+from isc.config.module_spec import ModuleSpecError
+from isc.server_common.dns_tcp import DNSTCPContext
import ddns
-import isc.config
-import select
import errno
-import isc.util.cio.socketsession
+import os
+import select
+import shutil
import socket
-import os.path
+import unittest
+
+# Some common test parameters
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied
+TEST_ZONE_NAME = Name('example.org')
+TEST_ZONE_NAME_STR = TEST_ZONE_NAME.to_text()
+UPDATE_RRTYPE = RRType.SOA()
+TEST_QID = 5353 # arbitrary chosen
+TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS_STR = TEST_RRCLASS.to_text()
+TEST_SERVER6 = ('2001:db8::53', 53, 0, 0)
+TEST_CLIENT6 = ('2001:db8::1', 53000, 0, 0)
+TEST_SERVER4 = ('192.0.2.53', 53)
+TEST_CLIENT4 = ('192.0.2.1', 53534)
+TEST_ZONE_RECORD = Question(TEST_ZONE_NAME, TEST_RRCLASS, UPDATE_RRTYPE)
+TEST_ACL_CONTEXT = isc.acl.dns.RequestContext(
+ socket.getaddrinfo("192.0.2.1", 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP, socket.AI_NUMERICHOST)[0][4])
+# TSIG key for tests when needed. The key name is TEST_ZONE_NAME.
+TEST_TSIG_KEY = TSIGKey("example.org:SFuWd/q99SzF8Yzd1QbB9g==")
+# TSIG keyring that contains the test key
+TEST_TSIG_KEYRING = TSIGKeyRing()
+TEST_TSIG_KEYRING.add(TEST_TSIG_KEY)
+# Another TSIG key not in the keyring, making verification fail
+BAD_TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+
+# Incorporate it so we can use the real default values of zonemgr config
+# in the tests.
+ZONEMGR_MODULE_SPEC = module_spec_from_file(
+ os.environ["B10_FROM_BUILD"] + "/src/bin/zonemgr/zonemgr.spec")
class FakeSocket:
"""
A fake socket. It only provides a file number, peer name and accept method.
"""
- def __init__(self, fileno):
+ def __init__(self, fileno, proto=socket.IPPROTO_UDP):
+ self.proto = proto
self.__fileno = fileno
+ self._sent_data = None
+ self._sent_addr = None
+ self._close_called = 0 # number of calls to close()
+ self.__send_cc = 0 # waterline of the send buffer (can be reset)
+ # customizable by tests; if set to True, sendto() will throw after
+ # recording the parameters.
+ self._raise_on_send = False
+ self._send_buflen = None # imaginary send buffer for partial send
def fileno(self):
return self.__fileno
def getpeername(self):
+ if self.proto == socket.IPPROTO_UDP or \
+ self.proto == socket.IPPROTO_TCP:
+ return TEST_CLIENT4
return "fake_unix_socket"
def accept(self):
- return FakeSocket(self.__fileno + 1)
+ return FakeSocket(self.__fileno + 1), '/dummy/path'
+ def sendto(self, data, addr):
+ self._sent_data = data
+ self._sent_addr = addr
+ if self._raise_on_send:
+ raise socket.error('test socket failure')
+ def send(self, data):
+ if self._raise_on_send:
+ raise socket.error(errno.EPIPE, 'faked connection disruption')
+ elif self._send_buflen is None:
+ available_space = len(data)
+ else:
+ available_space = self._send_buflen - self.__send_cc
+ if available_space == 0:
+ # if there's no space, (assuming it's nonblocking mode) raise
+ # EAGAIN.
+ raise socket.error(errno.EAGAIN,
+ "Resource temporarily unavailable")
+ # determine the sendable part of the data, record it, update "buffer".
+ cc = min(available_space, len(data))
+ if self._sent_data is None:
+ self._sent_data = data[:cc]
+ else:
+ self._sent_data += data[:cc]
+ self.__send_cc += cc
+ return cc
+ def setblocking(self, on):
+ # We only need a faked NO-OP implementation.
+ pass
+ def close(self):
+ self._close_called += 1
+ def clear(self):
+ '''Clear internal instrumental data.'''
+ self._sent_data = None
+ self._sent_addr = None
+ def make_send_ready(self):
+ # pretend that the accrued data has been cleared, making room in
+ # the send buffer.
+ self.__send_cc = 0
class FakeSessionReceiver:
"""
@@ -51,14 +140,88 @@ class FakeSessionReceiver:
"""
return self._socket
+class FakeUpdateSession:
+ '''A fake update session, emulating isc.ddns.session.UpdateSession.
+
+ It provides the same interfaces as UpdateSession with skipping complicated
+ internal protocol processing and returning given faked results. This
+ will help simplify test setups.
+
+ '''
+ def __init__(self, msg, client_addr, zone_config, faked_result):
+ '''Faked constructor.
+
+ It takes an additional faked_result parameter. It will be used
+ as the result value of handle(). If its value is UPDATE_ERROR,
+ get_message() will create a response message whose Rcode is
+ REFUSED.
+
+ '''
+ self.__msg = msg
+ self.__faked_result = faked_result
+
+ def handle(self):
+ if self.__faked_result == UPDATE_SUCCESS:
+ return self.__faked_result, TEST_ZONE_NAME, TEST_RRCLASS
+ return self.__faked_result, None, None
+
+ def get_message(self):
+ self.__msg.make_response()
+ self.__msg.clear_section(SECTION_ZONE)
+ if self.__faked_result == UPDATE_SUCCESS:
+ self.__msg.set_rcode(Rcode.NOERROR())
+ else:
+ self.__msg.set_rcode(Rcode.REFUSED())
+ return self.__msg
+
+class FakeKeyringModule:
+ '''Fake the entire isc.server_common.tsig_keyring module.'''
+
+ def init_keyring(self, cc):
+ '''Set the instrumental attribute to True when called.
+
+ It can be used for a test that confirms TSIG key initialization is
+ surely performed. This class doesn't use any CC session, so the
+ cc parameter will be ignored.
+
+ '''
+ self.initialized = True
+
+ def get_keyring(self):
+ '''Simply return the predefined TSIG keyring unconditionally.'''
+ return TEST_TSIG_KEYRING
+
class MyCCSession(isc.config.ConfigData):
- '''Fake session with minimal interface compliance'''
+ '''Fake session with minimal interface compliance.'''
+
+ # faked CC sequence used in group_send/recvmsg
+ FAKE_SEQUENCE = 53
+
def __init__(self):
module_spec = isc.config.module_spec_from_file(
ddns.SPECFILE_LOCATION)
isc.config.ConfigData.__init__(self, module_spec)
self._started = False
self._stopped = False
+ # Used as the return value of get_remote_config_value. Customizable.
+ self.auth_db_file = READ_ZONE_DB_FILE
+ # Used as the return value of get_remote_config_value. Customizable.
+ self.auth_datasources = None
+ # faked cc channel, providing group_send/recvmsg itself. The following
+ # attributes are for inspection/customization in tests.
+ self._session = self
+ self._sent_msg = []
+ self._recvmsg_called = 0
+ self._answer_code = 0 # code used in answer returned via recvmsg
+ self._sendmsg_exception = None # will be raised from sendmsg if !None
+ self._recvmsg_exception = None # will be raised from recvmsg if !None
+
+ # Attributes to handle (faked) remote configurations
+ self.__callbacks = {} # record callbacks for updates to remote confs
+ self._raise_mods = {} # map of module to exceptions to be triggered
+ # on add_remote. settable by tests.
+ self._auth_config = {} # faked auth cfg, settable by tests
+ self._zonemgr_config = {} # faked zonemgr cfg, settable by tests
def start(self):
'''Called by DDNSServer initialization, but not used in tests'''
@@ -74,6 +237,71 @@ class MyCCSession(isc.config.ConfigData):
"""
return FakeSocket(1)
+ def add_remote_config_by_name(self, module_name, update_callback=None):
+ # If a list of exceptions is given for the module, raise the front one,
+ # removing that exception from the list (so the list length controls
+ # how many (and which) exceptions should be raised on add_remote).
+ if module_name in self._raise_mods.keys() and \
+ len(self._raise_mods[module_name]) != 0:
+ ex = self._raise_mods[module_name][0]
+ self._raise_mods[module_name] = self._raise_mods[module_name][1:]
+ raise ex('Failure requesting remote config data')
+
+ if update_callback is not None:
+ self.__callbacks[module_name] = update_callback
+ if module_name is 'Auth':
+ if module_name in self.__callbacks:
+ # ddns implementation doesn't use the 2nd element, so just
+ # setting it to None
+ self.__callbacks[module_name](self._auth_config, None)
+ if module_name is 'Zonemgr':
+ if module_name in self.__callbacks:
+ self.__callbacks[module_name](self._zonemgr_config,
+ ConfigData(ZONEMGR_MODULE_SPEC))
+
+ def get_remote_config_value(self, module_name, item):
+ if module_name == "Auth" and item == "database_file":
+ return self.auth_db_file, False
+ if module_name == "Auth" and item == "datasources":
+ if self.auth_datasources is None:
+ return [], True # default
+ else:
+ return self.auth_datasources, False
+ if module_name == 'Zonemgr' and item == 'secondary_zones':
+ if item in self._zonemgr_config:
+ return self._zonemgr_config[item], False
+ else:
+ seczone_default = \
+ ConfigData(ZONEMGR_MODULE_SPEC).get_default_value(
+ 'secondary_zones')
+ return seczone_default, True
+
+ def group_sendmsg(self, msg, group):
+ # remember the passed parameter, and return dummy sequence
+ self._sent_msg.append((msg, group))
+ if self._sendmsg_exception is not None:
+ raise self._sendmsg_exception
+ return self.FAKE_SEQUENCE
+
+ def group_recvmsg(self, nonblock, seq):
+ self._recvmsg_called += 1
+ if seq != self.FAKE_SEQUENCE:
+ raise RuntimeError('unexpected CC sequence: ' + str(seq))
+ if self._recvmsg_exception is not None:
+ raise self._recvmsg_exception
+ if self._answer_code is 0:
+ return create_answer(0), None
+ else:
+ return create_answer(self._answer_code, "dummy error value"), None
+
+ def clear_msg(self):
+ '''Clear instrumental attributes related session messages.'''
+ self._sent_msg = []
+ self._recvmsg_called = 0
+ self._answer_code = 0
+ self._sendmsg_exception = None
+ self._recvmsg_exception = None
+
class MyDDNSServer():
'''Fake DDNS server used to test the main() function'''
def __init__(self):
@@ -104,6 +332,8 @@ class TestDDNSServer(unittest.TestCase):
def setUp(self):
cc_session = MyCCSession()
self.assertFalse(cc_session._started)
+ self.orig_tsig_keyring = isc.server_common.tsig_keyring
+ isc.server_common.tsig_keyring = FakeKeyringModule()
self.ddns_server = ddns.DDNSServer(cc_session)
self.__cc_session = cc_session
self.assertTrue(cc_session._started)
@@ -114,10 +344,19 @@ class TestDDNSServer(unittest.TestCase):
self.ddns_server._listen_socket = FakeSocket(2)
ddns.select.select = self.__select
+ # common private attributes for TCP response tests
+ self.__tcp_sock = FakeSocket(10, socket.IPPROTO_TCP)
+ self.__tcp_ctx = DNSTCPContext(self.__tcp_sock)
+ self.__tcp_data = b'A' * 12 # dummy, just the same size as DNS header
+ # some tests will override this, which will be restored in tearDown:
+ self.__orig_add_pause = ddns.add_pause
+
def tearDown(self):
ddns.select.select = select.select
ddns.isc.util.cio.socketsession.SocketSessionReceiver = \
isc.util.cio.socketsession.SocketSessionReceiver
+ isc.server_common.tsig_keyring = self.orig_tsig_keyring
+ ddns.add_pause = self.__orig_add_pause
def test_listen(self):
'''
@@ -141,12 +380,198 @@ class TestDDNSServer(unittest.TestCase):
ddns.clear_socket()
self.assertFalse(os.path.exists(ddns.SOCKET_FILE))
+ def test_initial_config(self):
+ # right now, the only configuration is the zone configuration, whose
+ # default should be an empty map.
+ self.assertEqual({}, self.ddns_server._zone_config)
+
def test_config_handler(self):
- # Config handler does not do anything yet, but should at least
- # return 'ok' for now.
- new_config = {}
+ # Update with a simple zone configuration: including an accept-all ACL
+ new_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] } ] }
answer = self.ddns_server.config_handler(new_config)
self.assertEqual((0, None), isc.config.parse_answer(answer))
+ acl = self.ddns_server._zone_config[(TEST_ZONE_NAME, TEST_RRCLASS)]
+ self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+
+ # Slightly more complicated one: containing multiple ACLs
+ new_config = { 'zones': [ { 'origin': 'example.com',
+ 'class': 'CH',
+ 'update_acl': [{'action': 'REJECT',
+ 'from': '2001:db8::1'}] },
+ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] },
+ { 'origin': 'example.org',
+ 'class': 'CH',
+ 'update_acl': [{'action': 'DROP'}] } ] }
+ answer = self.ddns_server.config_handler(new_config)
+ self.assertEqual((0, None), isc.config.parse_answer(answer))
+ self.assertEqual(3, len(self.ddns_server._zone_config))
+ acl = self.ddns_server._zone_config[(TEST_ZONE_NAME, TEST_RRCLASS)]
+ self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+
+ # empty zone config
+ new_config = { 'zones': [] }
+ answer = self.ddns_server.config_handler(new_config)
+ self.assertEqual((0, None), isc.config.parse_answer(answer))
+ self.assertEqual({}, self.ddns_server._zone_config)
+
+    # bad zone config data: bad name. The previous config should be kept.
+ bad_config = { 'zones': [ { 'origin': 'bad..example',
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] } ] }
+ answer = self.ddns_server.config_handler(bad_config)
+ self.assertEqual(1, isc.config.parse_answer(answer)[0])
+ self.assertEqual({}, self.ddns_server._zone_config)
+
+ # bad zone config data: bad class.
+ bad_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': 'badclass',
+ 'update_acl': [{'action': 'ACCEPT'}] } ] }
+ answer = self.ddns_server.config_handler(bad_config)
+ self.assertEqual(1, isc.config.parse_answer(answer)[0])
+ self.assertEqual({}, self.ddns_server._zone_config)
+
+ # bad zone config data: bad ACL.
+ bad_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'badaction'}]}]}
+ answer = self.ddns_server.config_handler(bad_config)
+ self.assertEqual(1, isc.config.parse_answer(answer)[0])
+ self.assertEqual({}, self.ddns_server._zone_config)
+
+    # the first zone config is valid, but not the second. the first one
+ # shouldn't be installed.
+ bad_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] },
+ { 'origin': 'bad..example',
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] } ] }
+ answer = self.ddns_server.config_handler(bad_config)
+ self.assertEqual(1, isc.config.parse_answer(answer)[0])
+ self.assertEqual({}, self.ddns_server._zone_config)
+
+    # Half-broken case: 'origin, class' pair is duplicate. For now
+    # we accept it (the latter one will win)
+ dup_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'REJECT'}] },
+ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'ACCEPT'}] } ] }
+ answer = self.ddns_server.config_handler(dup_config)
+ self.assertEqual((0, None), isc.config.parse_answer(answer))
+ acl = self.ddns_server._zone_config[(TEST_ZONE_NAME, TEST_RRCLASS)]
+ self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
+
+ def test_datasrc_config(self):
+ # By default (in our faked config) it should be derived from the
+ # test data source
+ rrclass, datasrc_client = self.ddns_server._datasrc_info
+ self.assertEqual(RRClass.IN(), rrclass)
+ self.assertEqual(DataSourceClient.SUCCESS,
+ datasrc_client.find_zone(Name('example.org'))[0])
+
+ # emulating an update. calling add_remote_config_by_name is a
+ # convenient faked way to invoke the callback. We set the db file
+ # to a bogus one; the current implementation will create an unusable
+ # data source client.
+ self.__cc_session.auth_db_file = './notexistentdir/somedb.sqlite3'
+ self.__cc_session._auth_config = \
+ {'database_file': './notexistentdir/somedb.sqlite3'}
+ self.__cc_session.add_remote_config_by_name('Auth')
+ rrclass, datasrc_client = self.ddns_server._datasrc_info
+ self.assertEqual(RRClass.IN(), rrclass)
+ self.assertRaises(isc.datasrc.Error,
+ datasrc_client.find_zone, Name('example.org'))
+
+ # Check the current info isn't changed if the new config doesn't
+ # update it.
+ info_orig = self.ddns_server._datasrc_info
+ self.ddns_server._datasrc_info = 42 # dummy value, should be kept.
+ self.__cc_session._auth_config = {'other_config': 'value'}
+ self.__cc_session.add_remote_config_by_name('Auth')
+ self.assertEqual(42, self.ddns_server._datasrc_info)
+ self.ddns_server._datasrc_info = info_orig
+
+ def test_secondary_zones_config(self):
+ # By default it should be an empty list
+ self.assertEqual(set(), self.ddns_server._secondary_zones)
+
+ # emulating an update.
+ self.__cc_session._zonemgr_config = {'secondary_zones': [
+ {'name': TEST_ZONE_NAME_STR, 'class': TEST_RRCLASS_STR}]}
+ self.__cc_session.add_remote_config_by_name('Zonemgr')
+
+ # The new set of secondary zones should be stored.
+ self.assertEqual({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ self.ddns_server._secondary_zones)
+
+ # Similar to the above, but 'class' is unspecified. The default value
+ # should be used.
+ self.__cc_session._zonemgr_config = {'secondary_zones': [
+ {'name': TEST_ZONE_NAME_STR}]}
+ self.__cc_session.add_remote_config_by_name('Zonemgr')
+ self.assertEqual({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ self.ddns_server._secondary_zones)
+
+ # The given list has a duplicate. The resulting set should unify them.
+ self.__cc_session._zonemgr_config = {'secondary_zones': [
+ {'name': TEST_ZONE_NAME_STR, 'class': TEST_RRCLASS_STR},
+ {'name': TEST_ZONE_NAME_STR, 'class': TEST_RRCLASS_STR}]}
+ self.__cc_session.add_remote_config_by_name('Zonemgr')
+ self.assertEqual({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ self.ddns_server._secondary_zones)
+
+ # Check the 2ndary zones aren't changed if the new config doesn't
+ # update it.
+ seczones_orig = self.ddns_server._secondary_zones
+ self.ddns_server._secondary_zones = 42 # dummy value, should be kept.
+ self.__cc_session._zonemgr_config = {}
+ self.__cc_session.add_remote_config_by_name('Zonemgr')
+ self.assertEqual(42, self.ddns_server._secondary_zones)
+ self.ddns_server._secondary_zones = seczones_orig
+
+ # If the update config is broken, the existing set should be intact.
+ self.__cc_session._zonemgr_config = {'secondary_zones': [
+ {'name': 'good.example', 'class': TEST_RRCLASS_STR},
+ {'name': 'badd..example', 'class': TEST_RRCLASS_STR}]}
+ self.__cc_session.add_remote_config_by_name('Zonemgr')
+ self.assertEqual({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ self.ddns_server._secondary_zones)
+
+ def __check_remote_config_fail(self, mod_name, num_ex, expected_ex):
+ '''Subroutine for remote_config_fail test.'''
+
+ # fake pause function for inspection and to avoid having timeouts
+ added_pause = []
+ ddns.add_pause = lambda sec: added_pause.append(sec)
+
+ # In our current implementation, there will be up to 3 tries of
+ # adding the module, each separated by a 1-sec pause. If all attempts
+ # fail the exception will be propagated.
+ exceptions = [expected_ex for i in range(0, num_ex)]
+ self.__cc_session._raise_mods = {mod_name: exceptions}
+ if num_ex >= 3:
+ self.assertRaises(expected_ex, ddns.DDNSServer, self.__cc_session)
+ else:
+ ddns.DDNSServer(self.__cc_session)
+ self.assertEqual([1 for i in range(0, num_ex)], added_pause)
+
+ def test_remote_config_fail(self):
+ # If getting config of Auth or Zonemgr fails on construction of
+        # DDNSServer, it should result in an exception and a few times
+ # of retries. We test all possible cases, changing the number of
+ # raised exceptions and the type of exceptions that can happen,
+ # which should also cover the fatal error case.
+ for i in range(0, 4):
+ self.__check_remote_config_fail('Auth', i, ModuleCCSessionError)
+ self.__check_remote_config_fail('Auth', i, ModuleSpecError)
+ self.__check_remote_config_fail('Zonemgr', i, ModuleCCSessionError)
+ self.__check_remote_config_fail('Zonemgr', i, ModuleSpecError)
def test_shutdown_command(self):
'''Test whether the shutdown command works'''
@@ -361,6 +786,483 @@ class TestDDNSServer(unittest.TestCase):
self.__select_expected = ([1, 2], [], [], None)
self.assertRaises(select.error, self.ddns_server.run)
+ def __send_select_tcp(self, buflen, raise_after_select=False):
+ '''Common subroutine for some TCP related tests below.'''
+ fileno = self.__tcp_sock.fileno()
+ self.ddns_server._tcp_ctxs = {fileno: (self.__tcp_ctx, TEST_CLIENT6)}
+
+ # make an initial, incomplete send via the test context
+ self.__tcp_sock._send_buflen = buflen
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__tcp_ctx.send(self.__tcp_data))
+ self.assertEqual(buflen, len(self.__tcp_sock._sent_data))
+ # clear the socket "send buffer"
+ self.__tcp_sock.make_send_ready()
+ # if requested, set up exception
+ self.__tcp_sock._raise_on_send = raise_after_select
+
+ # Run select
+ self.__select_expected = ([1, 2], [fileno], [], None)
+ self.__select_answer = ([], [fileno], [])
+ self.ddns_server.run()
+
+ def test_select_send_continued(self):
+ '''Test continuation of sending a TCP response.'''
+ # Common setup, with the bufsize that would make it complete after a
+ # single select call.
+ self.__send_select_tcp(7)
+
+ # Now the send should be completed. socket should be closed,
+ # and the context should be removed from the server.
+ self.assertEqual(14, len(self.__tcp_sock._sent_data))
+ self.assertEqual(1, self.__tcp_sock._close_called)
+ self.assertEqual(0, len(self.ddns_server._tcp_ctxs))
+
+ def test_select_send_continued_twice(self):
+ '''Test continuation of sending a TCP response, still continuing.'''
+ # This is similar to the send_continued test, but the continued
+ # operation still won't complete the send.
+ self.__send_select_tcp(5)
+
+ # Only 10 bytes should have been transmitted, socket is still open,
+ # and the context is still in the server (which would require select
+ # to watch it again).
+ self.assertEqual(10, len(self.__tcp_sock._sent_data))
+ self.assertEqual(0, self.__tcp_sock._close_called)
+ fileno = self.__tcp_sock.fileno()
+ self.assertEqual(self.__tcp_ctx,
+ self.ddns_server._tcp_ctxs[fileno][0])
+
+ def test_select_send_continued_failed(self):
+ '''Test continuation of sending a TCP response, which fails.'''
+ # Let the socket raise an exception in the second call to send().
+ self.__send_select_tcp(5, raise_after_select=True)
+
+ # Only the data before select() have been transmitted, socket is
+ # closed due to the failure, and the context is removed from the
+ # server.
+ self.assertEqual(5, len(self.__tcp_sock._sent_data))
+ self.assertEqual(1, self.__tcp_sock._close_called)
+ self.assertEqual(0, len(self.ddns_server._tcp_ctxs))
+
+ def test_select_multi_tcp(self):
+ '''Test continuation of sending a TCP response, multiple sockets.'''
+ # Check if the implementation still works with multiple outstanding
+ # TCP contexts. We use three (arbitrary choice), of which two will be
+ # writable after select and complete the send.
+ tcp_socks = []
+ for i in range(0, 3):
+ # Use faked FD of 100, 101, 102 (again, arbitrary choice)
+ s = FakeSocket(100 + i, proto=socket.IPPROTO_TCP)
+ ctx = DNSTCPContext(s)
+ self.ddns_server._tcp_ctxs[s.fileno()] = (ctx, TEST_CLIENT6)
+ s._send_buflen = 7 # make sure it requires two send's
+ self.assertEqual(DNSTCPContext.SENDING, ctx.send(self.__tcp_data))
+ s.make_send_ready()
+
+ tcp_socks.append(s)
+
+ self.__select_expected = ([1, 2], [100, 101, 102], [], None)
+ self.__select_answer = ([], [100, 102], [])
+ self.ddns_server.run()
+
+ for i in [0, 2]:
+ self.assertEqual(14, len(tcp_socks[i]._sent_data))
+ self.assertEqual(1, tcp_socks[i]._close_called)
+ self.assertEqual(1, len(self.ddns_server._tcp_ctxs))
+
+ def test_select_bad_writefd(self):
+ # There's no outstanding TCP context, but select somehow returns
+ # writable FD. It should result in an uncaught exception, killing
+ # the server. This is okay, because it shouldn't happen and should be
+ # an internal bug.
+ self.__select_expected = ([1, 2], [], [], None)
+ self.__select_answer = ([], [10], [])
+ self.assertRaises(KeyError, self.ddns_server.run)
+
+def create_msg(opcode=Opcode.UPDATE(), zones=[TEST_ZONE_RECORD], prereq=[],
+ tsigctx=None):
+ msg = Message(Message.RENDER)
+ msg.set_qid(TEST_QID)
+ msg.set_opcode(opcode)
+ msg.set_rcode(Rcode.NOERROR())
+ for z in zones:
+ msg.add_question(z)
+ for p in prereq:
+ msg.add_rrset(SECTION_PREREQUISITE, p)
+
+ renderer = MessageRenderer()
+ if tsigctx is not None:
+ msg.to_wire(renderer, tsigctx)
+ else:
+ msg.to_wire(renderer)
+
+ # re-read the created data in the parse mode
+ msg.clear(Message.PARSE)
+ msg.from_wire(renderer.get_data())
+
+ return renderer.get_data()
+
+
+class TestDDNSSession(unittest.TestCase):
+ def setUp(self):
+ self.__cc_session = MyCCSession()
+ self.assertFalse(self.__cc_session._started)
+ self.orig_tsig_keyring = isc.server_common.tsig_keyring
+ isc.server_common.tsig_keyring = FakeKeyringModule()
+ self.server = ddns.DDNSServer(self.__cc_session)
+ self.server._UpdateSessionClass = self.__fake_session_creator
+ self.__faked_result = UPDATE_SUCCESS # will be returned by fake session
+ self.__sock = FakeSocket(-1)
+
+ def tearDown(self):
+ self.assertTrue(isc.server_common.tsig_keyring.initialized)
+ isc.server_common.tsig_keyring = self.orig_tsig_keyring
+
+ def __fake_session_creator(self, req_message, client_addr, zone_config):
+ # remember the passed message for possible inspection later.
+ self.__req_message = req_message
+ return FakeUpdateSession(req_message, client_addr, zone_config,
+ self.__faked_result)
+
+ def check_update_response(self, resp_wire, expected_rcode=Rcode.NOERROR(),
+ tsig_ctx=None, tcp=False):
+ '''Check if given wire data are valid form of update response.
+
+ In this implementation, zone/prerequisite/update sections should be
+ empty in responses.
+
+ If tsig_ctx (isc.dns.TSIGContext) is not None, the response should
+ be TSIG signed and the signature should be verifiable with the context
+ that has signed the corresponding request.
+
+ if tcp is True, the wire data are expected to be prepended with
+ a 2-byte length field.
+
+ '''
+ if tcp:
+ data_len = resp_wire[0] * 256 + resp_wire[1]
+ resp_wire = resp_wire[2:]
+ self.assertEqual(len(resp_wire), data_len)
+
+ msg = Message(Message.PARSE)
+ msg.from_wire(resp_wire)
+ if tsig_ctx is not None:
+ tsig_record = msg.get_tsig_record()
+ self.assertNotEqual(None, tsig_record)
+ self.assertEqual(TSIGError.NOERROR,
+ tsig_ctx.verify(tsig_record, resp_wire))
+ self.assertEqual(Opcode.UPDATE(), msg.get_opcode())
+ self.assertEqual(expected_rcode, msg.get_rcode())
+ self.assertEqual(TEST_QID, msg.get_qid())
+ for section in [SECTION_ZONE, SECTION_PREREQUISITE, SECTION_UPDATE]:
+ self.assertEqual(0, msg.get_rr_count(section))
+
+ def check_session(self, result=UPDATE_SUCCESS, ipv6=True, tsig_key=None):
+ # reset test parameters
+ self.__sock.clear()
+ self.__faked_result = result
+
+ server_addr = TEST_SERVER6 if ipv6 else TEST_SERVER4
+ client_addr = TEST_CLIENT6 if ipv6 else TEST_CLIENT4
+ tsig = TSIGContext(tsig_key) if tsig_key is not None else None
+ rcode = Rcode.NOERROR() if result == UPDATE_SUCCESS else Rcode.REFUSED()
+ has_response = (result != UPDATE_DROP)
+
+ self.assertEqual(has_response,
+ self.server.handle_request((self.__sock,
+ server_addr, client_addr,
+ create_msg(tsigctx=tsig))))
+ if has_response:
+ self.assertEqual(client_addr, self.__sock._sent_addr)
+ self.check_update_response(self.__sock._sent_data, rcode)
+ else:
+ self.assertEqual((None, None), (self.__sock._sent_addr,
+ self.__sock._sent_data))
+
+ def test_handle_request(self):
+ '''Basic request handling without any unexpected errors.'''
+ # Success, without TSIG
+ self.check_session()
+ # Update will be refused with a response.
+ self.check_session(UPDATE_ERROR, ipv6=False)
+ # Update will be refused and dropped
+ self.check_session(UPDATE_DROP)
+ # Success, with TSIG
+ self.check_session(ipv6=False, tsig_key=TEST_TSIG_KEY)
+ # Update will be refused with a response, with TSIG.
+ self.check_session(UPDATE_ERROR, tsig_key=TEST_TSIG_KEY)
+ # Update will be refused and dropped, with TSIG (doesn't matter though)
+ self.check_session(UPDATE_DROP, ipv6=False, tsig_key=TEST_TSIG_KEY)
+
+ def test_broken_request(self):
+ # Message data too short
+ s = self.__sock
+ self.assertFalse(self.server.handle_request((self.__sock, None,
+ None, b'x' * 11)))
+ self.assertEqual((None, None), (s._sent_data, s._sent_addr))
+
+ # Opcode is not UPDATE
+ self.assertFalse(self.server.handle_request(
+ (self.__sock, None, None, create_msg(opcode=Opcode.QUERY()))))
+ self.assertEqual((None, None), (s._sent_data, s._sent_addr))
+
+ # TSIG verification error. We use UPDATE_DROP to signal check_session
+ # that no response should be given.
+ self.check_session(result=UPDATE_DROP, ipv6=False,
+ tsig_key=BAD_TSIG_KEY)
+
+ def test_socket_error(self):
+ # Have the faked socket raise an exception on sendto()
+ self.__sock._raise_on_send = True
+ # handle_request indicates the failure
+ self.assertFalse(self.server.handle_request((self.__sock, TEST_SERVER6,
+ TEST_CLIENT6,
+ create_msg())))
+ # this check ensures sendto() was really attempted.
+ self.check_update_response(self.__sock._sent_data, Rcode.NOERROR())
+
+ def test_tcp_request(self):
+ # A simple case using TCP: all response data are sent out at once.
+ s = self.__sock
+ s.proto = socket.IPPROTO_TCP
+ self.assertTrue(self.server.handle_request((s, TEST_SERVER6,
+ TEST_CLIENT6,
+ create_msg())))
+ self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+ # In the current implementation, the socket should be closed
+ # immediately after a successful send.
+ self.assertEqual(1, s._close_called)
+ # TCP context shouldn't be held in the server.
+ self.assertEqual(0, len(self.server._tcp_ctxs))
+
+ def test_tcp_request_incomplete(self):
+ # set the size of the socket "send buffer" to a small value, which
+ # should cause partial send.
+ s = self.__sock
+ s.proto = socket.IPPROTO_TCP
+ s._send_buflen = 7
+ # before this request there should be no outstanding TCP context.
+ self.assertEqual(0, len(self.server._tcp_ctxs))
+ self.assertTrue(self.server.handle_request((s, TEST_SERVER6,
+ TEST_CLIENT6,
+ create_msg())))
+ # Only the part of data that fit the send buffer should be transmitted.
+ self.assertEqual(s._send_buflen, len(s._sent_data))
+ # the socket is not yet closed.
+ self.assertEqual(0, s._close_called)
+ # and a new context is stored in the server.
+ self.assertEqual(1, len(self.server._tcp_ctxs))
+
+ # clear the "send buffer" of the fake socket, and continue the send
+ # by hand. The next attempt should complete the send, and the combined
+ # data should be the expected response.
+ s.make_send_ready()
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.server._tcp_ctxs[s.fileno()][0].send_ready())
+ self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+
+ def test_tcp_request_error(self):
+ # initial send() on the TCP socket will fail. The request handling
+ # will be considered failure.
+ s = self.__sock
+ s.proto = socket.IPPROTO_TCP
+ s._raise_on_send = True
+ self.assertFalse(self.server.handle_request((s, TEST_SERVER6,
+ TEST_CLIENT6,
+ create_msg())))
+ # the socket should have been closed.
+ self.assertEqual(1, s._close_called)
+
+ def test_tcp_request_quota(self):
+ '''Test the quota on the number of outstanding TCP contexts.'''
+ # Originally the TCP context map should be empty.
+ self.assertEqual(0, len(self.server._tcp_ctxs))
+
+ class FakeReceiver:
+ '''Faked SessionReceiver, just returning given param by pop()'''
+ def __init__(self, param):
+ self.__param = param
+ def pop(self):
+ return self.__param
+
+ def check_tcp_ok(fd, expect_grant):
+ '''Supplemental checker to see if TCP request is handled.'''
+ s = FakeSocket(fd, proto=socket.IPPROTO_TCP)
+ s._send_buflen = 7
+ self.server._socksession_receivers[s.fileno()] = \
+ (None, FakeReceiver((s, TEST_SERVER6, TEST_CLIENT6,
+ create_msg())))
+ self.assertEqual(expect_grant,
+ self.server.handle_session(s.fileno()))
+ self.assertEqual(0 if expect_grant else 1, s._close_called)
+
+ # By default up to 10 TCP clients can coexist (we use a hardcoded
+ # value intentionally so we can test the default value itself)
+ for i in range(0, 10):
+ check_tcp_ok(i, True)
+ self.assertEqual(10, len(self.server._tcp_ctxs))
+
+ # Beyond that, it should be rejected (by reset)
+ check_tcp_ok(11, False)
+
+ # If we remove one context from the server, new client can go in again.
+ self.server._tcp_ctxs.pop(5)
+ check_tcp_ok(12, True)
+
+ def test_request_message(self):
+ '''Test if the request message stores RRs separately.'''
+ # Specify 'drop' so the passed message won't be modified.
+ self.__faked_result = UPDATE_DROP
+ # Put the same RR twice in the prerequisite section. We should see
+ # them as separate RRs.
+ dummy_record = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ RRTTL(0))
+ dummy_record.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, "ns.example"))
+ self.server.handle_request((self.__sock, TEST_SERVER6, TEST_CLIENT6,
+ create_msg(prereq=[dummy_record,
+ dummy_record])))
+ num_rrsets = len(self.__req_message.get_section(SECTION_PREREQUISITE))
+ self.assertEqual(2, num_rrsets)
+
+ def check_session_msg(self, result, expect_recv=1, notify_auth=False):
+ '''Check post update communication with other modules.'''
+ # iff the update succeeds, b10-ddns should tell interested other
+ # modules the information about the update zone. Possible modules
+ # are xfrout and auth: for xfrout, the message format should be:
+ # {'command': ['notify', {'zone_name': <updated_zone_name>,
+ # 'zone_class': <updated_zone_class>}]}
+ # for auth, it should be:
+ # {'command': ['loadzone', {'origin': <updated_zone_name>,
+ # 'class': <updated_zone_class>,
+ # 'datasrc': <datasrc type, should be
+ # "memory" in practice>}]}
+ # and expect an answer by calling group_recvmsg().
+ #
+ # expect_recv indicates the expected number of calls to
+ # group_recvmsg(), which is normally 1, but can be 0 if send fails;
+ # it can be 2 if messages are sent to both Auth and Xfrout.
+ if result == UPDATE_SUCCESS:
+ expected_sentmsg = 2 if notify_auth else 1
+ self.assertEqual(expected_sentmsg,
+ len(self.__cc_session._sent_msg))
+ self.assertEqual(expect_recv, self.__cc_session._recvmsg_called)
+ msg_cnt = 0
+ if notify_auth:
+ sent_msg, sent_group = self.__cc_session._sent_msg[msg_cnt]
+ sent_cmd = sent_msg['command']
+ self.assertEqual('Auth', sent_group)
+ self.assertEqual('loadzone', sent_cmd[0])
+ self.assertEqual(3, len(sent_cmd[1]))
+ self.assertEqual(TEST_ZONE_NAME.to_text(),
+ sent_cmd[1]['origin'])
+ self.assertEqual(TEST_RRCLASS.to_text(),
+ sent_cmd[1]['class'])
+ self.assertEqual('memory', sent_cmd[1]['datasrc'])
+ msg_cnt += 1
+ sent_msg, sent_group = self.__cc_session._sent_msg[msg_cnt]
+ sent_cmd = sent_msg['command']
+ self.assertEqual('Xfrout', sent_group)
+ self.assertEqual('notify', sent_cmd[0])
+ self.assertEqual(2, len(sent_cmd[1]))
+ self.assertEqual(TEST_ZONE_NAME.to_text(), sent_cmd[1]['zone_name'])
+ self.assertEqual(TEST_RRCLASS.to_text(), sent_cmd[1]['zone_class'])
+ else:
+ # for other result cases neither send nor recvmsg should be called.
+ self.assertEqual([], self.__cc_session._sent_msg)
+ self.assertEqual(0, self.__cc_session._recvmsg_called)
+
+ def test_session_msg(self):
+ '''Test post update communication with other modules.'''
+ # Normal cases, confirming communication takes place iff update
+ # succeeds
+ for r in [UPDATE_SUCCESS, UPDATE_ERROR, UPDATE_DROP]:
+ self.__cc_session.clear_msg()
+ self.check_session(result=r)
+ self.check_session_msg(r)
+
+ # Return an error from the remote module, which should be just ignored.
+ self.__cc_session.clear_msg()
+ self.__cc_session._answer_code = 1
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS)
+
+ # raise some exceptions from the faked session. Expected ones are
+ # simply (logged and) ignored
+ self.__cc_session.clear_msg()
+ self.__cc_session._recvmsg_exception = SessionTimeout('dummy timeout')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS)
+
+ self.__cc_session.clear_msg()
+ self.__cc_session._recvmsg_exception = SessionError('dummy error')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS)
+
+ self.__cc_session.clear_msg()
+ self.__cc_session._recvmsg_exception = ProtocolError('dummy perror')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS)
+
+ # Similar to the previous cases, but sendmsg() raises, so there should
+ # be no call to recvmsg().
+ self.__cc_session.clear_msg()
+ self.__cc_session._sendmsg_exception = SessionError('send error')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS, expect_recv=0)
+
+ # Unexpected exception will be propagated (and will terminate the
+ # server)
+ self.__cc_session.clear_msg()
+ self.__cc_session._sendmsg_exception = RuntimeError('unexpected')
+ self.assertRaises(RuntimeError, self.check_session)
+
+ def test_session_msg_for_auth(self):
+ '''Test post update communication with other modules including Auth.'''
+ # Let the CC session return in-memory config with sqlite3 backend.
+ # (The default case was covered by other tests.)
+ self.__cc_session.auth_datasources = \
+ [{'type': 'memory', 'class': 'IN', 'zones': [
+ {'origin': TEST_ZONE_NAME_STR, 'filetype': 'sqlite3'}]}]
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS, expect_recv=2, notify_auth=True)
+
+ # Let sendmsg() raise an exception. The first exception shouldn't
+ # stop sending the second message. There's just no recv calls.
+ self.__cc_session.clear_msg()
+ self.__cc_session._sendmsg_exception = SessionError('send error')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS, expect_recv=0, notify_auth=True)
+
+ # Likewise, in the case recvmsg() raises (and there should be recv
+ # calls in this case)
+ self.__cc_session.clear_msg()
+ self.__cc_session._recvmsg_exception = SessionError('recv error')
+ self.check_session()
+ self.check_session_msg(UPDATE_SUCCESS, expect_recv=2, notify_auth=True)
+
+ def test_session_with_config(self):
+ '''Check a session with more realistic config setups.
+
+ We don't have to explore various cases in detail in this test.
+ We're just checking if the expected configured objects are passed
+ to the session object.
+
+ '''
+
+ # reset the session class to the real one
+ self.server._UpdateSessionClass = isc.ddns.session.UpdateSession
+
+ # install all-drop ACL
+ new_config = { 'zones': [ { 'origin': TEST_ZONE_NAME_STR,
+ 'class': TEST_RRCLASS_STR,
+ 'update_acl': [{'action': 'DROP'}] } ] }
+ answer = self.server.config_handler(new_config)
+ self.assertEqual((0, None), isc.config.parse_answer(answer))
+
+ # check the result
+ self.check_session(UPDATE_DROP)
+
class TestMain(unittest.TestCase):
def setUp(self):
self._server = MyDDNSServer()
@@ -379,6 +1281,8 @@ class TestMain(unittest.TestCase):
def __clear_socket(self):
self.__clear_called = True
+ # Get rid of the socket file too
+ self.__orig_clear()
def check_exception(self, ex):
'''Common test sequence to see if the given exception is caused.
@@ -414,6 +1318,18 @@ class TestMain(unittest.TestCase):
self.assertRaises(BaseException, ddns.main, self._server)
self.assertTrue(self._server.exception_raised)
+class TestConfig(unittest.TestCase):
+ '''Test some simple config-related things that don't need a server.'''
+ def setUp(self):
+ self.__ccsession = MyCCSession()
+
+ def test_file_path(self):
+ # Check some common paths
+ self.assertEqual(os.environ["B10_FROM_BUILD"] + "/ddns_socket",
+ ddns.SOCKET_FILE)
+ self.assertEqual(os.environ["B10_FROM_SOURCE"] +
+ "/src/bin/ddns/ddns.spec", ddns.SPECFILE_LOCATION)
+
if __name__== "__main__":
isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/dhcp4/Makefile.am b/src/bin/dhcp4/Makefile.am
index c828fdc..704642f 100644
--- a/src/bin/dhcp4/Makefile.am
+++ b/src/bin/dhcp4/Makefile.am
@@ -32,6 +32,12 @@ pkglibexec_PROGRAMS = b10-dhcp4
b10_dhcp4_SOURCES = main.cc dhcp4_srv.cc dhcp4_srv.h
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+b10_dhcp4_CXXFLAGS = -Wno-unused-parameter
+endif
+
b10_dhcp4_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp++.la
b10_dhcp4_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_dhcp4_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
diff --git a/src/bin/dhcp4/tests/Makefile.am b/src/bin/dhcp4/tests/Makefile.am
index a327e47..07de393 100644
--- a/src/bin/dhcp4/tests/Makefile.am
+++ b/src/bin/dhcp4/tests/Makefile.am
@@ -47,6 +47,12 @@ dhcp4_unittests_SOURCES = ../dhcp4_srv.h ../dhcp4_srv.cc
dhcp4_unittests_SOURCES += dhcp4_unittests.cc
dhcp4_unittests_SOURCES += dhcp4_srv_unittest.cc
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+dhcp4_unittests_CXXFLAGS = -Wno-unused-parameter
+endif
+
dhcp4_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
dhcp4_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
dhcp4_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 44b4e9b..16b17ed 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -34,6 +34,12 @@ pkglibexec_PROGRAMS = b10-dhcp6
b10_dhcp6_SOURCES = main.cc dhcp6_srv.cc dhcp6_srv.h
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+b10_dhcp6_CXXFLAGS = -Wno-unused-parameter
+endif
+
b10_dhcp6_LDADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/liblog.la
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index a1a11a0..c1a8f3c 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -43,6 +43,12 @@ dhcp6_unittests_SOURCES = ../dhcp6_srv.h ../dhcp6_srv.cc
dhcp6_unittests_SOURCES += dhcp6_unittests.cc
dhcp6_unittests_SOURCES += dhcp6_srv_unittest.cc
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+dhcp6_unittests_CXXFLAGS = -Wno-unused-parameter
+endif
+
dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
dhcp6_unittests_LDADD = $(GTEST_LDADD)
@@ -50,6 +56,7 @@ dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index 01254d4..b98996a 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -24,6 +24,7 @@ endif
B10_FROM_SOURCE=$(abs_top_srcdir) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 114bac4..74e0faf 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -33,6 +33,7 @@ import isc.util.process
from isc.datasrc import DataSourceClient, ZoneFinder
import isc.net.parse
from isc.xfrin.diff import Diff
+from isc.server_common.auth_command import auth_loadzone_command
from isc.log_messages.xfrin_messages import *
isc.log.init("b10-xfrin")
@@ -1248,50 +1249,13 @@ class ZoneInfo:
(str(self.master_addr), self.master_port))
def _do_auth_loadzone(server, zone_name, zone_class):
- # On a successful zone transfer, if the zone is served by
- # b10-auth in the in-memory data source using sqlite3 as a
- # backend, send the "loadzone" command for the zone to auth.
- datasources, is_default =\
- server._module_cc.get_remote_config_value(AUTH_MODULE_NAME, "datasources")
- if is_default:
- return
- for d in datasources:
- if "type" not in d:
- continue
- try:
- if "class" in d:
- dclass = RRClass(d["class"])
- else:
- dclass = RRClass("IN")
- except InvalidRRClass as err:
- logger.info(XFRIN_AUTH_CONFIG_RRCLASS_ERROR, str(err))
- continue
-
- if d["type"].lower() == "memory" and dclass == zone_class:
- for zone in d["zones"]:
- if "filetype" not in zone:
- continue
- if "origin" not in zone:
- continue
- if "filetype" not in zone:
- continue
- try:
- name = Name(zone["origin"])
- except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape, TooLongName, IncompleteName):
- logger.info(XFRIN_AUTH_CONFIG_NAME_PARSER_ERROR, str(err))
- continue
-
- if zone["filetype"].lower() == "sqlite3" and name == zone_name:
- param = {"origin": zone_name.to_text(),
- "class": zone_class.to_text(),
- "datasrc": d["type"]}
-
- logger.debug(DBG_XFRIN_TRACE, XFRIN_AUTH_LOADZONE,
- param["origin"], param["class"], param["datasrc"])
-
- msg = create_command("loadzone", param)
- seq = server._send_cc_session.group_sendmsg(msg, AUTH_MODULE_NAME)
- answer, env = server._send_cc_session.group_recvmsg(False, seq)
+ msg = auth_loadzone_command(server._module_cc, zone_name, zone_class)
+ if msg is not None:
+ param = msg['command'][1]
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_AUTH_LOADZONE, param["origin"],
+ param["class"], param["datasrc"])
+ seq = server._send_cc_session.group_sendmsg(msg, AUTH_MODULE_NAME)
+ answer, env = server._send_cc_session.group_recvmsg(False, seq)
class Xfrin:
def __init__(self):
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 6b51661..ffea249 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,12 +15,6 @@
# No namespace declaration - these constants go in the global namespace
# of the xfrin messages python module.
-% XFRIN_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1
-There was an invalid name when parsing Auth configuration.
-
-% XFRIN_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1
-There was an invalid RR class when parsing Auth configuration.
-
% XFRIN_AUTH_LOADZONE sending Auth loadzone for origin=%1, class=%2, datasrc=%3
There was a successful zone transfer, and the zone is served by b10-auth
in the in-memory data source using sqlite3 as a backend. We send the
diff --git a/src/lib/asiolink/io_endpoint.h b/src/lib/asiolink/io_endpoint.h
index dd74036..973fc8b 100644
--- a/src/lib/asiolink/io_endpoint.h
+++ b/src/lib/asiolink/io_endpoint.h
@@ -168,8 +168,8 @@ public:
///
/// This method converts the address and port of the endpoint in the textual
/// format that other BIND 10 modules would use in logging, i.e.,
-/// - For IPv6 address: [<address>]:port (e.g., [2001:db8::5300]:53)
-/// - For IPv4 address: <address>:port (e.g., 192.0.2.53:5300)
+/// - For IPv6 address: [<address>]:port (e.g., [2001:db8::5300]:53)
+/// - For IPv4 address: <address>:port (e.g., 192.0.2.53:5300)
///
/// If it's neither IPv6 nor IPv4, it converts the endpoint into text in the
/// same format as that for IPv4, although in practice such a case is not
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 2cdb8ea..9a4d733 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -12,8 +12,13 @@ pkglibdir = $(libexecdir)/@PACKAGE@/backends
datasrc_config.h: datasrc_config.h.pre
$(SED) -e "s|@@PKGLIBDIR@@|$(pkglibdir)|" datasrc_config.h.pre >$@
+static.zone: static.zone.pre
+ $(SED) -e "s|@@VERSION_STRING@@|$(PACKAGE_STRING)|" $(srcdir)/static.zone.pre >$@
+ $(SED) -e 's/\(.*\)/AUTHORS.BIND. 0 CH TXT "\1"/' $(top_srcdir)/AUTHORS >>$@
+
CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
CLEANFILES += datasrc_config.h
+CLEANFILES += static.zone
lib_LTLIBRARIES = libdatasrc.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -30,10 +35,11 @@ libdatasrc_la_SOURCES += logger.h logger.cc
libdatasrc_la_SOURCES += client.h iterator.h
libdatasrc_la_SOURCES += database.h database.cc
libdatasrc_la_SOURCES += factory.h factory.cc
+libdatasrc_la_SOURCES += client_list.h client_list.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
libdatasrc_la_LDFLAGS = -no-undefined -version-info 1:0:1
-pkglib_LTLIBRARIES = sqlite3_ds.la memory_ds.la
+pkglib_LTLIBRARIES = sqlite3_ds.la memory_ds.la static_ds.la
sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
sqlite3_ds_la_SOURCES += sqlite3_accessor_link.cc
@@ -49,6 +55,12 @@ memory_ds_la_LDFLAGS = -module -avoid-version
memory_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
memory_ds_la_LIBADD += libdatasrc.la
+static_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+static_ds_la_SOURCES += static_datasrc_link.cc
+static_ds_la_LDFLAGS = -module -avoid-version
+static_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+static_ds_la_LIBADD += libdatasrc.la
+
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
@@ -59,4 +71,7 @@ BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
-EXTRA_DIST = datasrc_messages.mes
+EXTRA_DIST = datasrc_messages.mes static.zone.pre
+
+zonedir = $(pkgdatadir)
+zone_DATA = static.zone
diff --git a/src/lib/datasrc/client_list.cc b/src/lib/datasrc/client_list.cc
new file mode 100644
index 0000000..549b216
--- /dev/null
+++ b/src/lib/datasrc/client_list.cc
@@ -0,0 +1,162 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "client_list.h"
+#include "client.h"
+#include "factory.h"
+
+#include <memory>
+#include <boost/foreach.hpp>
+
+using namespace isc::data;
+using namespace std;
+
+namespace isc {
+namespace datasrc {
+
+void
+ConfigurableClientList::configure(const Element& config, bool) {
+ // TODO: Implement the cache
+ // TODO: Implement recycling from the old configuration.
+ size_t i(0); // Outside of the try to be able to access it in the catch
+ try {
+ vector<DataSourceInfo> new_data_sources;
+ for (; i < config.size(); ++i) {
+ // Extract the parameters
+ const ConstElementPtr dconf(config.get(i));
+ const ConstElementPtr typeElem(dconf->get("type"));
+ if (typeElem == ConstElementPtr()) {
+ isc_throw(ConfigurationError, "Missing the type option in "
+ "data source no " << i);
+ }
+ const string type(typeElem->stringValue());
+ ConstElementPtr paramConf(dconf->get("params"));
+ if (paramConf == ConstElementPtr()) {
+ paramConf.reset(new NullElement());
+ }
+ // TODO: Special-case the master files type.
+ // Ask the factory to create the data source for us
+ const DataSourcePair ds(this->getDataSourceClient(type,
+ paramConf));
+ // And put it into the vector
+ new_data_sources.push_back(DataSourceInfo(ds.first, ds.second));
+ }
+ // If everything is OK up until now, we have the new configuration
+ // ready. So just put it there and let the old one die when we exit
+ // the scope.
+ data_sources_.swap(new_data_sources);
+ } catch (const TypeError& te) {
+ isc_throw(ConfigurationError, "Malformed configuration at data source "
+ "no. " << i << ": " << te.what());
+ }
+}
+
+ClientList::FindResult
+ConfigurableClientList::find(const dns::Name& name, bool want_exact_match,
+ bool) const
+{
+ // Nothing found yet.
+ //
+ // We have this class as a temporary storage, as the FindResult can't be
+ // assigned.
+ struct MutableResult {
+ MutableResult() :
+ datasrc_client(NULL),
+ matched_labels(0),
+ matched(false)
+ {}
+ DataSourceClient* datasrc_client;
+ ZoneFinderPtr finder;
+ uint8_t matched_labels;
+ bool matched;
+ operator FindResult() const {
+ // Conversion to the right result. If we return this, there was
+ // a partial match at best.
+ return (FindResult(datasrc_client, finder, false));
+ }
+ } candidate;
+
+ BOOST_FOREACH(const DataSourceInfo& info, data_sources_) {
+ // TODO: Once we have support for the caches, consider them too here
+ // somehow. This would probably get replaced by a function, that
+ // checks if there's a cache available, if it is, checks the loaded
+ // zones and zones expected to be in the real data source. If it is
+ // the cached one, provide the cached one. If it is in the external
+ // data source, use the datasource and don't provide the finder yet.
+ const DataSourceClient::FindResult result(
+ info.data_src_client_->findZone(name));
+ switch (result.code) {
+ case result::SUCCESS:
+ // If we found an exact match, we have no hope to getting
+ // a better one. Stop right here.
+
+ // TODO: In case we have only the datasource and not the finder
+ // and the need_updater parameter is true, get the zone there.
+ return (FindResult(info.data_src_client_, result.zone_finder,
+ true));
+ case result::PARTIALMATCH:
+ if (!want_exact_match) {
+ // In case we have a partial match, check if it is better
+ // than what we have. If so, replace it.
+ //
+ // We don't need the labels at the first partial match,
+ // we have nothing to compare with. So we don't get it
+ // (as a performance) and hope we will not need it at all.
+ const uint8_t labels(candidate.matched ?
+ result.zone_finder->getOrigin().getLabelCount() : 0);
+ if (candidate.matched && candidate.matched_labels == 0) {
+ // But if the hope turns out to be false, we need to
+ // compute it for the first match anyway.
+ candidate.matched_labels = candidate.finder->
+ getOrigin().getLabelCount();
+ }
+ if (labels > candidate.matched_labels ||
+ !candidate.matched) {
+ // This one is strictly better. Replace it.
+ candidate.datasrc_client = info.data_src_client_;
+ candidate.finder = result.zone_finder;
+ candidate.matched_labels = labels;
+ candidate.matched = true;
+ }
+ }
+ break;
+ default:
+ // Nothing found, nothing to do.
+ break;
+ }
+ }
+
+ // TODO: In case we have only the datasource and not the finder
+ // and the need_updater parameter is true, get the zone there.
+
+ // Return the partial match we have. In case we didn't want a partial
+ // match, this surely contains the original empty result.
+ return (candidate);
+}
+
+// NOTE: This function is not tested, it would be complicated. However, the
+// purpose of the function is to provide a very thin wrapper to be able to
+// replace the call to DataSourceClientContainer constructor in tests.
+ConfigurableClientList::DataSourcePair
+ConfigurableClientList::getDataSourceClient(const string& type,
+ const ConstElementPtr&
+ configuration)
+{
+ DataSourceClientContainerPtr
+ container(new DataSourceClientContainer(type, configuration));
+ return (DataSourcePair(&container->getInstance(), container));
+}
+
+}
+}
diff --git a/src/lib/datasrc/client_list.h b/src/lib/datasrc/client_list.h
new file mode 100644
index 0000000..599dca8
--- /dev/null
+++ b/src/lib/datasrc/client_list.h
@@ -0,0 +1,289 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DATASRC_CONTAINER_H
+#define DATASRC_CONTAINER_H
+
+#include <dns/name.h>
+#include <cc/data.h>
+#include <exceptions/exceptions.h>
+
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/noncopyable.hpp>
+
+namespace isc {
+namespace datasrc {
+
+class ZoneFinder;
+typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
+class DataSourceClient;
+typedef boost::shared_ptr<DataSourceClient> DataSourceClientPtr;
+class DataSourceClientContainer;
+typedef boost::shared_ptr<DataSourceClientContainer>
+ DataSourceClientContainerPtr;
+
+/// \brief The list of data source clients.
+///
+/// The purpose of this class is to hold several data source clients and search
+/// through them to find one containing a zone best matching a request.
+///
+/// All the data source clients should be for the same class. If you need
+/// to handle multiple classes, you need to create multiple separate lists.
+///
+/// This is an abstract base class. It is not expected we would use multiple
+/// implementation inside the servers (but it is not forbidden either), we
+/// have it to allow easy testing. It is possible to create a mock-up class
+/// instead of creating a full-blown configuration. The real implementation
+/// is the ConfigurableClientList.
+class ClientList : public boost::noncopyable {
+protected:
+ /// \brief Constructor.
+ ///
+ /// It is protected to prevent accidental creation of the abstract base
+ /// class.
+ ClientList() {}
+public:
+ /// \brief Virtual destructor
+ virtual ~ClientList() {}
+ /// \brief Structure holding the (compound) result of find.
+ ///
+ /// As this is read-only structure, we don't bother to create accessors.
+ /// Instead, all the member variables are defined as const and can be
+ /// accessed directly.
+ struct FindResult {
+ /// \brief Constructor.
+ ///
+ /// It simply fills in the member variables according to the
+ /// parameters. See the member descriptions for their meaning.
+ FindResult(DataSourceClient* dsrc_client, const ZoneFinderPtr& finder,
+ bool exact_match) :
+ dsrc_client_(dsrc_client),
+ finder_(finder),
+ exact_match_(exact_match)
+ {}
+
+ /// \brief Negative answer constructor.
+ ///
+    /// This constructs a result for negative answer. Both pointers are
+ /// NULL, and exact_match_ is false.
+ FindResult() :
+ dsrc_client_(NULL),
+ exact_match_(false)
+ {}
+
+ /// \brief Comparison operator.
+ ///
+ /// It is needed for tests and it might be of some use elsewhere
+ /// too.
+ bool operator ==(const FindResult& other) const {
+ return (dsrc_client_ == other.dsrc_client_ &&
+ finder_ == other.finder_ &&
+ exact_match_ == other.exact_match_);
+ }
+
+ /// \brief The found data source client.
+ ///
+ /// The client of the data source containing the best matching zone.
+ /// If no such data source exists, this is NULL pointer.
+ ///
+ /// Note that the pointer is valid only as long the ClientList which
+ /// returned the pointer is alive and was not reconfigured. The
+ /// ownership is preserved within the ClientList.
+ DataSourceClient* const dsrc_client_;
+
+ /// \brief The finder for the requested zone.
+ ///
+ /// This is the finder corresponding to the best matching zone.
+        /// This may be NULL even in case the dsrc_client_ is something
+ /// else, depending on the find options.
+ ///
+ /// \see find
+ const ZoneFinderPtr finder_;
+
+ /// \brief If the result is an exact match.
+ const bool exact_match_;
+ };
+
+ /// \brief Search for a zone through the data sources.
+ ///
+ /// This searches the contained data source clients for a one that best
+ /// matches the zone name.
+ ///
+ /// There are two expected usage scenarios. One is answering queries. In
+ /// this case, the zone finder is needed and the best matching superzone
+ /// of the searched name is needed. Therefore, the call would look like:
+ ///
+    /// \code
+    /// FindResult result(list->find(queried_name));
+    /// if (result.dsrc_client_) {
+    ///     createTheAnswer(result.finder_);
+    /// } else {
+    ///     createNotAuthAnswer();
+    /// } \endcode
+ ///
+ /// The other scenario is manipulating zone data (XfrOut, XfrIn, DDNS,
+ /// ...). In this case, the finder itself is not so important. However,
+ /// we need an exact match (if we want to manipulate zone data, we must
+ /// know exactly, which zone we are about to manipulate). Then the call
+ ///
+    /// \code
+    /// FindResult result(list->find(zone_name, true, false));
+    /// if (result.dsrc_client_) {
+    ///     ZoneUpdaterPtr updater(result.dsrc_client_->getUpdater(zone_name));
+    ///     ...
+    /// } \endcode
+ ///
+ /// \param zone The name of the zone to look for.
+ /// \param want_exact_match If it is true, it returns only exact matches.
+ /// If the best possible match is partial, a negative result is
+ /// returned instead. It is possible the caller could check it and
+ /// act accordingly if the result would be partial match, but with this
+ /// set to true, the find might be actually faster under some
+ /// circumstances.
+ /// \param want_finder If this is false, the finder_ member of FindResult
+ /// might be NULL even if the corresponding data source is found. This
+ /// is because of performance, in some cases the finder is a side
+ /// result of the searching algorithm (therefore asking for it again
+ /// would be a waste), but under other circumstances it is not, so
+ /// providing it when it is not needed would also be wasteful.
+ ///
+ /// Other things are never the side effect of searching, therefore the
+ /// caller can get them explicitly (the updater, journal reader and
+ /// iterator).
+ /// \return A FindResult describing the data source and zone with the
+ /// longest match against the zone parameter.
+ virtual FindResult find(const dns::Name& zone,
+ bool want_exact_match = false,
+ bool want_finder = true) const = 0;
+};
+
+/// \brief Shared pointer to the list.
+typedef boost::shared_ptr<ClientList> ClientListPtr;
+/// \brief Shared const pointer to the list.
+typedef boost::shared_ptr<const ClientList> ConstClientListPtr;
+
+/// \brief Concrete implementation of the ClientList, which is constructed based on
+/// configuration.
+///
+/// This is the implementation which is expected to be used in the servers.
+/// However, it is expected most of the code will use it as the ClientList,
+/// only the creation is expected to be direct.
+///
+/// While it is possible to inherit this class, it is not expected to be
+/// inherited except for tests.
+class ConfigurableClientList : public ClientList {
+public:
+ /// \brief Exception thrown when there's an error in configuration.
+ class ConfigurationError : public Exception {
+ public:
+ ConfigurationError(const char* file, size_t line, const char* what) :
+ Exception(file, line, what)
+ {}
+ };
+
+ /// \brief Sets the configuration.
+ ///
+ /// This fills the ClientList with data source clients corresponding to the
+ /// configuration. The data source clients are newly created or recycled
+ /// from previous configuration.
+ ///
+ /// If any error is detected, an exception is thrown and the current
+ /// configuration is preserved.
+ ///
+ /// \param configuration The JSON element describing the configuration to
+ /// use.
+ /// \param allow_cache If it is true, the 'cache' option of the
+ /// configuration is used and some zones are cached into an In-Memory
+ /// data source according to it. If it is false, it is ignored and
+ /// no In-Memory data sources are created.
+ /// \throw DataSourceError if there's a problem creating a data source
+ /// client.
+ /// \throw ConfigurationError if the configuration is invalid in some
+ /// sense.
+ void configure(const data::Element& configuration, bool allow_cache);
+
+ /// \brief Implementation of the ClientList::find.
+ virtual FindResult find(const dns::Name& zone,
+ bool want_exact_match = false,
+ bool want_finder = true) const;
+
+ /// \brief This holds one data source client and corresponding information.
+ ///
+ /// \todo The content yet to be defined.
+ struct DataSourceInfo {
+ /// \brief Default constructor.
+ ///
+ /// Don't use directly. It is here so the structure can live in
+ /// a vector.
+ DataSourceInfo() :
+ data_src_client_(NULL)
+ {}
+ DataSourceInfo(DataSourceClient* data_src_client,
+ const DataSourceClientContainerPtr& container) :
+ data_src_client_(data_src_client),
+ container_(container)
+ {}
+ DataSourceClient* data_src_client_;
+ DataSourceClientContainerPtr container_;
+ };
+
+ /// \brief The collection of data sources.
+ typedef std::vector<DataSourceInfo> DataSources;
+protected:
+ /// \brief The data sources held here.
+ ///
+ /// All our data sources are stored here. It is protected to let the
+ /// tests in. You should consider it private if you ever want to
+ /// derive this class (which is not really recommended anyway).
+ DataSources data_sources_;
+
+ /// \brief Convenience type alias.
+ ///
+ /// \see getDataSource
+ typedef std::pair<DataSourceClient*, DataSourceClientContainerPtr>
+ DataSourcePair;
+
+ /// \brief Create a data source client of given type and configuration.
+ ///
+ /// This is a thin wrapper around the DataSourceClientContainer
+ /// constructor. The function is here to make it possible for tests
+ /// to replace the DataSourceClientContainer with something else.
+ /// Also, derived classes could want to create the data source clients
+ /// in a different way, though inheriting this class is not recommended.
+ ///
+ /// The parameters are the same as of the constructor.
+ /// \return Pair containing both the data source client and the container.
+ /// The container might be NULL in the derived class, it is
+ /// only stored so the data source client is properly destroyed when
+ /// not needed. However, in such case, it is the caller's
+ /// responsibility to ensure the data source client is deleted when
+ /// needed.
+ virtual DataSourcePair getDataSourceClient(const std::string& type,
+ const data::ConstElementPtr&
+ configuration);
+public:
+ /// \brief Access to the data source clients.
+ ///
+ /// It can be used to examine the loaded list of data sources clients
+ /// directly. It is not known if it is of any use other than testing, but
+ /// it might be, so it is just made public (there's no real reason to
+ /// hide it).
+ const DataSources& getDataSources() const { return (data_sources_); }
+};
+
+} // namespace datasrc
+} // namespace isc
+
+#endif // DATASRC_CONTAINER_H
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 62fa61e..358dce8 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -450,7 +450,8 @@ DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name,
const size_t remove_labels = name.getLabelCount() - origin_label_count;
// Go through all superdomains from the origin down searching for nodes
- // that indicate a delegation (.e. NS or DNAME).
+    // that indicate a delegation (i.e. NS or DNAME). Note that we only check
+ // pure superdomains; delegation on an exact match will be detected later.
for (int i = remove_labels; i > 0; --i) {
const Name superdomain(name.split(i));
@@ -810,12 +811,14 @@ DatabaseClient::Finder::findOnNameResult(const Name& name,
const FoundIterator cni(found.second.find(RRType::CNAME()));
const FoundIterator wti(found.second.find(type));
- if (!is_origin && (options & FIND_GLUE_OK) == 0 &&
+ if (!is_origin && (options & FIND_GLUE_OK) == 0 && type != RRType::DS() &&
nsi != found.second.end()) {
// A NS RRset was found at the domain we were searching for. As it is
// not at the origin of the zone, it is a delegation and indicates that
// this zone is not authoritative for the data. Just return the
- // delegation information.
+ // delegation information, except:
+ // - when we are looking for glue records (FIND_GLUE_OK), or
+ // - when the query type is DS (which cancels the delegation)
return (logAndCreateResult(name, wildname, type, DELEGATION,
nsi->second,
wild ? DATASRC_DATABASE_WILDCARD_NS :
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 8e834cb..68a42dd 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -1747,7 +1747,7 @@ generateRRsetFromIterator(ZoneIterator* iterator, LoadCallback callback) {
}
void
-InMemoryZoneFinder::load(const string& filename) {
+InMemoryZoneFinder::load(const std::string& filename) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
arg(filename);
diff --git a/src/lib/datasrc/static.zone.pre b/src/lib/datasrc/static.zone.pre
new file mode 100644
index 0000000..16a7379
--- /dev/null
+++ b/src/lib/datasrc/static.zone.pre
@@ -0,0 +1,12 @@
+;; This is the content of the BIND./CH zone. It contains the version and
+;; authors (called VERSION.BIND. and AUTHORS.BIND.). You can add more or
+;; modify the zone. Then you can reload the zone by issuing the command
+;;
+;; loadzone CH BIND
+;;
+;; in the bindctl.
+
+BIND. 0 CH SOA bind. authors.bind. 0 28800 7200 604800 86400
+
+VERSION.BIND. 0 CH TXT "@@VERSION_STRING@@"
+;; HOSTNAME.BIND 0 CH TXT "localhost"
diff --git a/src/lib/datasrc/static_datasrc_link.cc b/src/lib/datasrc/static_datasrc_link.cc
new file mode 100644
index 0000000..789580d
--- /dev/null
+++ b/src/lib/datasrc/static_datasrc_link.cc
@@ -0,0 +1,62 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "client.h"
+#include "memory_datasrc.h"
+
+#include <cc/data.h>
+#include <dns/rrclass.h>
+
+#include <memory>
+#include <exception>
+
+using namespace isc::data;
+using namespace isc::dns;
+using namespace boost;
+using namespace std;
+
+namespace isc {
+namespace datasrc {
+
+DataSourceClient*
+createInstance(ConstElementPtr config, string& error) {
+ try {
+ // Create the data source
+ auto_ptr<InMemoryClient> client(new InMemoryClient());
+ // Hardcode the origin and class
+ shared_ptr<InMemoryZoneFinder>
+ finder(new InMemoryZoneFinder(RRClass::CH(), Name("BIND")));
+ // Fill it with data
+ const string path(config->stringValue());
+ finder->load(path);
+ // And put the zone inside
+ client->addZone(finder);
+ return (client.release());
+ }
+ catch (const std::exception& e) {
+ error = e.what();
+ }
+ catch (...) {
+ error = "Unknown exception";
+ }
+ return (NULL);
+}
+
+void
+destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+}
+}
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 6c4a937..1a54d49 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -59,6 +59,7 @@ run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += rbnode_rrset_unittest.cc
run_unittests_SOURCES += zone_finder_context_unittest.cc
run_unittests_SOURCES += faked_nsec3.h faked_nsec3.cc
+run_unittests_SOURCES += client_list_unittest.cc
# We need the actual module implementation in the tests (they are not part
# of libdatasrc)
@@ -113,3 +114,4 @@ EXTRA_DIST += testdata/test.sqlite3
EXTRA_DIST += testdata/new_minor_schema.sqlite3
EXTRA_DIST += testdata/newschema.sqlite3
EXTRA_DIST += testdata/oldschema.sqlite3
+EXTRA_DIST += testdata/static.zone
diff --git a/src/lib/datasrc/tests/client_list_unittest.cc b/src/lib/datasrc/tests/client_list_unittest.cc
new file mode 100644
index 0000000..4fed961
--- /dev/null
+++ b/src/lib/datasrc/tests/client_list_unittest.cc
@@ -0,0 +1,475 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/client_list.h>
+#include <datasrc/client.h>
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+
+#include <set>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+using namespace isc::dns;
+using namespace boost;
+using namespace std;
+
+namespace {
+
+// A test data source. It pretends it has some zones.
+class MockDataSourceClient : public DataSourceClient {
+public:
+ class Finder : public ZoneFinder {
+ public:
+ Finder(const Name& origin) :
+ origin_(origin)
+ {}
+ Name getOrigin() const { return (origin_); }
+ // The rest is not to be called, so just have them
+ RRClass getClass() const {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ shared_ptr<Context> find(const Name&, const RRType&,
+ const FindOptions)
+ {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ shared_ptr<Context> findAll(const Name&,
+ vector<ConstRRsetPtr>&,
+ const FindOptions)
+ {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ FindNSEC3Result findNSEC3(const Name&, bool) {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ private:
+ Name origin_;
+ };
+ // Constructor from a list of zones.
+ MockDataSourceClient(const char* zone_names[]) {
+ for (const char** zone(zone_names); *zone; ++zone) {
+ zones.insert(Name(*zone));
+ }
+ }
+ // Constructor from configuration. The list of zones will be empty, but
+ // it will keep the configuration inside for further inspection.
+ MockDataSourceClient(const string& type,
+ const ConstElementPtr& configuration) :
+ type_(type),
+ configuration_(configuration)
+ {}
+ virtual FindResult findZone(const Name& name) const {
+ if (zones.empty()) {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ set<Name>::const_iterator it(zones.upper_bound(name));
+ if (it == zones.begin()) {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ --it;
+ NameComparisonResult compar(it->compare(name));
+ const ZoneFinderPtr finder(new Finder(*it));
+ switch (compar.getRelation()) {
+ case NameComparisonResult::EQUAL:
+ return (FindResult(result::SUCCESS, finder));
+ case NameComparisonResult::SUPERDOMAIN:
+ return (FindResult(result::PARTIALMATCH, finder));
+ default:
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ }
+ // These methods are not used. They just need to be there to have
+ // complete vtable.
+ virtual ZoneUpdaterPtr getUpdater(const Name&, bool, bool) const {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ virtual pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+ getJournalReader(const Name&, uint32_t, uint32_t) const
+ {
+ isc_throw(isc::NotImplemented, "Not implemented");
+ }
+ const string type_;
+ const ConstElementPtr configuration_;
+private:
+ set<Name> zones;
+};
+
+
+// The test version is the same as the normal version. We, however, add
+// some methods to dig directly in the internals, for the tests.
+class TestedList : public ConfigurableClientList {
+public:
+ DataSources& getDataSources() { return (data_sources_); }
+ // Overwrite the list's method to get a data source with given type
+ // and configuration. We mock the data source and don't create the
+ // container. This is just to avoid some complexity in the tests.
+ virtual DataSourcePair getDataSourceClient(const string& type,
+ const ConstElementPtr&
+ configuration)
+ {
+ if (type == "error") {
+ isc_throw(DataSourceError, "The error data source type");
+ }
+ shared_ptr<MockDataSourceClient>
+ ds(new MockDataSourceClient(type, configuration));
+ // Make sure it is deleted when the test list is deleted.
+ to_delete_.push_back(ds);
+ return (DataSourcePair(ds.get(), DataSourceClientContainerPtr()));
+ }
+private:
+ // Hold list of data sources created internally, so they are preserved
+ // until the end of the test and then deleted.
+ vector<shared_ptr<MockDataSourceClient> > to_delete_;
+};
+
+const char* ds_zones[][3] = {
+ {
+ "example.org.",
+ "example.com.",
+ NULL
+ },
+ {
+ "sub.example.org.",
+ NULL, NULL
+ },
+ {
+ NULL, NULL, NULL
+ },
+ {
+ "sub.example.org.",
+ NULL, NULL
+ }
+};
+
+const size_t ds_count = (sizeof(ds_zones) / sizeof(*ds_zones));
+
+class ListTest : public ::testing::Test {
+public:
+ ListTest() :
+ // The empty list corresponds to a list with no elements inside
+ list_(new TestedList()),
+ config_elem_(Element::fromJSON("["
+ "{"
+ " \"type\": \"test_type\","
+ " \"cache\": \"off\","
+ " \"params\": {}"
+ "}]"))
+ {
+ for (size_t i(0); i < ds_count; ++ i) {
+ shared_ptr<MockDataSourceClient>
+ ds(new MockDataSourceClient(ds_zones[i]));
+ ds_.push_back(ds);
+ ds_info_.push_back(ConfigurableClientList::DataSourceInfo(ds.get(),
+ DataSourceClientContainerPtr()));
+ }
+ }
+ // Check the positive result is as we expect it.
+ void positiveResult(const ClientList::FindResult& result,
+ const shared_ptr<MockDataSourceClient>& dsrc,
+ const Name& name, bool exact,
+ const char* test)
+ {
+ SCOPED_TRACE(test);
+ EXPECT_EQ(dsrc.get(), result.dsrc_client_);
+ ASSERT_NE(ZoneFinderPtr(), result.finder_);
+ EXPECT_EQ(name, result.finder_->getOrigin());
+ EXPECT_EQ(exact, result.exact_match_);
+ }
+ // Configure the list with multiple data sources, according to
+ // some configuration. It uses the index as parameter, to be able to
+ // loop through the configurations.
+ void multiConfiguration(size_t index) {
+ list_->getDataSources().clear();
+ switch (index) {
+ case 2:
+ list_->getDataSources().push_back(ds_info_[2]);
+ // The ds_[2] is empty. We just check that it doesn't confuse
+ // us. Fall through to the case 0.
+ case 0:
+ list_->getDataSources().push_back(ds_info_[0]);
+ list_->getDataSources().push_back(ds_info_[1]);
+ break;
+ case 1:
+ // The other order
+ list_->getDataSources().push_back(ds_info_[1]);
+ list_->getDataSources().push_back(ds_info_[0]);
+ break;
+ case 3:
+ list_->getDataSources().push_back(ds_info_[1]);
+ list_->getDataSources().push_back(ds_info_[0]);
+ // It is the same as ds_[1], but we take from the first one.
+ // The first one to match is the correct one.
+ list_->getDataSources().push_back(ds_info_[3]);
+ break;
+ default:
+ FAIL() << "Unknown configuration index " << index;
+ }
+ }
+ void checkDS(size_t index, const string& type, const string& params) const
+ {
+ ASSERT_GT(list_->getDataSources().size(), index);
+ MockDataSourceClient* ds(dynamic_cast<MockDataSourceClient*>(
+ list_->getDataSources()[index].data_src_client_));
+
+ // Comparing with NULL does not work
+ ASSERT_NE(ds, static_cast<const MockDataSourceClient*>(NULL));
+ EXPECT_EQ(type, ds->type_);
+ EXPECT_TRUE(Element::fromJSON(params)->equals(*ds->configuration_));
+ }
+ shared_ptr<TestedList> list_;
+ const ClientList::FindResult negativeResult_;
+ vector<shared_ptr<MockDataSourceClient> > ds_;
+ vector<ConfigurableClientList::DataSourceInfo> ds_info_;
+ const ConstElementPtr config_elem_;
+};
+
+// Test the test itself
+TEST_F(ListTest, selfTest) {
+ EXPECT_EQ(result::SUCCESS, ds_[0]->findZone(Name("example.org")).code);
+ EXPECT_EQ(result::PARTIALMATCH,
+ ds_[0]->findZone(Name("sub.example.org")).code);
+ EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("org")).code);
+ EXPECT_EQ(result::NOTFOUND, ds_[1]->findZone(Name("example.org")).code);
+ EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("aaa")).code);
+ EXPECT_EQ(result::NOTFOUND, ds_[0]->findZone(Name("zzz")).code);
+}
+
+// Test the list we create with empty configuration is, in fact, empty
+TEST_F(ListTest, emptyList) {
+ EXPECT_TRUE(list_->getDataSources().empty());
+}
+
+// Check the values returned by a find on an empty list. It should be
+// a negative answer (nothing found) no matter if we want an exact or inexact
+// match.
+TEST_F(ListTest, emptySearch) {
+ // No matter what we try, we don't get an answer.
+
+ // Note: we don't have operator<< for the result class, so we cannot use
+ // EXPECT_EQ. Same for other similar cases.
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), false,
+ false));
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), false,
+ true));
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), true,
+ false));
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("example.org"), true,
+ true));
+}
+
+// Put a single data source inside the list and check it can find an
+// exact match if there's one.
+TEST_F(ListTest, singleDSExactMatch) {
+ list_->getDataSources().push_back(ds_info_[0]);
+ // This zone is not there
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("org."), true));
+ // But this one is, so check it.
+ positiveResult(list_->find(Name("example.org"), true), ds_[0],
+ Name("example.org"), true, "Exact match");
+ // When asking for a sub zone of a zone there, we get nothing
+ // (we want exact match, this would be partial one)
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("sub.example.org."),
+ true));
+}
+
+// When asking for a partial match, we get all that the exact one gives, but more.
+TEST_F(ListTest, singleDSBestMatch) {
+ list_->getDataSources().push_back(ds_info_[0]);
+ // This zone is not there
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("org.")));
+ // But this one is, so check it.
+ positiveResult(list_->find(Name("example.org")), ds_[0],
+ Name("example.org"), true, "Exact match");
+ // When asking for a sub zone of a zone there, we get the parent
+ // one.
+ positiveResult(list_->find(Name("sub.example.org.")), ds_[0],
+ Name("example.org"), false, "Subdomain match");
+}
+
+const char* const test_names[] = {
+ "Sub second",
+ "Sub first",
+ "With empty",
+ "With a duplicity"
+};
+
+TEST_F(ListTest, multiExactMatch) {
+ // Run through all the multi-configurations
+ for (size_t i(0); i < sizeof(test_names) / sizeof(*test_names); ++i) {
+ SCOPED_TRACE(test_names[i]);
+ multiConfiguration(i);
+ // Something that is nowhere there
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("org."), true));
+ // This one is there exactly.
+ positiveResult(list_->find(Name("example.org"), true), ds_[0],
+ Name("example.org"), true, "Exact match");
+ // This one too, but in a different data source.
+ positiveResult(list_->find(Name("sub.example.org."), true), ds_[1],
+ Name("sub.example.org"), true, "Subdomain match");
+ // But this one is in neither data source.
+ EXPECT_TRUE(negativeResult_ ==
+ list_->find(Name("sub.example.com."), true));
+ }
+}
+
+TEST_F(ListTest, multiBestMatch) {
+ // Run through all the multi-configurations
+ for (size_t i(0); i < 4; ++ i) {
+ SCOPED_TRACE(test_names[i]);
+ multiConfiguration(i);
+ // Something that is nowhere there
+ EXPECT_TRUE(negativeResult_ == list_->find(Name("org.")));
+ // This one is there exactly.
+ positiveResult(list_->find(Name("example.org")), ds_[0],
+ Name("example.org"), true, "Exact match");
+ // This one too, but in a different data source.
+ positiveResult(list_->find(Name("sub.example.org.")), ds_[1],
+ Name("sub.example.org"), true, "Subdomain match");
+ // But this one is in neither data source. But it is a subdomain
+ // of one of the zones in the first data source.
+ positiveResult(list_->find(Name("sub.example.com.")), ds_[0],
+ Name("example.com."), false, "Subdomain in com");
+ }
+}
+
+// Check the configuration is empty when the list is empty
+TEST_F(ListTest, configureEmpty) {
+ ConstElementPtr elem(new ListElement);
+ list_->configure(*elem, true);
+ EXPECT_TRUE(list_->getDataSources().empty());
+}
+
+// Check we can get multiple data sources and they are in the right order.
+TEST_F(ListTest, configureMulti) {
+ ConstElementPtr elem(Element::fromJSON("["
+ "{"
+ " \"type\": \"type1\","
+ " \"cache\": \"off\","
+ " \"params\": {}"
+ "},"
+ "{"
+ " \"type\": \"type2\","
+ " \"cache\": \"off\","
+ " \"params\": {}"
+ "}]"
+ ));
+ list_->configure(*elem, true);
+ EXPECT_EQ(2, list_->getDataSources().size());
+ checkDS(0, "type1", "{}");
+ checkDS(1, "type2", "{}");
+}
+
+// Check we can pass whatever we want to the params
+TEST_F(ListTest, configureParams) {
+ const char* params[] = {
+ "true",
+ "false",
+ "null",
+ "\"hello\"",
+ "42",
+ "[]",
+ "{}",
+ NULL
+ };
+ for (const char** param(params); *param; ++param) {
+ SCOPED_TRACE(*param);
+ ConstElementPtr elem(Element::fromJSON(string("["
+ "{"
+ " \"type\": \"t\","
+ " \"cache\": \"off\","
+ " \"params\": ") + *param +
+ "}]"));
+ list_->configure(*elem, true);
+ EXPECT_EQ(1, list_->getDataSources().size());
+ checkDS(0, "t", *param);
+ }
+}
+
+TEST_F(ListTest, wrongConfig) {
+ const char* configs[] = {
+ // A lot of stuff missing from there
+ "[{\"type\": \"test_type\", \"params\": 13}, {}]",
+ // Some bad types completely
+ "{}",
+ "true",
+ "42",
+ "null",
+ "[{\"type\": \"test_type\", \"params\": 13}, true]",
+ "[{\"type\": \"test_type\", \"params\": 13}, []]",
+ "[{\"type\": \"test_type\", \"params\": 13}, 42]",
+ // Bad type of type
+ "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": 42}]",
+ "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": true}]",
+ "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": null}]",
+ "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": []}]",
+ "[{\"type\": \"test_type\", \"params\": 13}, {\"type\": {}}]",
+ // TODO: Once cache is supported, add some invalid cache values
+ NULL
+ };
+ // Put something inside to see it survives the exception
+ list_->configure(*config_elem_, true);
+ checkDS(0, "test_type", "{}");
+ for (const char** config(configs); *config; ++config) {
+ SCOPED_TRACE(*config);
+ ConstElementPtr elem(Element::fromJSON(*config));
+ EXPECT_THROW(list_->configure(*elem, true),
+ ConfigurableClientList::ConfigurationError);
+ // Still untouched
+ checkDS(0, "test_type", "{}");
+ EXPECT_EQ(1, list_->getDataSources().size());
+ }
+}
+
+// The param thing defaults to null. Cache is not used yet.
+TEST_F(ListTest, defaults) {
+ ConstElementPtr elem(Element::fromJSON("["
+ "{"
+ " \"type\": \"type1\""
+ "}]"));
+ list_->configure(*elem, true);
+ EXPECT_EQ(1, list_->getDataSources().size());
+ checkDS(0, "type1", "null");
+}
+
+// Check we can call the configure multiple times, to change the configuration
+TEST_F(ListTest, reconfigure) {
+ ConstElementPtr empty(new ListElement);
+ list_->configure(*config_elem_, true);
+ checkDS(0, "test_type", "{}");
+ list_->configure(*empty, true);
+ EXPECT_TRUE(list_->getDataSources().empty());
+ list_->configure(*config_elem_, true);
+ checkDS(0, "test_type", "{}");
+}
+
+// Make sure the data source error exception from the factory is propagated
+TEST_F(ListTest, dataSrcError) {
+ ConstElementPtr elem(Element::fromJSON("["
+ "{"
+ " \"type\": \"error\""
+ "}]"));
+ list_->configure(*config_elem_, true);
+ checkDS(0, "test_type", "{}");
+ EXPECT_THROW(list_->configure(*elem, true), DataSourceError);
+ checkDS(0, "test_type", "{}");
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index a0a9ca8..55d8052 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -142,9 +142,11 @@ const char* const TEST_RECORDS[][5] = {
{"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
{"delegation.example.org.", "NS", "3600", "",
"ns.delegation.example.org."},
- {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
+ {"delegation.example.org.", "DS", "3600", "", "1 1 2 abcd"},
{"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
"20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"delegation.example.org.", "RRSIG", "3600", "", "DS 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
{"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
@@ -156,6 +158,16 @@ const char* const TEST_RECORDS[][5] = {
{"below.dname.example.org.", "A", "3600", "", "192.0.2.1"},
+ // Insecure delegation (i.e., no DS at the delegation point)
+ {"insecdelegation.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"insecdelegation.example.org.", "NSEC", "3600", "",
+ "dummy.example.org. NS NSEC"},
+ // and a DS under the zone cut. Such an RR shouldn't exist in a sane zone,
+ // but it could exist by error or some malicious attempt. It shouldn't confuse
+ // the implementation)
+ {"child.insecdelegation.example.org.", "DS", "3600", "", "DS 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
// Broken NS
{"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"},
{"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."},
@@ -2201,6 +2213,48 @@ TYPED_TEST(DatabaseClientTest, findDelegation) {
DataSourceError);
}
+TYPED_TEST(DatabaseClientTest, findDS) {
+ // Type DS query is an exception to the general delegation case; the NS
+ // should be ignored and it should be treated just like normal
+ // authoritative data.
+
+ boost::shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // DS exists at the delegation point. It should be returned with result
+ // code of SUCCESS.
+ this->expected_rdatas_.push_back("1 1 2 abcd"),
+ this->expected_sig_rdatas_.push_back("DS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, Name("delegation.example.org."),
+ RRType::DS(), RRType::DS(), this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ ZoneFinder::RESULT_DEFAULT);
+
+ // DS doesn't exist at the delegation point. The result should be
+ // NXRRSET, and if DNSSEC is requested and the zone is NSEC-signed,
+ // the corresponding NSEC should be returned (normally with its RRSIG,
+ // but in this simplified test setup it's omitted in the test data).
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dummy.example.org. NS NSEC");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, Name("insecdelegation.example.org."),
+ RRType::DS(), RRType::NSEC(), this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ ZoneFinder::RESULT_NSEC_SIGNED,
+ Name("insecdelegation.example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Some insane case: DS under a zone cut. It's included in the DB, but
+ // shouldn't be visible via finder.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com");
+ doFindTest(*finder, Name("child.insecdelegation.example.org"),
+ RRType::DS(), RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->empty_rdatas_, ZoneFinder::RESULT_DEFAULT,
+ Name("insecdelegation.example.org."), ZoneFinder::FIND_DNSSEC);
+}
+
TYPED_TEST(DatabaseClientTest, emptyDomain) {
boost::shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
index 58afd7b..2031d50 100644
--- a/src/lib/datasrc/tests/factory_unittest.cc
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -28,6 +28,8 @@ using namespace isc::datasrc;
using namespace isc::data;
std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+const std::string STATIC_DS_FILE = TEST_DATA_DIR "/static.zone";
+const std::string ROOT_ZONE_FILE = TEST_DATA_DIR "/root.zone";
namespace {
@@ -235,5 +237,59 @@ TEST(FactoryTest, badType) {
DataSourceError);
}
+// Check the static data source can be loaded.
+TEST(FactoryTest, staticDS) {
+ // The only configuration is the file to load.
+ const ConstElementPtr config(new StringElement(STATIC_DS_FILE));
+ // Get the data source
+ DataSourceClientContainer dsc("static", config);
+ // And try getting something out to see if it really works.
+ DataSourceClient::FindResult
+ result(dsc.getInstance().findZone(isc::dns::Name("BIND")));
+ ASSERT_EQ(result::SUCCESS, result.code);
+ EXPECT_EQ(isc::dns::Name("BIND"), result.zone_finder->getOrigin());
+ EXPECT_EQ(isc::dns::RRClass::CH(), result.zone_finder->getClass());
+ const isc::dns::ConstRRsetPtr
+ version(result.zone_finder->find(isc::dns::Name("VERSION.BIND"),
+ isc::dns::RRType::TXT())->rrset);
+ ASSERT_NE(isc::dns::ConstRRsetPtr(), version);
+ EXPECT_EQ(isc::dns::Name("VERSION.BIND"), version->getName());
+ EXPECT_EQ(isc::dns::RRClass::CH(), version->getClass());
+ EXPECT_EQ(isc::dns::RRType::TXT(), version->getType());
+}
+
+// Check that file not containing BIND./CH is rejected
+//
+// FIXME: This test is disabled because the InMemoryZoneFinder::load does
+// not check if the data loaded corresponds with the origin. The static
+// factory is not the place to fix that.
+TEST(FactoryTest, DISABLED_staticDSBadFile) {
+ // The only configuration is the file to load.
+ const ConstElementPtr config(new StringElement(STATIC_DS_FILE));
+ // See it does not want the file
+ EXPECT_THROW(DataSourceClientContainer("static", config), DataSourceError);
+}
+
+// Check that some bad configs are rejected
+TEST(FactoryTest, staticDSBadConfig) {
+ const char* configs[] = {
+ // The file does not exist
+ "\"/does/not/exist\"",
+ // Bad types
+ "null",
+ "42",
+ "{}",
+ "[]",
+ "true",
+ NULL
+ };
+ for (const char** config(configs); *config; ++config) {
+ SCOPED_TRACE(*config);
+ EXPECT_THROW(DataSourceClientContainer("static",
+ Element::fromJSON(*config)),
+ DataSourceError);
+ }
+}
+
} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/testdata/static.zone b/src/lib/datasrc/tests/testdata/static.zone
new file mode 100644
index 0000000..5e9e8a6
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/static.zone
@@ -0,0 +1,2 @@
+BIND. 3600 CH SOA BIND. BIND. 1 3600 300 36000 3600
+VERSION.BIND. 3600 CH TXT "10"
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 9e6fb0c..dc1545b 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -5,6 +5,12 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
CLEANFILES = *.gcno *.gcda
lib_LTLIBRARIES = libdhcp++.la
@@ -31,3 +37,9 @@ libdhcp___la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
libdhcp___la_LIBADD = $(top_builddir)/src/lib/asiolink/libasiolink.la
libdhcp___la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
libdhcp___la_LDFLAGS = -no-undefined -version-info 1:0:0
+
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+libdhcp___la_CXXFLAGS += -Wno-unused-parameter
+endif
diff --git a/src/lib/dhcp/iface_mgr.cc b/src/lib/dhcp/iface_mgr.cc
index 824b9e3..253e061 100644
--- a/src/lib/dhcp/iface_mgr.cc
+++ b/src/lib/dhcp/iface_mgr.cc
@@ -606,6 +606,8 @@ IfaceMgr::send(const Pkt6Ptr& pkt) {
pktinfo->ipi6_ifindex = pkt->getIndex();
m.msg_controllen = cmsg->cmsg_len;
+ pkt->updateTimestamp();
+
result = sendmsg(getSocket(*pkt), &m, 0);
if (result < 0) {
isc_throw(Unexpected, "Pkt6 send failed: sendmsg() returned " << result);
@@ -665,6 +667,8 @@ IfaceMgr::send(const Pkt4Ptr& pkt)
<< " over socket " << getSocket(*pkt) << " on interface "
<< getIface(pkt->getIface())->getFullName() << endl;
+ pkt->updateTimestamp();
+
int result = sendmsg(getSocket(*pkt), &m, 0);
if (result < 0) {
isc_throw(Unexpected, "Pkt4 send failed.");
@@ -755,6 +759,8 @@ IfaceMgr::receive4() {
// We have all data let's create Pkt4 object.
Pkt4Ptr pkt = Pkt4Ptr(new Pkt4(buf, result));
+ pkt->updateTimestamp();
+
unsigned int ifindex = iface->getIndex();
IOAddress from(htonl(from_addr.sin_addr.s_addr));
@@ -899,6 +905,8 @@ Pkt6Ptr IfaceMgr::receive6() {
return (Pkt6Ptr()); // NULL
}
+ pkt->updateTimestamp();
+
pkt->setLocalAddr(IOAddress::from_bytes(AF_INET6,
reinterpret_cast<const uint8_t*>(&to_addr)));
pkt->setRemoteAddr(IOAddress::from_bytes(AF_INET6,
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index 03b4a3d..0c71606 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -270,6 +270,14 @@ void Option::setUint32(uint32_t value) {
writeUint32(value, &data_[0]);
}
+void Option::setData(const OptionBufferConstIter first,
+ const OptionBufferConstIter last) {
+ // We will copy entire option buffer, so we have to resize data_.
+ data_.resize(std::distance(first, last));
+ std::copy(first, last, data_.begin());
+}
+
+
Option::~Option() {
}
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index c7f5d10..0662967 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -244,6 +244,15 @@ public:
/// @param value value to be set
void setUint32(uint32_t value);
+ /// @brief Sets content of this option from buffer.
+ ///
+ /// Option will be resized to length of buffer.
+ ///
+ /// @param first iterator pointing to the beginning of buffer to copy.
+ /// @param last iterator pointing to end of buffer to copy.
+ void setData(const OptionBufferConstIter first,
+ const OptionBufferConstIter last);
+
/// just to force that every option has virtual dtor
virtual ~Option();
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index e456631..2c3f1eb 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -305,6 +305,11 @@ Pkt4::getOption(uint8_t type) {
return boost::shared_ptr<isc::dhcp::Option>(); // NULL
}
+void
+Pkt4::updateTimestamp() {
+ timestamp_ = boost::posix_time::microsec_clock::universal_time();
+}
+
} // end of namespace isc::dhcp
} // end of namespace isc
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index a3f683f..b72c03e 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -16,8 +16,10 @@
#define PKT4_H
#include <iostream>
+#include <time.h>
#include <vector>
#include <boost/shared_ptr.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
#include "asiolink/io_address.h"
#include "util/buffer.h"
#include "dhcp/option.h"
@@ -202,6 +204,11 @@ public:
void
setGiaddr(const isc::asiolink::IOAddress& giaddr) { giaddr_ = giaddr; };
+ /// @brief Sets transaction-id value
+ ///
+ /// @param transid transaction-id to be set.
+ void setTransid(uint32_t transid) { transid_ = transid; }
+
/// @brief Returns value of transaction-id field.
///
/// @return transaction-id
@@ -321,6 +328,14 @@ public:
/// @return interface name
std::string getIface() const { return iface_; };
+ /// @brief Returns packet timestamp.
+ ///
+ /// Returns packet timestamp value updated when
+ /// packet is received or sent.
+ ///
+ /// @return packet timestamp.
+ const boost::posix_time::ptime& getTimestamp() const { return timestamp_; }
+
/// @brief Sets interface name.
///
/// Sets interface name over which packet was received or is
@@ -387,6 +402,14 @@ public:
/// @return remote port
uint16_t getRemotePort() { return (remote_port_); }
+ /// @brief Update packet timestamp.
+ ///
+ /// Updates packet timestamp. This method is invoked
+ /// by interface manager just before sending or
+ /// just after receiving it.
+ /// @throw isc::Unexpected if timestamp update failed
+ void updateTimestamp();
+
protected:
/// converts DHCP message type to BOOTP op type
@@ -470,12 +493,26 @@ protected:
// end of real DHCPv4 fields
/// output buffer (used during message transmission)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
isc::util::OutputBuffer bufferOut_;
/// that's the data of input buffer used in RX packet. Note that
/// InputBuffer does not store the data itself, but just expects that
/// data will be valid for the whole life of InputBuffer. Therefore we
/// need to keep the data around.
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
std::vector<uint8_t> data_;
/// message type (e.g. 1=DHCPDISCOVER)
@@ -484,7 +521,17 @@ protected:
uint8_t msg_type_;
/// collection of options present in this message
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt4. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
isc::dhcp::Option::OptionCollection options_;
+
+ /// packet timestamp
+ boost::posix_time::ptime timestamp_;
}; // Pkt4 class
typedef boost::shared_ptr<Pkt4> Pkt4Ptr;
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
index aea3cde..e869c7b 100644
--- a/src/lib/dhcp/pkt6.cc
+++ b/src/lib/dhcp/pkt6.cc
@@ -202,5 +202,11 @@ void Pkt6::repack() {
bufferOut_.writeData(&data_[0], data_.size());
}
+void
+Pkt6::updateTimestamp() {
+ timestamp_ = boost::posix_time::microsec_clock::universal_time();
+}
+
+
} // end of isc::dhcp namespace
} // end of isc namespace
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
index 97ac996..2612f27 100644
--- a/src/lib/dhcp/pkt6.h
+++ b/src/lib/dhcp/pkt6.h
@@ -16,8 +16,10 @@
#define PKT6_H
#include <iostream>
+#include <time.h>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
#include "asiolink/io_address.h"
#include "dhcp/option.h"
@@ -129,6 +131,11 @@ public:
/// @param type message type to be set
void setType(uint8_t type) { msg_type_=type; };
+ /// @brief Sets transaction-id value
+ ///
+ /// @param transid transaction-id to be set.
+ void setTransid(uint32_t transid) { transid_ = transid; }
+
/// Returns value of transaction-id field
///
/// @return transaction-id
@@ -220,6 +227,14 @@ public:
/// @return interface name
std::string getIface() const { return iface_; };
+ /// @brief Returns packet timestamp.
+ ///
+ /// Returns packet timestamp value updated when
+ /// packet is received or sent.
+ ///
+ /// @return packet timestamp.
+ const boost::posix_time::ptime& getTimestamp() const { return timestamp_; }
+
/// @brief Sets interface name.
///
/// Sets interface name over which packet was received or is
@@ -231,8 +246,23 @@ public:
/// TODO Need to implement getOptions() as well
/// collection of options present in this message
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
isc::dhcp::Option::OptionCollection options_;
+ /// @brief Update packet timestamp.
+ ///
+ /// Updates packet timestamp. This method is invoked
+ /// by interface manager just before sending or
+ /// just after receiving it.
+ /// @throw isc::Unexpected if timestamp update failed
+ void updateTimestamp();
+
protected:
/// Builds on wire packet for TCP transmission.
///
@@ -278,6 +308,13 @@ protected:
uint32_t transid_;
/// unparsed data (in received packets)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
OptionBuffer data_;
/// name of the network interface the packet was received/to be sent over
@@ -304,7 +341,17 @@ protected:
uint16_t remote_port_;
/// output buffer (used during message transmission)
+ ///
+ /// @warning This protected member is accessed by derived
+ /// classes directly. One of such derived classes is
+ /// @ref perfdhcp::PerfPkt6. The impact on derived classes'
+ /// behavior must be taken into consideration before making
+ /// changes to this member such as access scope restriction or
+ /// data format change etc.
isc::util::OutputBuffer bufferOut_;
+
+ /// packet timestamp
+ boost::posix_time::ptime timestamp_;
}; // Pkt6 class
typedef boost::shared_ptr<Pkt6> Pkt6Ptr;
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index cc20bd5..4c86ee7 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -7,6 +7,12 @@ AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
if USE_STATIC_LINK
AM_LDFLAGS = -static
endif
@@ -38,8 +44,9 @@ libdhcp___unittests_CXXFLAGS = $(AM_CXXFLAGS)
if USE_CLANGPP
# This is to workaround unused variables tcout and tcerr in
-# log4cplus's streams.h.
-libdhcp___unittests_CXXFLAGS += -Wno-unused-variable
+# log4cplus's streams.h and unused parameters from some of the
+# Boost headers.
+libdhcp___unittests_CXXFLAGS += -Wno-unused-variable -Wno-unused-parameter
endif
libdhcp___unittests_LDADD = $(GTEST_LDADD)
libdhcp___unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index 5daf75d..9b046f0 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -485,7 +485,7 @@ TEST_F(OptionTest, setUintX) {
uint8_t exp2[] = {125, 2, 12345/256, 12345%256};
EXPECT_TRUE(0 == memcmp(exp2, outBuf_.getData(), 4));
- // verity getUint32
+ // verify getUint32
outBuf_.clear();
opt4->setUint32(0x12345678);
opt4->pack4(outBuf_);
@@ -495,4 +495,31 @@ TEST_F(OptionTest, setUintX) {
uint8_t exp4[] = {125, 4, 0x12, 0x34, 0x56, 0x78};
EXPECT_TRUE(0 == memcmp(exp4, outBuf_.getData(), 6));
}
+
+TEST_F(OptionTest, setData) {
+ // verify data override with new buffer larger than
+ // initial option buffer size
+ OptionPtr opt1(new Option(Option::V4, 125,
+ buf_.begin(), buf_.begin() + 10));
+ buf_.resize(20, 1);
+ opt1->setData(buf_.begin(), buf_.end());
+ opt1->pack4(outBuf_);
+ ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size());
+ const uint8_t* test_data = static_cast<const uint8_t*>(outBuf_.getData());
+ EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(),
+ buf_.size()));
+
+ // verify data override with new buffer shorter than
+ // initial option buffer size
+ OptionPtr opt2(new Option(Option::V4, 125,
+ buf_.begin(), buf_.begin() + 10));
+ outBuf_.clear();
+ buf_.resize(5, 1);
+ opt2->setData(buf_.begin(), buf_.end());
+ opt2->pack4(outBuf_);
+ ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size());
+ test_data = static_cast<const uint8_t*>(outBuf_.getData());
+ EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(),
+ buf_.size()));
+}
}
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index bed8c2f..9c8cc05 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -31,7 +31,9 @@ using namespace isc;
using namespace isc::asiolink;
using namespace isc::dhcp;
using namespace isc::util;
-using namespace boost;
+// don't import the entire boost namespace. It will unexpectedly hide uint8_t
+// for some systems.
+using boost::scoped_ptr;
namespace {
@@ -598,4 +600,32 @@ TEST(Pkt4Test, metaFields) {
delete pkt;
}
+TEST(Pkt4Test, Timestamp) {
+ scoped_ptr<Pkt4> pkt(new Pkt4(DHCPOFFER, 1234));
+
+ // Just after construction timestamp is invalid
+ ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time());
+
+ // Update packet time.
+ pkt->updateTimestamp();
+
+ // Get updated packet time.
+ boost::posix_time::ptime ts_packet = pkt->getTimestamp();
+
+ // After timestamp is updated it should be date-time.
+ ASSERT_FALSE(ts_packet.is_not_a_date_time());
+
+ // Check current time.
+ boost::posix_time::ptime ts_now =
+ boost::posix_time::microsec_clock::universal_time();
+
+ // Calculate period between packet time and now.
+ boost::posix_time::time_period ts_period(ts_packet, ts_now);
+
+ // Duration should be positive or zero.
+ EXPECT_TRUE(ts_period.length().total_microseconds() >= 0);
+}
+
+
+
} // end of anonymous namespace
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
index e07ea9f..d6ca9b1 100644
--- a/src/lib/dhcp/tests/pkt6_unittest.cc
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -16,6 +16,7 @@
#include <iostream>
#include <sstream>
#include <arpa/inet.h>
+#include <boost/date_time/posix_time/posix_time.hpp>
#include <gtest/gtest.h>
#include <asiolink/io_address.h>
@@ -204,4 +205,30 @@ TEST_F(Pkt6Test, addGetDelOptions) {
delete parent;
}
+TEST_F(Pkt6Test, Timestamp) {
+ boost::scoped_ptr<Pkt6> pkt(new Pkt6(DHCPV6_SOLICIT, 0x020304));
+
+ // Just after construction timestamp is invalid
+ ASSERT_TRUE(pkt->getTimestamp().is_not_a_date_time());
+
+ // Update packet time.
+ pkt->updateTimestamp();
+
+ // Get updated packet time.
+ boost::posix_time::ptime ts_packet = pkt->getTimestamp();
+
+ // After timestamp is updated it should be date-time.
+ ASSERT_FALSE(ts_packet.is_not_a_date_time());
+
+ // Check current time.
+ boost::posix_time::ptime ts_now =
+ boost::posix_time::microsec_clock::universal_time();
+
+ // Calculate period between packet time and now.
+ boost::posix_time::time_period ts_period(ts_packet, ts_now);
+
+ // Duration should be positive or zero.
+ EXPECT_TRUE(ts_period.length().total_microseconds() >= 0);
+}
+
}
diff --git a/src/lib/dns/labelsequence.h b/src/lib/dns/labelsequence.h
index 6b10b67..b17eeb4 100644
--- a/src/lib/dns/labelsequence.h
+++ b/src/lib/dns/labelsequence.h
@@ -101,7 +101,7 @@ public:
/// \note No actual memory is changed, this operation merely updates the
/// internal pointers based on the offsets in the Name object.
///
- /// \exeption OutOfRange if i is greater than or equal to the number
+ /// \exception OutOfRange if i is greater than or equal to the number
/// of labels currently pointed to by this LabelSequence
///
/// \param i The number of labels to remove.
@@ -112,7 +112,7 @@ public:
/// \note No actual memory is changed, this operation merely updates the
/// internal pointers based on the offsets in the Name object.
///
- /// \exeption OutOfRange if i is greater than or equal to the number
+ /// \exception OutOfRange if i is greater than or equal to the number
/// of labels currently pointed to by this LabelSequence
///
/// \param i The number of labels to remove.
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index a9be8be..0a1625a 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -573,7 +573,11 @@ Message::clearSection(const Section section) {
if (section >= MessageImpl::NUM_SECTIONS) {
isc_throw(OutOfRange, "Invalid message section: " << section);
}
- impl_->rrsets_[section].clear();
+ if (section == Message::SECTION_QUESTION) {
+ impl_->questions_.clear();
+ } else {
+ impl_->rrsets_[section].clear();
+ }
impl_->counts_[section] = 0;
}
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index fdd4a20..f08f62c 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -209,12 +209,24 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) {
return (NULL);
}
- if (self->cppobj->getHeaderFlag(
+ try {
+ if (self->cppobj->getHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag))) {
- Py_RETURN_TRUE;
- } else {
- Py_RETURN_FALSE;
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+ } catch (const isc::InvalidParameter& ip) {
+ PyErr_Clear();
+ PyErr_SetString(po_InvalidParameter, ip.what());
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.get_header_flag(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_header_flag()");
}
+ return (NULL);
}
PyObject*
@@ -240,12 +252,17 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const isc::InvalidParameter& ip) {
PyErr_Clear();
PyErr_SetString(po_InvalidParameter, ip.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.set_header_flag(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.set_header_flag()");
}
+ return (NULL);
}
PyObject*
@@ -273,8 +290,14 @@ Message_setQid(s_Message* self, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.set_qid(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.set_qid()");
}
+ return (NULL);
}
PyObject*
@@ -283,11 +306,14 @@ Message_getRcode(s_Message* self) {
return (createRcodeObject(self->cppobj->getRcode()));
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.get_rcode(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- return (NULL);
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_rcode()");
}
+ return (NULL);
}
PyObject*
@@ -301,8 +327,14 @@ Message_setRcode(s_Message* self, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.set_rcode(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.set_rcode()");
}
+ return (NULL);
}
PyObject*
@@ -311,17 +343,14 @@ Message_getOpcode(s_Message* self) {
return (createOpcodeObject(self->cppobj->getOpcode()));
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const exception& ex) {
- const string ex_what =
- "Failed to get message opcode: " + string(ex.what());
+ const string ex_what = "Error in Message.get_opcode(): " + string(ex.what());
PyErr_SetString(po_IscException, ex_what.c_str());
- return (NULL);
} catch (...) {
PyErr_SetString(po_IscException,
- "Unexpected exception getting opcode from message");
- return (NULL);
+ "Unexpected exception in Message.get_opcode()");
}
+ return (NULL);
}
PyObject*
@@ -335,8 +364,14 @@ Message_setOpcode(s_Message* self, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.set_opcode(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.set_opcode()");
}
+ return (NULL);
}
PyObject*
@@ -348,12 +383,11 @@ Message_getEDNS(s_Message* self) {
try {
return (createEDNSObject(*src));
} catch (const exception& ex) {
- const string ex_what =
- "Failed to get EDNS from message: " + string(ex.what());
+ const string ex_what = "Error in Message.get_edns(): " + string(ex.what());
PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
PyErr_SetString(PyExc_SystemError,
- "Unexpected failure getting EDNS from message");
+ "Unexpected exception in Message.get_edns()");
}
return (NULL);
}
@@ -369,8 +403,14 @@ Message_setEDNS(s_Message* self, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.set_edns(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.set_edns()");
}
+ return (NULL);
}
PyObject*
@@ -386,13 +426,11 @@ Message_getTSIGRecord(s_Message* self) {
} catch (const InvalidMessageOperation& ex) {
PyErr_SetString(po_InvalidMessageOperation, ex.what());
} catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in getting TSIGRecord from message: " +
- string(ex.what());
+ const string ex_what = "Error in Message.get_tsig_record(): " + string(ex.what());
PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
- "getting TSIGRecord from message");
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_tsig_record()");
}
return (NULL);
}
@@ -411,8 +449,14 @@ Message_getRRCount(s_Message* self, PyObject* args) {
static_cast<Message::Section>(section))));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.get_rr_count(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_rr_count()");
}
+ return (NULL);
}
// This is a helper templated class commonly used for getQuestion and
@@ -453,13 +497,11 @@ Message_getQuestion(PyObject* po_self, PyObject*) {
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
} catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in Message.get_question: " +
- string(ex.what());
+ const string ex_what = "Error in Message.get_question(): " + string(ex.what());
PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in Message.get_question");
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_question()");
}
return (NULL);
}
@@ -489,13 +531,11 @@ Message_getSection(PyObject* po_self, PyObject* args) {
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
} catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in Message.get_section: " +
- string(ex.what());
+ const string ex_what = "Error in Message.get_section(): " + string(ex.what());
PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in Message.get_section");
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.get_section()");
}
return (NULL);
}
@@ -513,9 +553,20 @@ Message_addQuestion(s_Message* self, PyObject* args) {
return (NULL);
}
- self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
-
- Py_RETURN_NONE;
+ try {
+ self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_Clear();
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.add_question(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.add_question()");
+ }
+ return (NULL);
}
PyObject*
@@ -534,36 +585,45 @@ Message_addRRset(s_Message* self, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.add_rrset(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
PyErr_SetString(po_IscException,
- "Unexpected exception in adding RRset");
- return (NULL);
+ "Unexpected exception in Message.add_rrset()");
}
+ return (NULL);
}
PyObject*
Message_clear(s_Message* self, PyObject* args) {
int i;
- if (PyArg_ParseTuple(args, "i", &i)) {
- PyErr_Clear();
- if (i == Message::PARSE) {
- self->cppobj->clear(Message::PARSE);
- Py_RETURN_NONE;
- } else if (i == Message::RENDER) {
- self->cppobj->clear(Message::RENDER);
- Py_RETURN_NONE;
- } else {
- PyErr_SetString(PyExc_TypeError,
- "Message mode must be Message.PARSE or Message.RENDER");
- return (NULL);
+
+ try {
+ if (PyArg_ParseTuple(args, "i", &i)) {
+ PyErr_Clear();
+ if (i == Message::PARSE) {
+ self->cppobj->clear(Message::PARSE);
+ Py_RETURN_NONE;
+ } else if (i == Message::RENDER) {
+ self->cppobj->clear(Message::RENDER);
+ Py_RETURN_NONE;
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Message mode must be Message.PARSE or Message.RENDER");
+ return (NULL);
+ }
}
- } else {
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.clear(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.clear()");
}
+ return (NULL);
}
PyObject*
@@ -579,21 +639,34 @@ Message_clearSection(PyObject* pyself, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.clear_section(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
PyErr_SetString(po_IscException,
- "Unexpected exception in adding RRset");
- return (NULL);
+ "Unexpected exception in Message.clear_section()");
}
+ return (NULL);
}
PyObject*
Message_makeResponse(s_Message* self) {
- self->cppobj->makeResponse();
- Py_RETURN_NONE;
+ try {
+ self->cppobj->makeResponse();
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_Clear();
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.make_response(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.make_response()");
+ }
+ return (NULL);
}
PyObject*
@@ -604,11 +677,14 @@ Message_toText(s_Message* self) {
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.to_text(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- return (NULL);
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.to_text()");
}
+ return (NULL);
}
PyObject*
@@ -639,22 +715,18 @@ Message_toWire(s_Message* self, PyObject* args) {
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const TSIGContextError& ex) {
// toWire() with a TSIG context can fail due to this if the
// python program has a bug.
PyErr_SetString(po_TSIGContextError, ex.what());
- return (NULL);
- } catch (const std::exception& ex) {
- // Other exceptions should be rare (most likely an implementation
- // bug)
- PyErr_SetString(po_TSIGContextError, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what = "Error in Message.to_wire(): " + string(ex.what());
+ PyErr_SetString(po_TSIGContextError, ex_what.c_str());
} catch (...) {
- PyErr_SetString(PyExc_RuntimeError,
- "Unexpected C++ exception in Message.to_wire");
- return (NULL);
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.to_wire()");
}
+ return (NULL);
}
PyErr_Clear();
PyErr_SetString(PyExc_TypeError,
@@ -682,29 +754,22 @@ Message_fromWire(PyObject* pyself, PyObject* args) {
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
} catch (const DNSMessageFORMERR& dmfe) {
PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
- return (NULL);
} catch (const DNSMessageBADVERS& dmfe) {
PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
- return (NULL);
} catch (const MessageTooShort& mts) {
PyErr_SetString(po_MessageTooShort, mts.what());
- return (NULL);
} catch (const InvalidBufferPosition& ex) {
PyErr_SetString(po_DNSMessageFORMERR, ex.what());
- return (NULL);
} catch (const exception& ex) {
- const string ex_what =
- "Error in Message.from_wire: " + string(ex.what());
- PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
- return (NULL);
+ const string ex_what = "Error in Message.from_wire(): " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(PyExc_RuntimeError,
- "Unexpected exception in Message.from_wire");
- return (NULL);
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in Message.from_wire()");
}
+ return (NULL);
}
PyErr_SetString(PyExc_TypeError,
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index 6758d0e..c24d24d 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -115,7 +115,7 @@ PyObject* Name_reverse(s_Name* self);
PyObject* Name_concatenate(s_Name* self, PyObject* args);
PyObject* Name_downcase(s_Name* self);
PyObject* Name_isWildCard(s_Name* self);
-long Name_hash(PyObject* py_self);
+Py_hash_t Name_hash(PyObject* py_self);
PyMethodDef Name_methods[] = {
{ "at", reinterpret_cast<PyCFunction>(Name_at), METH_VARARGS,
@@ -520,7 +520,7 @@ Name_isWildCard(s_Name* self) {
}
}
-long
+Py_hash_t
Name_hash(PyObject* pyself) {
s_Name* const self = static_cast<s_Name*>(pyself);
return (LabelSequence(*self->cppobj).getHash(false));
diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h
index 8092b08..e9e9359 100644
--- a/src/lib/dns/python/pydnspp_common.h
+++ b/src/lib/dns/python/pydnspp_common.h
@@ -43,6 +43,11 @@ extern PyObject* po_DNSMessageBADVERS;
int readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence);
int addClassVariable(PyTypeObject& c, const char* name, PyObject* obj);
+
+// Short term workaround for unifying the return type of tp_hash
+#if PY_MINOR_VERSION < 2
+typedef long Py_hash_t;
+#endif
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index e4ff890..20f67c8 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -116,6 +116,7 @@ Rdata_init(PyObject* self_p, PyObject* args, PyObject*) {
return (0);
} else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
&rrclass_type, &rrclass, &data, &len)) {
+ PyErr_Clear();
InputBuffer input_buffer(data, len);
self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
PyRRClass_ToRRClass(rrclass),
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index 2c3dae6..b94dc02 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -52,7 +52,7 @@ PyObject* RRClass_str(PyObject* self);
PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
PyObject* RRClass_getCode(s_RRClass* self);
PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
-long RRClass_hash(PyObject* pyself);
+Py_hash_t RRClass_hash(PyObject* pyself);
// Static function for direct class creation
PyObject* RRClass_IN(s_RRClass *self);
@@ -265,7 +265,7 @@ PyObject* RRClass_ANY(s_RRClass*) {
return (RRClass_createStatic(RRClass::ANY()));
}
-long
+Py_hash_t
RRClass_hash(PyObject* pyself) {
s_RRClass* const self = static_cast<s_RRClass*>(pyself);
return (self->cppobj->getCode());
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index 1ec0e99..6f32b11 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -118,6 +118,11 @@ class MessageTest(unittest.TestCase):
self.assertFalse(self.r.get_header_flag(Message.HEADERFLAG_AD))
self.assertFalse(self.r.get_header_flag(Message.HEADERFLAG_CD))
+ # 0 passed as flag should raise
+ self.assertRaises(InvalidParameter, self.r.get_header_flag, 0)
+ # unused bit
+ self.assertRaises(InvalidParameter, self.r.get_header_flag, 0x80000000)
+
self.r.set_header_flag(Message.HEADERFLAG_QR)
self.assertTrue(self.r.get_header_flag(Message.HEADERFLAG_QR))
@@ -267,6 +272,15 @@ class MessageTest(unittest.TestCase):
self.assertEqual(1, sys.getrefcount(self.r.get_question()))
self.assertEqual(1, sys.getrefcount(self.r.get_question()[0]))
+ # Message.add_question() called in non-RENDER mode should raise
+ # InvalidMessageOperation
+ self.r.clear(Message.PARSE)
+ self.assertRaises(InvalidMessageOperation, self.r.add_question, q)
+
+ def test_make_response(self):
+ # Message.make_response() called in non-PARSE mode should raise
+ # InvalidMessageOperation
+ self.r.clear(Message.RENDER)
+ self.assertRaises(InvalidMessageOperation, self.r.make_response)
+
def test_add_rrset(self):
self.assertRaises(TypeError, self.r.add_rrset, "wrong")
self.assertRaises(TypeError, self.r.add_rrset)
@@ -295,6 +309,7 @@ class MessageTest(unittest.TestCase):
self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION))
self.r.clear_section(Message.SECTION_QUESTION)
self.assertEqual(0, self.r.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(0, len(self.r.get_question()))
def test_clear_section(self):
for section in [Message.SECTION_ANSWER, Message.SECTION_AUTHORITY,
diff --git a/src/lib/dns/rdata.cc b/src/lib/dns/rdata.cc
index c1ece52..59a5887 100644
--- a/src/lib/dns/rdata.cc
+++ b/src/lib/dns/rdata.cc
@@ -119,7 +119,7 @@ Generic::Generic(isc::util::InputBuffer& buffer, size_t rdata_len) {
impl_ = new GenericImpl(data);
}
-Generic::Generic(const string& rdata_string) {
+Generic::Generic(const std::string& rdata_string) {
istringstream iss(rdata_string);
string unknown_mark;
iss >> unknown_mark;
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 4eb72bc..9ef887f 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -74,25 +74,28 @@ struct TSIG::TSIGImpl {
/// \code <Alg> <Time> <Fudge> <MACsize> [<MAC>] <OrigID> <Error> <OtherLen> [<OtherData>]
/// \endcode
/// where
-/// - <Alg> is a valid textual representation of domain name.
-/// - <Time> is an unsigned 48-bit decimal integer.
-/// - <MACSize>, <OrigID>, and <OtherLen> are an unsigned 16-bit decimal
+/// - <Alg> is a valid textual representation of domain name.
+/// - <Time> is an unsigned 48-bit decimal integer.
+/// - <MACSize>, <OrigID>, and <OtherLen> are an unsigned
+/// 16-bit decimal
/// integer.
-/// - <Error> is an unsigned 16-bit decimal integer or a valid mnemonic for
-/// the Error field specified in RFC2845. Currently, "BADSIG", "BADKEY",
+/// - <Error> is an unsigned 16-bit decimal integer or a valid mnemonic
+/// for the Error field specified in RFC2845. Currently, "BADSIG", "BADKEY",
/// and "BADTIME" are supported (case sensitive). In future versions
/// other representations that are compatible with the DNS RCODE will be
/// supported.
-/// - <MAC> and <OtherData> is a BASE-64 encoded string that does not contain
-/// space characters.
-/// When <MACSize> and <OtherLen> is 0, <MAC> and <OtherData> must not
-/// appear in \c tsgi_str, respectively.
-/// - The decoded data of <MAC> is <MACSize> bytes of binary stream.
-/// - The decoded data of <OtherData> is <OtherLen> bytes of binary stream.
+/// - <MAC> and <OtherData> is a BASE-64 encoded string that does
+/// not contain space characters.
+/// When <MACSize> and <OtherLen> is 0, <MAC> and
+/// <OtherData> must not appear in \c tsig_str, respectively.
+/// - The decoded data of <MAC> is <MACSize> bytes of binary
+/// stream.
+/// - The decoded data of <OtherData> is <OtherLen> bytes of
+/// binary stream.
///
/// An example of valid string is:
/// \code "hmac-sha256. 853804800 300 3 AAAA 2845 0 0" \endcode
-/// In this example <OtherData> is missing because <OtherLen> is 0.
+/// In this example <OtherData> is missing because <OtherLen> is 0.
///
/// Note that RFC2845 does not define the standard presentation format
/// of %TSIG RR, so the above syntax is implementation specific.
@@ -101,10 +104,10 @@ struct TSIG::TSIGImpl {
///
/// <b>Exceptions</b>
///
-/// If <Alg> is not a valid domain name, a corresponding exception from
+/// If <Alg> is not a valid domain name, a corresponding exception from
/// the \c Name class will be thrown;
-/// if <MAC> or <OtherData> is not validly encoded in BASE-64, an exception
-/// of class \c isc::BadValue will be thrown;
+/// if <MAC> or <OtherData> is not validly encoded in BASE-64, an
+/// exception of class \c isc::BadValue will be thrown;
/// if %any of the other bullet points above is not met, an exception of
/// class \c InvalidRdataText will be thrown.
/// This constructor internally involves resource allocation, and if it fails
diff --git a/src/lib/dns/rdata/ch_3/a_1.cc b/src/lib/dns/rdata/ch_3/a_1.cc
index 65378a1..3d13a9e 100644
--- a/src/lib/dns/rdata/ch_3/a_1.cc
+++ b/src/lib/dns/rdata/ch_3/a_1.cc
@@ -27,7 +27,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-A::A(const string&) {
+A::A(const std::string&) {
// TBD
}
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
index 9887aa8..a3db998 100644
--- a/src/lib/dns/rdata/generic/dlv_32769.cc
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -34,7 +34,7 @@ using namespace isc::dns::rdata::generic::detail;
/// \brief Constructor from string.
///
/// A copy of the implementation object is allocated and constructed.
-DLV::DLV(const string& ds_str) :
+DLV::DLV(const std::string& ds_str) :
impl_(new DLVImpl(ds_str))
{}
diff --git a/src/lib/dns/rdata/generic/dnskey_48.cc b/src/lib/dns/rdata/generic/dnskey_48.cc
index 7bdbd05..054ac18 100644
--- a/src/lib/dns/rdata/generic/dnskey_48.cc
+++ b/src/lib/dns/rdata/generic/dnskey_48.cc
@@ -51,7 +51,7 @@ struct DNSKEYImpl {
const vector<uint8_t> keydata_;
};
-DNSKEY::DNSKEY(const string& dnskey_str) :
+DNSKEY::DNSKEY(const std::string& dnskey_str) :
impl_(NULL)
{
istringstream iss(dnskey_str);
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 20b62dc..4234f9d 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -31,7 +31,7 @@ using namespace isc::dns::rdata::generic::detail;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-DS::DS(const string& ds_str) :
+DS::DS(const std::string& ds_str) :
impl_(new DSImpl(ds_str))
{}
diff --git a/src/lib/dns/rdata/generic/hinfo_13.cc b/src/lib/dns/rdata/generic/hinfo_13.cc
index 45f4209..b1aeaa1 100644
--- a/src/lib/dns/rdata/generic/hinfo_13.cc
+++ b/src/lib/dns/rdata/generic/hinfo_13.cc
@@ -37,7 +37,7 @@ using namespace isc::dns::characterstr;
// BEGIN_RDATA_NAMESPACE
-HINFO::HINFO(const string& hinfo_str) {
+HINFO::HINFO(const std::string& hinfo_str) {
string::const_iterator input_iterator = hinfo_str.begin();
cpu_ = getNextCharacterString(hinfo_str, input_iterator);
diff --git a/src/lib/dns/rdata/generic/nsec3_50.cc b/src/lib/dns/rdata/generic/nsec3_50.cc
index b569d91..89f188a 100644
--- a/src/lib/dns/rdata/generic/nsec3_50.cc
+++ b/src/lib/dns/rdata/generic/nsec3_50.cc
@@ -64,7 +64,7 @@ struct NSEC3Impl {
const vector<uint8_t> typebits_;
};
-NSEC3::NSEC3(const string& nsec3_str) :
+NSEC3::NSEC3(const std::string& nsec3_str) :
impl_(NULL)
{
istringstream iss(nsec3_str);
diff --git a/src/lib/dns/rdata/generic/nsec3param_51.cc b/src/lib/dns/rdata/generic/nsec3param_51.cc
index ac09b57..6614bdc 100644
--- a/src/lib/dns/rdata/generic/nsec3param_51.cc
+++ b/src/lib/dns/rdata/generic/nsec3param_51.cc
@@ -46,7 +46,7 @@ struct NSEC3PARAMImpl {
const vector<uint8_t> salt_;
};
-NSEC3PARAM::NSEC3PARAM(const string& nsec3param_str) :
+NSEC3PARAM::NSEC3PARAM(const std::string& nsec3param_str) :
impl_(NULL)
{
istringstream iss(nsec3param_str);
diff --git a/src/lib/dns/rdata/generic/nsec_47.cc b/src/lib/dns/rdata/generic/nsec_47.cc
index 08825db..aeb6da8 100644
--- a/src/lib/dns/rdata/generic/nsec_47.cc
+++ b/src/lib/dns/rdata/generic/nsec_47.cc
@@ -49,7 +49,7 @@ struct NSECImpl {
vector<uint8_t> typebits_;
};
-NSEC::NSEC(const string& nsec_str) :
+NSEC::NSEC(const std::string& nsec_str) :
impl_(NULL)
{
istringstream iss(nsec_str);
diff --git a/src/lib/dns/rdata/generic/opt_41.cc b/src/lib/dns/rdata/generic/opt_41.cc
index 62cfc17..d64effb 100644
--- a/src/lib/dns/rdata/generic/opt_41.cc
+++ b/src/lib/dns/rdata/generic/opt_41.cc
@@ -27,7 +27,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-OPT::OPT(const string&) {
+OPT::OPT(const std::string&) {
isc_throw(InvalidRdataText, "OPT RR cannot be constructed from text");
}
diff --git a/src/lib/dns/rdata/generic/ptr_12.cc b/src/lib/dns/rdata/generic/ptr_12.cc
index 86ddeb4..b76fc7f 100644
--- a/src/lib/dns/rdata/generic/ptr_12.cc
+++ b/src/lib/dns/rdata/generic/ptr_12.cc
@@ -28,7 +28,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-PTR::PTR(const string& type_str) :
+PTR::PTR(const std::string& type_str) :
ptr_name_(type_str)
{}
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 59ff030..e0137b9 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -72,7 +72,7 @@ struct RRSIGImpl {
const vector<uint8_t> signature_;
};
-RRSIG::RRSIG(const string& rrsig_str) :
+RRSIG::RRSIG(const std::string& rrsig_str) :
impl_(NULL)
{
istringstream iss(rrsig_str);
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index e473bca..e70db0f 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -41,7 +41,7 @@ SOA::SOA(InputBuffer& buffer, size_t) :
buffer.readData(numdata_, sizeof(numdata_));
}
-SOA::SOA(const string& soastr) :
+SOA::SOA(const std::string& soastr) :
mname_("."), rname_(".") // quick hack workaround
{
istringstream iss(soastr);
diff --git a/src/lib/dns/rdata/generic/sshfp_44.cc b/src/lib/dns/rdata/generic/sshfp_44.cc
index 6320fd9..6fa8609 100644
--- a/src/lib/dns/rdata/generic/sshfp_44.cc
+++ b/src/lib/dns/rdata/generic/sshfp_44.cc
@@ -80,7 +80,7 @@ SSHFP::SSHFP(const std::string& sshfp_str)
decodeHex(fingerprintbuf.str(), fingerprint_);
}
-SSHFP::SSHFP(uint8_t algorithm, uint8_t fingerprint_type, const string& fingerprint)
+SSHFP::SSHFP(uint8_t algorithm, uint8_t fingerprint_type, const std::string& fingerprint)
{
if ((algorithm < 1) || (algorithm > 2)) {
isc_throw(InvalidRdataText, "SSHFP algorithm number out of range");
diff --git a/src/lib/dns/rdata/hs_4/a_1.cc b/src/lib/dns/rdata/hs_4/a_1.cc
index 65378a1..3d13a9e 100644
--- a/src/lib/dns/rdata/hs_4/a_1.cc
+++ b/src/lib/dns/rdata/hs_4/a_1.cc
@@ -27,7 +27,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-A::A(const string&) {
+A::A(const std::string&) {
// TBD
}
diff --git a/src/lib/dns/rdata/in_1/a_1.cc b/src/lib/dns/rdata/in_1/a_1.cc
index fa46f90..3b15a4c 100644
--- a/src/lib/dns/rdata/in_1/a_1.cc
+++ b/src/lib/dns/rdata/in_1/a_1.cc
@@ -34,7 +34,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-A::A(const string& addrstr) {
+A::A(const std::string& addrstr) {
// RFC1035 states textual representation of IN/A RDATA is
// "four decimal numbers separated by dots without any embedded spaces".
// This is exactly what inet_pton() accepts for AF_INET. In particular,
diff --git a/src/lib/dns/rdata/in_1/aaaa_28.cc b/src/lib/dns/rdata/in_1/aaaa_28.cc
index e9fc122..ce49a04 100644
--- a/src/lib/dns/rdata/in_1/aaaa_28.cc
+++ b/src/lib/dns/rdata/in_1/aaaa_28.cc
@@ -34,7 +34,7 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-AAAA::AAAA(const string& addrstr) {
+AAAA::AAAA(const std::string& addrstr) {
if (inet_pton(AF_INET6, addrstr.c_str(), &addr_) != 1) {
isc_throw(InvalidRdataText,
"IN/AAAA RDATA construction from text failed: "
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
index f0c4aca..7745161 100644
--- a/src/lib/dns/rdata/in_1/dhcid_49.cc
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -47,7 +47,7 @@ using namespace isc::util;
/// < n octets > Digest (length depends on digest type)
/// If the data is less than 3 octets (i.e. it cannot contain id type code and
/// digest type code), an exception of class \c InvalidRdataLength is thrown.
-DHCID::DHCID(const string& dhcid_str) {
+DHCID::DHCID(const std::string& dhcid_str) {
istringstream iss(dhcid_str);
stringbuf digestbuf;
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
index 93b5d4d..a1a3909 100644
--- a/src/lib/dns/rdata/in_1/srv_33.cc
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -52,22 +52,22 @@ struct SRVImpl {
/// \code <Priority> <Weight> <Port> <Target>
/// \endcode
/// where
-/// - <Priority>, <Weight>, and <Port> are an unsigned 16-bit decimal
-/// integer.
-/// - <Target> is a valid textual representation of domain name.
+/// - <Priority>, <Weight>, and <Port> are an unsigned
+/// 16-bit decimal integer.
+/// - <Target> is a valid textual representation of domain name.
///
/// An example of valid string is:
/// \code "1 5 1500 example.com." \endcode
///
/// <b>Exceptions</b>
///
-/// If <Target> is not a valid domain name, a corresponding exception from
-/// the \c Name class will be thrown;
+/// If <Target> is not a valid domain name, a corresponding exception
+/// from the \c Name class will be thrown;
/// if %any of the other bullet points above is not met, an exception of
/// class \c InvalidRdataText will be thrown.
/// This constructor internally involves resource allocation, and if it fails
/// a corresponding standard exception will be thrown.
-SRV::SRV(const string& srv_str) :
+SRV::SRV(const std::string& srv_str) :
impl_(NULL)
{
istringstream iss(srv_str);
diff --git a/src/lib/dns/rrclass.cc b/src/lib/dns/rrclass.cc
index a28e5cf..ac5823c 100644
--- a/src/lib/dns/rrclass.cc
+++ b/src/lib/dns/rrclass.cc
@@ -30,7 +30,7 @@ using namespace isc::util;
namespace isc {
namespace dns {
-RRClass::RRClass(const string& classstr) {
+RRClass::RRClass(const std::string& classstr) {
classcode_ = RRParamRegistry::getRegistry().textToClassCode(classstr);
}
diff --git a/src/lib/dns/rrparamregistry-placeholder.cc b/src/lib/dns/rrparamregistry-placeholder.cc
index 8b01e41..f7f3a1a 100644
--- a/src/lib/dns/rrparamregistry-placeholder.cc
+++ b/src/lib/dns/rrparamregistry-placeholder.cc
@@ -224,7 +224,7 @@ RRParamRegistry::getRegistry() {
}
void
-RRParamRegistry::add(const string& typecode_string, uint16_t typecode,
+RRParamRegistry::add(const std::string& typecode_string, uint16_t typecode,
RdataFactoryPtr rdata_factory)
{
bool type_added = false;
@@ -242,8 +242,8 @@ RRParamRegistry::add(const string& typecode_string, uint16_t typecode,
}
void
-RRParamRegistry::add(const string& typecode_string, uint16_t typecode,
- const string& classcode_string, uint16_t classcode,
+RRParamRegistry::add(const std::string& typecode_string, uint16_t typecode,
+ const std::string& classcode_string, uint16_t classcode,
RdataFactoryPtr rdata_factory)
{
// Rollback logic on failure is complicated. If adding the new type or
@@ -470,7 +470,7 @@ RRParamRegistry::codeToClassText(uint16_t code) const {
RdataPtr
RRParamRegistry::createRdata(const RRType& rrtype, const RRClass& rrclass,
- const string& rdata_string)
+ const std::string& rdata_string)
{
// If the text indicates that it's rdata of an "unknown" type (beginning
// with '\# n'), parse it that way. (TBD)
diff --git a/src/lib/dns/rrttl.cc b/src/lib/dns/rrttl.cc
index ecd8cc6..49c63be 100644
--- a/src/lib/dns/rrttl.cc
+++ b/src/lib/dns/rrttl.cc
@@ -28,7 +28,7 @@ using namespace isc::util;
namespace isc {
namespace dns {
-RRTTL::RRTTL(const string& ttlstr) {
+RRTTL::RRTTL(const std::string& ttlstr) {
// Some systems (at least gcc-4.4) flow negative values over into
// unsigned integer, where older systems failed to parse. We want
// that failure here, so we extract into int64 and check the value
diff --git a/src/lib/dns/rrtype.cc b/src/lib/dns/rrtype.cc
index af077d4..4ef4e67 100644
--- a/src/lib/dns/rrtype.cc
+++ b/src/lib/dns/rrtype.cc
@@ -31,7 +31,7 @@ using isc::dns::RRType;
namespace isc {
namespace dns {
-RRType::RRType(const string& typestr) {
+RRType::RRType(const std::string& typestr) {
typecode_ = RRParamRegistry::getRegistry().textToTypeCode(typestr);
}
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index c5dd3ed..f30a2ac 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -406,6 +406,8 @@ TEST_F(MessageTest, clearQuestionSection) {
message_render.clearSection(Message::SECTION_QUESTION);
EXPECT_EQ(0, message_render.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_TRUE(message_render.beginQuestion() ==
+ message_render.endQuestion());
}
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index b82eb1b..fb3aed7 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -2,6 +2,7 @@ SUBDIRS = . compiler tests
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DTOP_BUILDDIR=\"${abs_top_builddir}\"
CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/log/compiler/message.cc b/src/lib/log/compiler/message.cc
index 66dc9c7..86c5f20 100644
--- a/src/lib/log/compiler/message.cc
+++ b/src/lib/log/compiler/message.cc
@@ -58,14 +58,14 @@ static const char* VERSION = "1.0-0";
/// \b Invocation<BR>
/// The program is invoked with the command:
///
-/// <tt>message [-v | -h | -p | -d <dir> | <message-file>]</tt>
+/// <tt>message [-v | -h | -p | -d <dir> | <message-file>]</tt>
///
/// It reads the message file and writes out two files of the same
/// name in the current working directory (unless -d is used) but
/// with extensions of .h and .cc, or .py if -p is used.
///
/// -v causes it to print the version number and exit. -h prints a help
-/// message (and exits). -p sets the output to python. -d <dir> will make
+/// message (and exits). -p sets the output to python. -d <dir> will make
/// it write the output file(s) to dir instead of current working
/// directory
@@ -119,9 +119,9 @@ currentTime() {
/// \brief Create Header Sentinel
///
-/// Given the name of a file, create an #ifdef sentinel name. The name is
-/// __<name>_<ext>, where <name> is the name of the file, and <ext> is the
-/// extension less the leading period. The sentinel will be upper-case.
+/// Given the name of a file, create an \#ifdef sentinel name. The name is
+/// __<name>_<ext>, where <name> is the name of the file, and <ext>
+/// is the extension less the leading period. The sentinel will be upper-case.
///
/// \param file Filename object representing the file.
///
diff --git a/src/lib/log/logger.cc b/src/lib/log/logger.cc
index d10e979..fef5627 100644
--- a/src/lib/log/logger.cc
+++ b/src/lib/log/logger.cc
@@ -179,6 +179,13 @@ Logger::fatal(const isc::log::MessageID& ident) {
}
}
+// Replace the interprocess synchronization object
+
+void
+Logger::setInterprocessSync(isc::util::InterprocessSync* sync) {
+ getLoggerPtr()->setInterprocessSync(sync);
+}
+
// Comparison (testing only)
bool
diff --git a/src/lib/log/logger.h b/src/lib/log/logger.h
index 5715bc4..6405488 100644
--- a/src/lib/log/logger.h
+++ b/src/lib/log/logger.h
@@ -25,6 +25,7 @@
#include <log/message_types.h>
#include <log/log_formatter.h>
+#include <util/interprocess_sync.h>
namespace isc {
namespace log {
@@ -98,6 +99,17 @@ public:
{}
};
+/// \brief Bad Interprocess Sync
+///
+/// Exception thrown if a bad InterprocessSync object (such as NULL) is
+/// used.
+class BadInterprocessSync : public isc::Exception {
+public:
+ BadInterprocessSync(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what)
+ {}
+};
+
/// \brief Logger Class
///
/// This class is the main class used for logging. Use comprises:
@@ -237,6 +249,17 @@ public:
/// \param ident Message identification.
Formatter fatal(const MessageID& ident);
+ /// \brief Replace the interprocess synchronization object
+ ///
+ /// If this method is called with NULL as the argument, it throws a
+ /// BadInterprocessSync exception.
+ ///
+ /// \param sync The logger uses this synchronization object for
+ /// synchronizing output of log messages. It should be deletable and
+ /// the ownership is transferred to the logger. If NULL is passed,
+ /// a BadInterprocessSync exception is thrown.
+ void setInterprocessSync(isc::util::InterprocessSync* sync);
+
/// \brief Equality
///
/// Check if two instances of this logger refer to the same stream.
diff --git a/src/lib/log/logger_impl.cc b/src/lib/log/logger_impl.cc
index 046da13..2d6c0f4 100644
--- a/src/lib/log/logger_impl.cc
+++ b/src/lib/log/logger_impl.cc
@@ -32,12 +32,14 @@
#include <log/message_types.h>
#include <util/strutil.h>
+#include <util/interprocess_sync_file.h>
// Note: as log4cplus and the BIND 10 logger have many concepts in common, and
// thus many similar names, to disambiguate types we don't "use" the log4cplus
// namespace: instead, all log4cplus types are explicitly qualified.
using namespace std;
+using namespace isc::util;
namespace isc {
namespace log {
@@ -47,14 +49,17 @@ namespace log {
// one compiler requires that all member variables be constructed before the
// constructor is run, but log4cplus::Logger (the type of logger_) has no
// default constructor.
-LoggerImpl::LoggerImpl(const string& name) : name_(expandLoggerName(name)),
- logger_(log4cplus::Logger::getInstance(name_))
+LoggerImpl::LoggerImpl(const string& name) :
+ name_(expandLoggerName(name)),
+ logger_(log4cplus::Logger::getInstance(name_)),
+ sync_(new InterprocessSyncFile("logger"))
{
}
// Destructor. (Here because of virtual declaration.)
LoggerImpl::~LoggerImpl() {
+ delete sync_;
}
// Set the severity for logging.
@@ -102,8 +107,30 @@ LoggerImpl::lookupMessage(const MessageID& ident) {
MessageDictionary::globalDictionary().getText(ident)));
}
+// Replace the interprocess synchronization object
+
+void
+LoggerImpl::setInterprocessSync(isc::util::InterprocessSync* sync) {
+ if (sync == NULL) {
+ isc_throw(BadInterprocessSync,
+ "NULL was passed to setInterprocessSync()");
+ }
+
+ delete sync_;
+ sync_ = sync;
+}
+
void
LoggerImpl::outputRaw(const Severity& severity, const string& message) {
+ // Use an interprocess sync locker for mutual exclusion from other
+ // processes to avoid log messages getting interspersed.
+
+ InterprocessSyncLocker locker(*sync_);
+
+ if (!locker.lock()) {
+ LOG4CPLUS_ERROR(logger_, "Unable to lock logger lockfile");
+ }
+
switch (severity) {
case DEBUG:
LOG4CPLUS_DEBUG(logger_, message);
@@ -124,6 +151,10 @@ LoggerImpl::outputRaw(const Severity& severity, const string& message) {
case FATAL:
LOG4CPLUS_FATAL(logger_, message);
}
+
+ if (!locker.unlock()) {
+ LOG4CPLUS_ERROR(logger_, "Unable to unlock logger lockfile");
+ }
}
} // namespace log
diff --git a/src/lib/log/logger_impl.h b/src/lib/log/logger_impl.h
index 90bd41a..10d3db4 100644
--- a/src/lib/log/logger_impl.h
+++ b/src/lib/log/logger_impl.h
@@ -32,6 +32,8 @@
#include <log/logger_level_impl.h>
#include <log/message_types.h>
+#include <util/interprocess_sync.h>
+
namespace isc {
namespace log {
@@ -167,6 +169,17 @@ public:
/// This gets you the unformatted text of message for given ID.
std::string* lookupMessage(const MessageID& id);
+ /// \brief Replace the interprocess synchronization object
+ ///
+ /// If this method is called with NULL as the argument, it throws a
+ /// BadInterprocessSync exception.
+ ///
+ /// \param sync The logger uses this synchronization object for
+ /// synchronizing output of log messages. It should be deletable and
+ /// the ownership is transferred to the logger implementation.
+ /// If NULL is passed, a BadInterprocessSync exception is thrown.
+ void setInterprocessSync(isc::util::InterprocessSync* sync);
+
/// \brief Equality
///
/// Check if two instances of this logger refer to the same stream.
@@ -178,8 +191,9 @@ public:
}
private:
- std::string name_; ///< Full name of this logger
- log4cplus::Logger logger_; ///< Underlying log4cplus logger
+ std::string name_; ///< Full name of this logger
+ log4cplus::Logger logger_; ///< Underlying log4cplus logger
+ isc::util::InterprocessSync* sync_;
};
} // namespace log
diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc
index 8a8a36b..8431c2e 100644
--- a/src/lib/log/logger_manager.cc
+++ b/src/lib/log/logger_manager.cc
@@ -28,6 +28,7 @@
#include <log/message_initializer.h>
#include <log/message_reader.h>
#include <log/message_types.h>
+#include "util/interprocess_sync_null.h"
using namespace std;
@@ -148,6 +149,13 @@ LoggerManager::readLocalMessageFile(const char* file) {
MessageDictionary& dictionary = MessageDictionary::globalDictionary();
MessageReader reader(&dictionary);
+
+ // Turn off use of any lock files. This is because this logger can
+ // be used by standalone programs which may not have write access to
+ // the local state directory (to create lock files). So we switch to
+ // using a null interprocess sync object here.
+ logger.setInterprocessSync(new isc::util::InterprocessSyncNull("logger"));
+
try {
logger.info(LOG_READING_LOCAL_FILE).arg(file);
diff --git a/src/lib/log/logger_unittest_support.cc b/src/lib/log/logger_unittest_support.cc
index a0969be..4f02b07 100644
--- a/src/lib/log/logger_unittest_support.cc
+++ b/src/lib/log/logger_unittest_support.cc
@@ -160,6 +160,9 @@ void initLogger(isc::log::Severity severity, int dbglevel) {
// Set the local message file
const char* localfile = getenv("B10_LOGGER_LOCALMSG");
+ // Set a directory for creating lockfiles when running tests
+ setenv("B10_LOCKFILE_DIR_FROM_BUILD", TOP_BUILDDIR, 1);
+
// Initialize logging
initLogger(root, isc::log::DEBUG, isc::log::MAX_DEBUG_LEVEL, localfile);
diff --git a/src/lib/log/message_dictionary.cc b/src/lib/log/message_dictionary.cc
index deb8232..3bfc56c 100644
--- a/src/lib/log/message_dictionary.cc
+++ b/src/lib/log/message_dictionary.cc
@@ -29,7 +29,7 @@ MessageDictionary::~MessageDictionary() {
// Add message and note if ID already exists
bool
-MessageDictionary::add(const string& ident, const string& text) {
+MessageDictionary::add(const std::string& ident, const std::string& text) {
Dictionary::iterator i = dictionary_.find(ident);
bool not_found = (i == dictionary_.end());
if (not_found) {
@@ -44,7 +44,7 @@ MessageDictionary::add(const string& ident, const string& text) {
// Add message and note if ID does not already exist
bool
-MessageDictionary::replace(const string& ident, const string& text) {
+MessageDictionary::replace(const std::string& ident, const std::string& text) {
Dictionary::iterator i = dictionary_.find(ident);
bool found = (i != dictionary_.end());
if (found) {
@@ -87,7 +87,7 @@ MessageDictionary::load(const char* messages[]) {
// output.
const string&
-MessageDictionary::getText(const string& ident) const {
+MessageDictionary::getText(const std::string& ident) const {
static const string empty("");
Dictionary::const_iterator i = dictionary_.find(ident);
if (i == dictionary_.end()) {
diff --git a/src/lib/log/message_exception.h b/src/lib/log/message_exception.h
index cd6caf2..8b9d58a 100644
--- a/src/lib/log/message_exception.h
+++ b/src/lib/log/message_exception.h
@@ -38,6 +38,9 @@ public:
/// \brief Constructor
///
+ /// \param file Filename where the exception occurred.
+ /// \param line Line where exception occurred.
+ /// \param what Text description of the problem.
/// \param id Message identification.
/// \param lineno Line number on which error occurred (if > 0).
MessageException(const char* file, size_t line, const char* what,
@@ -51,6 +54,9 @@ public:
/// \brief Constructor
///
+ /// \param file Filename where the exception occurred.
+ /// \param line Line where exception occurred.
+ /// \param what Text description of the problem.
/// \param id Message identification.
/// \param arg1 First message argument.
/// \param lineno Line number on which error occurred (if > 0).
@@ -66,6 +72,9 @@ public:
/// \brief Constructor
///
+ /// \param file Filename where the exception occurred.
+ /// \param line Line where exception occurred.
+ /// \param what Text description of the problem.
/// \param id Message identification.
/// \param arg1 First message argument.
/// \param arg2 Second message argument.
diff --git a/src/lib/log/tests/.gitignore b/src/lib/log/tests/.gitignore
index 41b863b..b0e45b9 100644
--- a/src/lib/log/tests/.gitignore
+++ b/src/lib/log/tests/.gitignore
@@ -6,6 +6,10 @@
/initializer_unittests_2
/local_file_test.sh
/logger_example
+/logger_lock_test
+/logger_lock_test.sh
+/log_test_messages.cc
+/log_test_messages.h
/run_unittests
/severity_test.sh
/tempdir.h
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 6f3d768..9742037 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -12,6 +12,13 @@ endif
CLEANFILES = *.gcno *.gcda
+EXTRA_DIST = log_test_messages.mes
+BUILT_SOURCES = log_test_messages.h log_test_messages.cc
+log_test_messages.h log_test_messages.cc: log_test_messages.mes
+ $(AM_V_GEN) $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/log/tests/log_test_messages.mes
+
+CLEANFILES += log_test_messages.h log_test_messages.cc
+
noinst_PROGRAMS = logger_example
logger_example_SOURCES = logger_example.cc
logger_example_CPPFLAGS = $(AM_CPPFLAGS)
@@ -30,6 +37,16 @@ init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
init_logger_test_LDADD += $(AM_LDADD) $(LOG4CPLUS_LIBS)
+noinst_PROGRAMS += logger_lock_test
+logger_lock_test_SOURCES = logger_lock_test.cc
+nodist_logger_lock_test_SOURCES = log_test_messages.cc log_test_messages.h
+logger_lock_test_CPPFLAGS = $(AM_CPPFLAGS)
+logger_lock_test_LDFLAGS = $(AM_LDFLAGS)
+logger_lock_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
+logger_lock_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
+logger_lock_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+logger_lock_test_LDADD += $(AM_LDADD) $(LOG4CPLUS_LIBS)
+
if HAVE_GTEST
TESTS =
@@ -62,6 +79,7 @@ run_unittests_SOURCES += logger_specification_unittest.cc
run_unittests_SOURCES += message_dictionary_unittest.cc
run_unittests_SOURCES += message_reader_unittest.cc
run_unittests_SOURCES += output_option_unittest.cc
+nodist_run_unittests_SOURCES = log_test_messages.cc log_test_messages.h
run_unittests_CPPFLAGS = $(AM_CPPFLAGS)
run_unittests_CXXFLAGS = $(AM_CXXFLAGS)
@@ -104,4 +122,5 @@ check-local:
$(SHELL) $(abs_builddir)/destination_test.sh
$(SHELL) $(abs_builddir)/init_logger_test.sh
$(SHELL) $(abs_builddir)/local_file_test.sh
+ $(SHELL) $(abs_builddir)/logger_lock_test.sh
$(SHELL) $(abs_builddir)/severity_test.sh
diff --git a/src/lib/log/tests/log_test_messages.mes b/src/lib/log/tests/log_test_messages.mes
new file mode 100644
index 0000000..ed4940c
--- /dev/null
+++ b/src/lib/log/tests/log_test_messages.mes
@@ -0,0 +1,26 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \brief Logging Test Message File
+#
+# This is the source of the set of messages used by the logging unit
+# tests. Unlike the main logging message file, the associated .h and
+# .cc files for this file ARE generated during the build process: the
+# message compiler is invoked on this file via the BUILT_SOURCES rule
+# in tests/Makefile.am.
+
+$NAMESPACE isc::log
+
+% LOG_LOCK_TEST_MESSAGE this is a test message.
+This is a log message used in testing.
diff --git a/src/lib/log/tests/logger_example.cc b/src/lib/log/tests/logger_example.cc
index d3f08f3..853d48a 100644
--- a/src/lib/log/tests/logger_example.cc
+++ b/src/lib/log/tests/logger_example.cc
@@ -41,6 +41,7 @@
// Include a set of message definitions.
#include <log/log_messages.h>
+#include "util/interprocess_sync_null.h"
using namespace isc::log;
using namespace std;
@@ -280,10 +281,17 @@ int main(int argc, char** argv) {
LoggerManager::readLocalMessageFile(argv[optind]);
}
- // Log a few messages to different loggers.
+ // Log a few messages to different loggers. Here, we switch to using
+ // null interprocess sync objects for the loggers below as the
+ // logger example can be used as a standalone program (which may not
+ // have write access to a local state directory to create
+ // lockfiles).
isc::log::Logger logger_ex(ROOT_NAME);
+ logger_ex.setInterprocessSync(new isc::util::InterprocessSyncNull("logger"));
isc::log::Logger logger_alpha("alpha");
+ logger_alpha.setInterprocessSync(new isc::util::InterprocessSyncNull("logger"));
isc::log::Logger logger_beta("beta");
+ logger_beta.setInterprocessSync(new isc::util::InterprocessSyncNull("logger"));
LOG_FATAL(logger_ex, LOG_WRITE_ERROR).arg("test1").arg("42");
LOG_ERROR(logger_ex, LOG_READING_LOCAL_FILE).arg("dummy/file");
diff --git a/src/lib/log/tests/logger_lock_test.cc b/src/lib/log/tests/logger_lock_test.cc
new file mode 100644
index 0000000..d63989c
--- /dev/null
+++ b/src/lib/log/tests/logger_lock_test.cc
@@ -0,0 +1,64 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <log/macros.h>
+#include <log/logger_support.h>
+#include <log/log_messages.h>
+#include "util/interprocess_sync.h"
+#include "log_test_messages.h"
+#include <iostream>
+
+using namespace std;
+using namespace isc::log;
+
+class MockLoggingSync : public isc::util::InterprocessSync {
+public:
+ /// \brief Constructor
+ MockLoggingSync(const std::string& component_name) :
+ InterprocessSync(component_name)
+ {}
+
+protected:
+ virtual bool lock() {
+ cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: LOCK\n";
+ return (true);
+ }
+
+ virtual bool tryLock() {
+ cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: TRYLOCK\n";
+ return (true);
+ }
+
+ virtual bool unlock() {
+ cout << "FIELD1 FIELD2 LOGGER_LOCK_TEST: UNLOCK\n";
+ return (true);
+ }
+};
+
+/// \brief Test logger lock sequence
+///
+/// A program used in testing the logger. It verifies that (1) an
+/// interprocess sync lock is first acquired by the logger, (2) the
+/// message is logged by the logger, and (3) the lock is released in
+/// that sequence.
+int
+main(int, char**) {
+ initLogger();
+ Logger logger("log");
+ logger.setInterprocessSync(new MockLoggingSync("log"));
+
+ LOG_INFO(logger, LOG_LOCK_TEST_MESSAGE);
+
+ return (0);
+}
diff --git a/src/lib/log/tests/logger_lock_test.sh.in b/src/lib/log/tests/logger_lock_test.sh.in
new file mode 100755
index 0000000..0324499
--- /dev/null
+++ b/src/lib/log/tests/logger_lock_test.sh.in
@@ -0,0 +1,46 @@
+#!/bin/sh
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that the logger interprocess sync locks are acquired and
+# released correctly.
+
+failcount=0
+tempfile=@abs_builddir@/logger_lock_test_tempfile_$$
+destfile=@abs_builddir@/logger_lock_test_destfile_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+echo -n "Testing that logger acquires and releases locks correctly:"
+cat > $tempfile << .
+LOGGER_LOCK_TEST: LOCK
+INFO [bind10.log] LOG_LOCK_TEST_MESSAGE this is a test message.
+LOGGER_LOCK_TEST: UNLOCK
+.
+rm -f $destfile
+B10_LOGGER_SEVERITY=INFO B10_LOGGER_DESTINATION=stdout ./logger_lock_test > $destfile
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+# Tidy up.
+rm -f $tempfile $destfile
+
+exit $failcount
diff --git a/src/lib/log/tests/logger_unittest.cc b/src/lib/log/tests/logger_unittest.cc
index 069205e..a9330a9 100644
--- a/src/lib/log/tests/logger_unittest.cc
+++ b/src/lib/log/tests/logger_unittest.cc
@@ -23,6 +23,9 @@
#include <log/logger_manager.h>
#include <log/logger_name.h>
#include <log/log_messages.h>
+#include "log/tests/log_test_messages.h"
+
+#include <util/interprocess_sync_file.h>
using namespace isc;
using namespace isc::log;
@@ -379,3 +382,66 @@ TEST_F(LoggerTest, LoggerNameLength) {
}, ".*");
#endif
}
+
+TEST_F(LoggerTest, setInterprocessSync) {
+ // Create a logger
+ Logger logger("alpha");
+
+ EXPECT_THROW(logger.setInterprocessSync(NULL), BadInterprocessSync);
+}
+
+class MockSync : public isc::util::InterprocessSync {
+public:
+ /// \brief Constructor
+ MockSync(const std::string& component_name) :
+ InterprocessSync(component_name), was_locked_(false),
+ was_unlocked_(false)
+ {}
+
+ bool wasLocked() const {
+ return (was_locked_);
+ }
+
+ bool wasUnlocked() const {
+ return (was_unlocked_);
+ }
+
+protected:
+ bool lock() {
+ was_locked_ = true;
+ return (true);
+ }
+
+ bool tryLock() {
+ return (true);
+ }
+
+ bool unlock() {
+ was_unlocked_ = true;
+ return (true);
+ }
+
+private:
+ bool was_locked_;
+ bool was_unlocked_;
+};
+
+// Checks that the logger logs exclusively and other BIND 10 components
+// are locked out.
+
+TEST_F(LoggerTest, Lock) {
+ // Create a logger
+ Logger logger("alpha");
+
+ // Setup our own mock sync object so that we can intercept the lock
+ // call and check if a lock has been taken.
+ MockSync* sync = new MockSync("logger");
+ logger.setInterprocessSync(sync);
+
+ // Log a message and put things into play.
+ logger.setSeverity(isc::log::INFO, 100);
+ logger.info(LOG_LOCK_TEST_MESSAGE);
+
+ EXPECT_TRUE(sync->wasLocked());
+ EXPECT_TRUE(sync->wasUnlocked());
+}
diff --git a/src/lib/log/tests/message_dictionary_unittest.cc b/src/lib/log/tests/message_dictionary_unittest.cc
index 394fea0..b8bded3 100644
--- a/src/lib/log/tests/message_dictionary_unittest.cc
+++ b/src/lib/log/tests/message_dictionary_unittest.cc
@@ -28,16 +28,17 @@ using namespace std;
// global dictionary is loaded, the former should be marked as a duplicate
// and the latter should be present.
-static const char* values[] = {
- "LOG_DUPLICATE_NAMESPACE", "duplicate $NAMESPACE directive found",
+namespace {
+const char* values[] = {
+ // This message for DUPLICATE_NAMESPACE must be copied from
+ // ../log_messages.mes; otherwise logger check might fail.
+ "LOG_DUPLICATE_NAMESPACE", "line %1: duplicate $NAMESPACE directive found",
"NEWSYM", "new symbol added",
NULL
};
MessageInitializer init(values);
-
-
-
+}
class MessageDictionaryTest : public ::testing::Test {
protected:
diff --git a/src/lib/log/tests/run_initializer_unittests.cc b/src/lib/log/tests/run_initializer_unittests.cc
index 54ee120..6660215 100644
--- a/src/lib/log/tests/run_initializer_unittests.cc
+++ b/src/lib/log/tests/run_initializer_unittests.cc
@@ -12,6 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdlib.h>
#include <gtest/gtest.h>
#include <util/unittests/run_all.h>
diff --git a/src/lib/log/tests/run_unittests.cc b/src/lib/log/tests/run_unittests.cc
index 8a9d1e5..019a548 100644
--- a/src/lib/log/tests/run_unittests.cc
+++ b/src/lib/log/tests/run_unittests.cc
@@ -12,6 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdlib.h>
#include <gtest/gtest.h>
#include <util/unittests/run_all.h>
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index 658db1e..196a8b9 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -23,6 +23,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 6670ee7..cb59e6f 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -22,6 +22,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
diff --git a/src/lib/python/isc/ddns/libddns_messages.mes b/src/lib/python/isc/ddns/libddns_messages.mes
index 12c3a8c..7e34e70 100644
--- a/src/lib/python/isc/ddns/libddns_messages.mes
+++ b/src/lib/python/isc/ddns/libddns_messages.mes
@@ -15,6 +15,17 @@
# No namespace declaration - these constants go in the global namespace
# of the libddns_messages python module.
+% LIBDDNS_DATASRC_ERROR update client %1 failed due to data source error: %2
+An update attempt failed due to some error in the corresponding data
+source. This is generally an unexpected event, but can still happen
+for various reasons such as DB lock contention or a failure of the
+backend DB server. The cause of the error is also logged. It's
+advisable to check the message, and, if necessary, take an appropriate
+action (e.g., restarting the DB server if it dies). If this message
+is logged the data source isn't modified due to the
+corresponding update request. When used by the b10-ddns, the server
+will return a response with an RCODE of SERVFAIL.
+
% LIBDDNS_PREREQ_FORMERR update client %1 for zone %2: Format error in prerequisite (%3). Non-zero TTL.
The prerequisite with the given name, class and type is not well-formed.
The specific prerequisite is shown. In this case, it has a non-zero TTL value.
@@ -58,7 +69,7 @@ specified NAME. Note that this prerequisite IS satisfied by
empty nonterminals.
% LIBDDNS_PREREQ_NOTZONE update client %1 for zone %2: prerequisite not in zone (%3)
-A DNS UPDATE prerequisite has a name that does not appear to be inside
+A DDNS UPDATE prerequisite has a name that does not appear to be inside
the zone specified in the Zone section of the UPDATE message.
The specific prerequisite is shown. A NOTZONE error response is sent to
the client.
@@ -93,10 +104,57 @@ RRset exists (value independent). At least one RR with a
specified NAME and TYPE (in the zone and class specified by
the Zone Section) must exist.
+% LIBDDNS_UPDATE_ADD_BAD_TYPE update client %1 for zone %2: update addition RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to add a record of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
% LIBDDNS_UPDATE_APPROVED update client %1 for zone %2 approved
Debug message. An update request was approved in terms of the zone's
update ACL.
+% LIBDDNS_UPDATE_BAD_CLASS update client %1 for zone %2: bad class in update RR: %3
+The Update section of a DDNS update message contains an RRset with
+a bad class. The class of the update RRset must be either the same
+as the class in the Zone Section, ANY, or NONE.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+An error occurred while committing the DDNS update changes to the
+datasource. The specific error is printed. A SERVFAIL response is sent
+back to the client.
+
+% LIBDDNS_UPDATE_DELETE_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete an rrset of an invalid type. Most likely the
+record has an RRType that is considered a 'meta' type, which
+cannot be zone content data. The specific record is shown.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-zero TTL. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY update client %1 for zone %2: update deletion RR contains data %3
+The Update section of a DDNS update message contains a 'delete rrset'
+statement with a non-empty RRset. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE update client %1 for zone %2: update deletion RR bad type: %3
+The Update section of a DDNS update message contains a statement
+that tries to delete one or more rrs of an invalid type. Most
+likely the records have an RRType that is considered a 'meta'
+type, which cannot be zone content data. The specific record is
+shown. A FORMERR response is sent back to the client.
+
+% LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL update client %1 for zone %2: update deletion RR has non-zero TTL: %3
+The Update section of a DDNS update message contains a 'delete rrs'
+statement with a non-zero TTL. This is not allowed by the protocol.
+A FORMERR response is sent back to the client.
+
% LIBDDNS_UPDATE_DENIED update client %1 for zone %2 denied
Informational message. An update request was denied because it was
rejected by the zone's update ACL. When this library is used by
@@ -134,9 +192,23 @@ configuration of those clients to suppress the requests. As specified
in Section 3.1 of RFC2136, the receiving server will return a response
with an RCODE of NOTAUTH.
-% LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update update client %1 for zone %2: result code %3
+% LIBDDNS_UPDATE_NOTZONE update client %1 for zone %2: update RR out of zone %3
+A DDNS UPDATE record has a name that does not appear to be inside
+the zone specified in the Zone section of the UPDATE message.
+The specific update record is shown. A NOTZONE error response is
+sent to the client.
+
+% LIBDDNS_UPDATE_PREREQUISITE_FAILED prerequisite failed in update client %1 for zone %2: result code %3
The handling of the prerequisite section (RFC2136 Section 3.2) found
that one of the prerequisites was not satisfied. The result code
should give more information on what prerequisite type failed.
If the result code is FORMERR, the prerequisite section was not well-formed.
An error response with the given result code is sent back to the client.
+
+% LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION update client %1 for zone %2: uncaught exception while processing update section: %3
+An uncaught exception was encountered while processing the Update
+section of a DDNS message. The specific exception is shown in the log message.
+To make sure DDNS service is not interrupted, this problem is caught instead
+of re-raised; the update is aborted, and a SERVFAIL is sent back to the client.
+This is most probably a bug in the DDNS code, but *could* be caused by
+the data source.
diff --git a/src/lib/python/isc/ddns/session.py b/src/lib/python/isc/ddns/session.py
index 6a4079d..366bc8b 100644
--- a/src/lib/python/isc/ddns/session.py
+++ b/src/lib/python/isc/ddns/session.py
@@ -19,6 +19,8 @@ from isc.log import *
from isc.ddns.logger import logger, ClientFormatter, ZoneFormatter,\
RRsetFormatter
from isc.log_messages.libddns_messages import *
+from isc.datasrc import ZoneFinder
+import isc.xfrin.diff
from isc.acl.acl import ACCEPT, REJECT, DROP
import copy
@@ -59,6 +61,122 @@ class UpdateError(Exception):
self.rcode = rcode
self.nolog = nolog
+def foreach_rr(rrset):
+ '''
+ Generator that creates a new RRset with one RR from
+ the given RRset upon each iteration, usable in calls that
+ need to loop over an RRset and perform an action with each
+ of the individual RRs in it.
+ Example:
+ for rr in foreach_rr(rrset):
+ print(str(rr))
+ '''
+ for rdata in rrset.get_rdata():
+ rr = isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())
+ rr.add_rdata(rdata)
+ yield rr
+
+def convert_rrset_class(rrset, rrclass):
+ '''Returns a (new) rrset with the data from the given rrset,
+ but of the given class. Useful to convert from NONE and ANY to
+ a real class.
+ Note that the caller should be careful what to convert;
+    any DNS error that could happen during wire-format reading
+ could technically occur here, and is not caught by this helper.
+ '''
+ new_rrset = isc.dns.RRset(rrset.get_name(), rrclass,
+ rrset.get_type(), rrset.get_ttl())
+ for rdata in rrset.get_rdata():
+        # Rdata class is not modifiable, and must match rrset's
+        # class, so we need to do some ugly conversion here.
+ # And we cannot use to_text() (since the class may be unknown)
+ wire = rdata.to_wire(bytes())
+ new_rrset.add_rdata(isc.dns.Rdata(rrset.get_type(), rrclass, wire))
+ return new_rrset
+
+def collect_rrsets(collection, rrset):
+ '''
+ Helper function to collect similar rrsets.
+ Collect all rrsets with the same name, class, and type
+ collection is the currently collected list of RRsets,
+ rrset is the RRset to add;
+ if an RRset with the same name, class and type as the
+ given rrset exists in the collection, its rdata fields
+ are added to that RRset. Otherwise, the rrset is added
+ to the given collection.
+ TTL is ignored.
+ This method does not check rdata contents for duplicate
+ values.
+
+ The collection and its rrsets are modified in-place,
+ this method does not return anything.
+ '''
+ found = False
+ for existing_rrset in collection:
+ if existing_rrset.get_name() == rrset.get_name() and\
+ existing_rrset.get_class() == rrset.get_class() and\
+ existing_rrset.get_type() == rrset.get_type():
+ for rdata in rrset.get_rdata():
+ existing_rrset.add_rdata(rdata)
+ found = True
+ if not found:
+ collection.append(rrset)
+
+class DDNS_SOA:
+ '''Class to handle the SOA in the DNS update '''
+
+ def __get_serial_internal(self, origin_soa):
+ '''Get serial number from soa'''
+ return Serial(int(origin_soa.get_rdata()[0].to_text().split()[2]))
+
+ def __write_soa_internal(self, origin_soa, soa_num):
+ '''Write back serial number to soa'''
+ new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(),
+ RRType.SOA(), origin_soa.get_ttl())
+ soa_rdata_parts = origin_soa.get_rdata()[0].to_text().split()
+ soa_rdata_parts[2] = str(soa_num.get_value())
+ new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(),
+ " ".join(soa_rdata_parts)))
+ return new_soa
+
+ def soa_update_check(self, origin_soa, new_soa):
+ '''Check whether the new soa is valid. If the serial number is bigger
+ than the old one, it is valid, then return True, otherwise, return
+        False. Make sure the origin_soa and new_soa parameters are not None
+        before invoking soa_update_check.
+ Parameters:
+ origin_soa, old SOA resource record.
+ new_soa, new SOA resource record.
+ Output:
+ if the serial number of new soa is bigger than the old one, return
+ True, otherwise return False.
+ '''
+ old_serial = self.__get_serial_internal(origin_soa)
+ new_serial = self.__get_serial_internal(new_soa)
+ if(new_serial > old_serial):
+ return True
+ else:
+ return False
+
+ def update_soa(self, origin_soa, inc_number = 1):
+ ''' Update the soa number incrementally as RFC 2136. Please make sure
+        that the origin_soa exists and is not None before invoking this function.
+ Parameters:
+ origin_soa, the soa resource record which will be updated.
+ inc_number, the number which will be added into the serial number of
+ origin_soa, the default value is one.
+ Output:
+        The new origin soa whose serial number has been updated.
+ '''
+ soa_num = self.__get_serial_internal(origin_soa)
+ soa_num = soa_num + inc_number
+ if soa_num.get_value() == 0:
+ soa_num = soa_num + 1
+ return self.__write_soa_internal(origin_soa, soa_num)
+
class UpdateSession:
'''Protocol handling for a single dynamic update request.
@@ -89,6 +207,7 @@ class UpdateSession:
self.__tsig = req_message.get_tsig_record()
self.__client_addr = client_addr
self.__zone_config = zone_config
+ self.__added_soa = None
def get_message(self):
'''Return the update message.
@@ -122,17 +241,24 @@ class UpdateSession:
'''
try:
- datasrc_client, zname, zclass = self.__get_update_zone()
- # conceptual code that would follow
- prereq_result = self.__check_prerequisites(datasrc_client,
- zname, zclass)
+ self._get_update_zone()
+ # Contrary to what RFC2136 specifies, we do ACL checks before
+ # prerequisites. It's now generally considered to be a bad
+ # idea, and actually does harm such as information
+ # leak. It should make more sense to prevent any security issues
+ # by performing ACL check as early as possible.
+ self.__check_update_acl(self.__zname, self.__zclass)
+ self._create_diff()
+ prereq_result = self.__check_prerequisites()
if prereq_result != Rcode.NOERROR():
self.__make_response(prereq_result)
- return UPDATE_ERROR, zname, zclass
- self.__check_update_acl(zname, zclass)
- # self.__do_update()
- # self.__make_response(Rcode.NOERROR())
- return UPDATE_SUCCESS, zname, zclass
+ return UPDATE_ERROR, self.__zname, self.__zclass
+ update_result = self.__do_update()
+ if update_result != Rcode.NOERROR():
+ self.__make_response(update_result)
+ return UPDATE_ERROR, self.__zname, self.__zclass
+ self.__make_response(Rcode.NOERROR())
+ return UPDATE_SUCCESS, self.__zname, self.__zclass
except UpdateError as e:
if not e.nolog:
logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_ERROR,
@@ -145,16 +271,26 @@ class UpdateSession:
return UPDATE_ERROR, None, None
self.__message = None
return UPDATE_DROP, None, None
+ except isc.datasrc.Error as e:
+ logger.error(LIBDDNS_DATASRC_ERROR,
+ ClientFormatter(self.__client_addr, self.__tsig), e)
+ self.__make_response(Rcode.SERVFAIL())
+ return UPDATE_ERROR, None, None
- def __get_update_zone(self):
+ def _get_update_zone(self):
'''Parse the zone section and find the zone to be updated.
If the zone section is valid and the specified zone is found in
- the configuration, it returns a tuple of:
- - A matching data source that contains the specified zone
- - The zone name as a Name object
- - The zone class as an RRClass object
-
+ the configuration, sets private member variables for this session:
+ __datasrc_client: A matching data source that contains the specified
+ zone
+ __zname: The zone name as a Name object
+ __zclass: The zone class as an RRClass object
+ If this method raises an exception, these members are not set.
+
+ Note: This method is protected for ease of use in tests, where
+ methods are tested that need the setup done here without calling
+ the full handle() method.
'''
# Validation: the zone section must contain exactly one question,
# and it must be of type SOA.
@@ -172,7 +308,10 @@ class UpdateSession:
zclass = zrecord.get_class()
zone_type, datasrc_client = self.__zone_config.find_zone(zname, zclass)
if zone_type == isc.ddns.zone_config.ZONE_PRIMARY:
- return datasrc_client, zname, zclass
+ self.__datasrc_client = datasrc_client
+ self.__zname = zname
+ self.__zclass = zclass
+ return
elif zone_type == isc.ddns.zone_config.ZONE_SECONDARY:
# We are a secondary server; since we don't yet support update
# forwarding, we return 'not implemented'.
@@ -186,6 +325,26 @@ class UpdateSession:
ZoneFormatter(zname, zclass))
raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH(), True)
+ def _create_diff(self):
+ '''
+ Initializes the internal data structure used for searching current
+ data and for adding and deleting data. This is supposed to be called
+ after ACL checks but before prerequisite checks (since the latter
+ needs the find calls provided by the Diff class).
+ Adds the private member:
+ __diff: A buffer of changes made against the zone by this update
+ This object also contains find() calls, see documentation
+ of the Diff class.
+
+ Note: This method is protected for ease of use in tests, where
+ methods are tested that need the setup done here without calling
+ the full handle() method.
+ '''
+ self.__diff = isc.xfrin.diff.Diff(self.__datasrc_client,
+ self.__zname,
+ journaling=True,
+ single_update_mode=True)
+
def __check_update_acl(self, zname, zclass):
'''Apply update ACL for the zone to be updated.'''
acl = self.__zone_config.get_update_acl(zname, zclass)
@@ -217,7 +376,7 @@ class UpdateSession:
self.__message.clear_section(SECTION_ZONE)
self.__message.set_rcode(rcode)
- def __prereq_rrset_exists(self, datasrc_client, rrset):
+ def __prereq_rrset_exists(self, rrset):
'''Check whether an rrset with the given name and type exists. Class,
TTL, and Rdata (if any) of the given RRset are ignored.
RFC2136 Section 2.4.1.
@@ -229,22 +388,18 @@ class UpdateSession:
only return what the result code would be (and not read/copy
any actual data).
'''
- _, finder = datasrc_client.find_zone(rrset.get_name())
- result, _, _ = finder.find(rrset.get_name(), rrset.get_type(),
- finder.NO_WILDCARD | finder.FIND_GLUE_OK)
- return result == finder.SUCCESS
+ result, _, _ = self.__diff.find(rrset.get_name(), rrset.get_type())
+ return result == ZoneFinder.SUCCESS
- def __prereq_rrset_exists_value(self, datasrc_client, rrset):
+ def __prereq_rrset_exists_value(self, rrset):
'''Check whether an rrset that matches name, type, and rdata(s) of the
given rrset exists.
RFC2136 Section 2.4.2
Returns True if the prerequisite is satisfied, False otherwise.
'''
- _, finder = datasrc_client.find_zone(rrset.get_name())
- result, found_rrset, _ = finder.find(rrset.get_name(), rrset.get_type(),
- finder.NO_WILDCARD |
- finder.FIND_GLUE_OK)
- if result == finder.SUCCESS and\
+ result, found_rrset, _ = self.__diff.find(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.SUCCESS and\
rrset.get_name() == found_rrset.get_name() and\
rrset.get_type() == found_rrset.get_type():
# We need to match all actual RRs, unfortunately there is no
@@ -262,15 +417,15 @@ class UpdateSession:
return len(found_rdata) == 0
return False
- def __prereq_rrset_does_not_exist(self, datasrc_client, rrset):
+ def __prereq_rrset_does_not_exist(self, rrset):
'''Check whether no rrsets with the same name and type as the given
rrset exist.
RFC2136 Section 2.4.3.
Returns True if the prerequisite is satisfied, False otherwise.
'''
- return not self.__prereq_rrset_exists(datasrc_client, rrset)
+ return not self.__prereq_rrset_exists(rrset)
- def __prereq_name_in_use(self, datasrc_client, rrset):
+ def __prereq_name_in_use(self, rrset):
'''Check whether the name of the given RRset is in use (i.e. has
1 or more RRs).
RFC2136 Section 2.4.4
@@ -282,37 +437,43 @@ class UpdateSession:
to only return what the result code would be (and not read/copy
any actual data).
'''
- _, finder = datasrc_client.find_zone(rrset.get_name())
- result, rrsets, flags = finder.find_all(rrset.get_name(),
- finder.NO_WILDCARD |
- finder.FIND_GLUE_OK)
- if result == finder.SUCCESS and\
- (flags & finder.RESULT_WILDCARD == 0):
+ result, rrsets, flags = self.__diff.find_all(rrset.get_name())
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
return True
return False
- def __prereq_name_not_in_use(self, datasrc_client, rrset):
+ def __prereq_name_not_in_use(self, rrset):
'''Check whether the name of the given RRset is not in use (i.e. does
not exist at all, or is an empty nonterminal).
RFC2136 Section 2.4.5.
Returns True if the prerequisite is satisfied, False otherwise.
'''
- return not self.__prereq_name_in_use(datasrc_client, rrset)
+ return not self.__prereq_name_in_use(rrset)
- def __check_prerequisites(self, datasrc_client, zname, zclass):
+ def __check_in_zone(self, rrset):
+ '''Returns true if the name of the given rrset is equal to
+ or a subdomain of the zname from the Zone Section.'''
+ relation = rrset.get_name().compare(self.__zname).get_relation()
+ return relation == NameComparisonResult.SUBDOMAIN or\
+ relation == NameComparisonResult.EQUAL
+
+ def __check_prerequisites(self):
'''Check the prerequisites section of the UPDATE Message.
RFC2136 Section 2.4.
Returns a dns Rcode signaling either no error (Rcode.NOERROR())
or that one of the prerequisites failed (any other Rcode).
'''
+
+ # Temporary array to store exact-match RRsets
+ exact_match_rrsets = []
+
for rrset in self.__message.get_section(SECTION_PREREQUISITE):
# First check if the name is in the zone
- relation = rrset.get_name().compare(zname).get_relation()
- if relation != NameComparisonResult.SUBDOMAIN and\
- relation != NameComparisonResult.EQUAL:
+ if not self.__check_in_zone(rrset):
logger.info(LIBDDNS_PREREQ_NOTZONE,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
return Rcode.NOTZONE()
@@ -322,24 +483,23 @@ class UpdateSession:
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_ANY,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
return Rcode.FORMERR()
elif rrset.get_type() == RRType.ANY():
- if not self.__prereq_name_in_use(datasrc_client,
- rrset):
+ if not self.__prereq_name_in_use(rrset):
rcode = Rcode.NXDOMAIN()
logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
else:
- if not self.__prereq_rrset_exists(datasrc_client, rrset):
+ if not self.__prereq_rrset_exists(rrset):
rcode = Rcode.NXRRSET()
logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
elif rrset.get_class() == RRClass.NONE():
@@ -347,49 +507,358 @@ class UpdateSession:
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_NONE,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
return Rcode.FORMERR()
elif rrset.get_type() == RRType.ANY():
- if not self.__prereq_name_not_in_use(datasrc_client,
- rrset):
+ if not self.__prereq_name_not_in_use(rrset):
rcode = Rcode.YXDOMAIN()
logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
else:
- if not self.__prereq_rrset_does_not_exist(datasrc_client,
- rrset):
+ if not self.__prereq_rrset_does_not_exist(rrset):
rcode = Rcode.YXRRSET()
logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
- elif rrset.get_class() == zclass:
+ elif rrset.get_class() == self.__zclass:
if rrset.get_ttl().get_value() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
return Rcode.FORMERR()
else:
- if not self.__prereq_rrset_exists_value(datasrc_client,
- rrset):
- rcode = Rcode.NXRRSET()
- logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
- ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
- RRsetFormatter(rrset), rcode)
- return rcode
+ collect_rrsets(exact_match_rrsets, rrset)
else:
logger.info(LIBDDNS_PREREQ_FORMERR_CLASS,
ClientFormatter(self.__client_addr),
- ZoneFormatter(zname, zclass),
+ ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
return Rcode.FORMERR()
+ for collected_rrset in exact_match_rrsets:
+ if not self.__prereq_rrset_exists_value(collected_rrset):
+ rcode = Rcode.NXRRSET()
+ logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(collected_rrset), rcode)
+ return rcode
+
# All prerequisites are satisfied
return Rcode.NOERROR()
+
+ def __set_soa_rrset(self, rrset):
+ '''Sets the given rrset to the member __added_soa (which
+ is used by __do_update for updating the SOA record'''
+ self.__added_soa = rrset
+
+ def __do_prescan(self):
+ '''Perform the prescan as defined in RFC2136 section 3.4.1.
+        This method has a side-effect; it sets self.__added_soa if
+ it encounters the addition of a SOA record in the update
+ list (so serial can be checked by update later, etc.).
+ It puts the added SOA in self.__added_soa.
+ '''
+ for rrset in self.__message.get_section(SECTION_UPDATE):
+ if not self.__check_in_zone(rrset):
+ logger.info(LIBDDNS_UPDATE_NOTZONE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.NOTZONE()
+ if rrset.get_class() == self.__zclass:
+ # In fact, all metatypes are in a specific range,
+ # so one check can test TKEY to ANY
+ # (some value check is needed anyway, since we do
+ # not have defined RRtypes for MAILA and MAILB)
+ if rrset.get_type().get_code() >= 249:
+ logger.info(LIBDDNS_UPDATE_ADD_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type() == RRType.SOA():
+ # In case there's multiple soa records in the update
+ # somehow, just take the last
+ for rr in foreach_rr(rrset):
+ self.__set_soa_rrset(rr)
+ elif rrset.get_class() == RRClass.ANY():
+ if rrset.get_ttl().get_value() != 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_rdata_count() > 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type().get_code() >= 249 and\
+ rrset.get_type().get_code() <= 254:
+ logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ elif rrset.get_class() == RRClass.NONE():
+ if rrset.get_ttl().get_value() != 0:
+ logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ if rrset.get_type().get_code() >= 249:
+ logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ else:
+ logger.info(LIBDDNS_UPDATE_BAD_CLASS,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ RRsetFormatter(rrset))
+ return Rcode.FORMERR()
+ return Rcode.NOERROR()
+
+ def __do_update_add_single_rr(self, rr, existing_rrset):
+ '''Helper for __do_update_add_rrs_to_rrset: only add the
+ rr if it is not present yet
+ (note that rr here should already be a single-rr rrset)
+ '''
+ if existing_rrset is None:
+ self.__diff.add_data(rr)
+ else:
+ rr_rdata = rr.get_rdata()[0]
+ if not rr_rdata in existing_rrset.get_rdata():
+ self.__diff.add_data(rr)
+
+ def __do_update_add_rrs_to_rrset(self, rrset):
+ '''Add the rrs from the given rrset to the internal diff.
+ There is handling for a number of special cases mentioned
+ in RFC2136;
+ - If the addition is a CNAME, but existing data at its
+ name is not, the addition is ignored, and vice versa.
+ - If it is a CNAME, and existing data is too, it is
+ replaced (existing data is deleted)
+ An additional restriction is that SOA data is ignored as
+ well (it is handled separately by the __do_update method).
+
+ Note that in the (near) future, this method may have
+ addition special-cases processing.
+ '''
+ # For a number of cases, we may need to remove data in the zone
+ # (note; SOA is handled separately by __do_update, so that one
+        # is explicitly ignored here)
+ if rrset.get_type() == RRType.SOA():
+ return
+ result, orig_rrset, _ = self.__diff.find(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.CNAME:
+ # Ignore non-cname rrs that try to update CNAME records
+ # (if rrset itself is a CNAME, the finder result would be
+ # SUCCESS, see next case)
+ return
+ elif result == ZoneFinder.SUCCESS:
+ # if update is cname, and zone rr is not, ignore
+ if rrset.get_type() == RRType.CNAME():
+ # Remove original CNAME record (the new one
+ # is added below)
+ self.__diff.delete_data(orig_rrset)
+ # We do not have WKS support at this time, but if there
+ # are special Update equality rules such as for WKS, and
+ # we do have support for the type, this is where the check
+ # (and potential delete) would go.
+ elif result == ZoneFinder.NXRRSET:
+ # There is data present, but not for this type.
+ # If this type is CNAME, ignore the update
+ if rrset.get_type() == RRType.CNAME():
+ return
+ for rr in foreach_rr(rrset):
+ self.__do_update_add_single_rr(rr, orig_rrset)
+
+ def __do_update_delete_rrset(self, rrset):
+ '''Deletes the rrset with the name and type of the given
+ rrset from the zone data (by putting all existing data
+ in the internal diff as delete statements).
+ Special cases: if the delete statement is for the
+ zone's apex, and the type is either SOA or NS, it
+ is ignored.'''
+ # find the rrset with local updates
+ result, to_delete, _ = self.__diff.find_updated(rrset.get_name(),
+ rrset.get_type())
+ if result == ZoneFinder.SUCCESS:
+ if to_delete.get_name() == self.__zname and\
+ (to_delete.get_type() == RRType.SOA() or\
+ to_delete.get_type() == RRType.NS()):
+ # ignore
+ return
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __ns_deleter_helper(self, rrset):
+ '''Special case helper for deleting NS resource records
+ at the zone apex. In that scenario, the last NS record
+ may never be removed (and any action that would do so
+ should be ignored).
+ '''
+ # Find the current NS rrset, including local additions and deletions
+ result, orig_rrset, _ = self.__diff.find_updated(rrset.get_name(),
+ rrset.get_type())
+
+ # Even a real rrset comparison wouldn't help here...
+ # The goal is to make sure that after deletion of the
+ # given rrset, at least 1 NS record is left (at the apex).
+ # So we make a (shallow) copy of the existing rrset,
+ # and for each rdata in the to_delete set, we check if it wouldn't
+ # delete the last one. If it would, that specific one is ignored.
+ # If it would not, the rdata is removed from the temporary list
+ orig_rrset_rdata = copy.copy(orig_rrset.get_rdata())
+ for rdata in rrset.get_rdata():
+ if len(orig_rrset_rdata) == 1 and rdata == orig_rrset_rdata[0]:
+ # ignore
+ continue
+ else:
+ # create an individual RRset for deletion
+ to_delete = isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())
+ to_delete.add_rdata(rdata)
+ orig_rrset_rdata.remove(rdata)
+ self.__diff.delete_data(to_delete)
+
+ def __do_update_delete_name(self, rrset):
+ '''Delete all data at the name of the given rrset,
+ by adding all data found by find_all as delete statements
+ to the internal diff.
+ Special case: if the name is the zone's apex, SOA and
+ NS records are kept.
+ '''
+ # Find everything with the name, including local additions
+ result, rrsets, flags = self.__diff.find_all_updated(rrset.get_name())
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
+ for to_delete in rrsets:
+ # if name == self.__zname and type is soa or ns, don't delete!
+ if to_delete.get_name() == self.__zname and\
+ (to_delete.get_type() == RRType.SOA() or
+ to_delete.get_type() == RRType.NS()):
+ continue
+ else:
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __do_update_delete_rrs_from_rrset(self, rrset):
+ '''Deletes all resource records in the given rrset from the
+ zone. Resource records that do not exist are ignored.
+        If the rrset is of type SOA, it is ignored.
+ Uses the __ns_deleter_helper if the rrset's name is the
+ zone's apex, and the type is NS.
+ '''
+ # Delete all rrs in the rrset, except if name=self.__zname and type=soa, or
+ # type = ns and there is only one left (...)
+
+ # The delete does not want class NONE, we would not have gotten here
+ # if it wasn't, but now is a good time to change it to the zclass.
+ to_delete = convert_rrset_class(rrset, self.__zclass)
+
+ if rrset.get_name() == self.__zname:
+ if rrset.get_type() == RRType.SOA():
+ # ignore
+ return
+ elif rrset.get_type() == RRType.NS():
+ # hmm. okay. annoying. There must be at least one left,
+ # delegate to helper method
+ self.__ns_deleter_helper(to_delete)
+ return
+ for rr in foreach_rr(to_delete):
+ self.__diff.delete_data(rr)
+
+ def __update_soa(self):
+ '''Checks the member value __added_soa, and depending on
+ whether it has been set and what its value is, creates
+ a new SOA if necessary.
+ Then removes the original SOA and adds the new one,
+ by adding the needed operations to the internal diff.'''
+ # Get the existing SOA
+ # if a new soa was specified, add that one, otherwise, do the
+ # serial magic and add the newly created one
+
+ # get it from DS and to increment and stuff
+ result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA(),
+ ZoneFinder.NO_WILDCARD |
+ ZoneFinder.FIND_GLUE_OK)
+ # We may implement recovering from missing SOA data at some point, but
+ # for now servfail on such a broken state
+ if result != ZoneFinder.SUCCESS:
+ raise UpdateError("Error finding SOA record in datasource.",
+ self.__zname, self.__zclass, Rcode.SERVFAIL())
+ serial_operation = DDNS_SOA()
+ if self.__added_soa is not None and\
+ serial_operation.soa_update_check(old_soa, self.__added_soa):
+ new_soa = self.__added_soa
+ else:
+ # increment goes here
+ new_soa = serial_operation.update_soa(old_soa)
+
+ self.__diff.delete_data(old_soa)
+ self.__diff.add_data(new_soa)
+
+ def __do_update(self):
+ '''Scan, check, and execute the Update section in the
+ DDNS Update message.
+ Returns an Rcode to signal the result (NOERROR upon success,
+ any error result otherwise).
+ '''
+ # prescan
+ prescan_result = self.__do_prescan()
+ if prescan_result != Rcode.NOERROR():
+ return prescan_result
+
+ # update
+ try:
+ # Do special handling for SOA first
+ self.__update_soa()
+
+ # Algorithm from RFC2136 Section 3.4
+ # Note that this works on full rrsets, not individual RRs.
+ # Some checks might be easier with individual RRs, but only if we
+ # would use the ZoneUpdater directly (so we can query the
+ # 'zone-as-it-would-be-so-far'. However, due to the current use
+ # of the Diff class, this is not the case, and therefore it
+ # is easier to work with full rrsets for the most parts
+ # (less lookups needed; conversion to individual rrs is
+ # the same effort whether it is done here or in the several
+ # do_update statements)
+ for rrset in self.__message.get_section(SECTION_UPDATE):
+ if rrset.get_class() == self.__zclass:
+ self.__do_update_add_rrs_to_rrset(rrset)
+ elif rrset.get_class() == RRClass.ANY():
+ if rrset.get_type() == RRType.ANY():
+ self.__do_update_delete_name(rrset)
+ else:
+ self.__do_update_delete_rrset(rrset)
+ elif rrset.get_class() == RRClass.NONE():
+ self.__do_update_delete_rrs_from_rrset(rrset)
+
+ self.__diff.commit()
+ return Rcode.NOERROR()
+ except isc.datasrc.Error as dse:
+ logger.info(LIBDDNS_UPDATE_DATASRC_ERROR, dse)
+ return Rcode.SERVFAIL()
+ except Exception as uce:
+ logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION,
+ ClientFormatter(self.__client_addr),
+ ZoneFormatter(self.__zname, self.__zclass),
+ uce)
+ return Rcode.SERVFAIL()
diff --git a/src/lib/python/isc/ddns/tests/session_tests.py b/src/lib/python/isc/ddns/tests/session_tests.py
index 7f15480..f7c2d3c 100644
--- a/src/lib/python/isc/ddns/tests/session_tests.py
+++ b/src/lib/python/isc/ddns/tests/session_tests.py
@@ -18,7 +18,7 @@ import shutil
import isc.log
import unittest
from isc.dns import *
-from isc.datasrc import DataSourceClient
+from isc.datasrc import DataSourceClient, ZoneFinder
from isc.ddns.session import *
from isc.ddns.zone_config import *
@@ -39,7 +39,7 @@ TEST_CLIENT4 = ('192.0.2.1', 53)
TEST_TSIG_KEY = TSIGKey("example.org:SFuWd/q99SzF8Yzd1QbB9g==")
def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
- tsig_key=None):
+ updates=[], tsig_key=None):
msg = Message(Message.RENDER)
msg.set_qid(5353) # arbitrary chosen
msg.set_opcode(Opcode.UPDATE())
@@ -48,6 +48,8 @@ def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
msg.add_question(z)
for p in prerequisites:
msg.add_rrset(SECTION_PREREQUISITE, p)
+ for u in updates:
+ msg.add_rrset(SECTION_UPDATE, u)
renderer = MessageRenderer()
if tsig_key is not None:
@@ -57,11 +59,133 @@ def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
# re-read the created data in the parse mode
msg.clear(Message.PARSE)
- msg.from_wire(renderer.get_data())
+ msg.from_wire(renderer.get_data(), Message.PRESERVE_ORDER)
return msg
-class SesseionTestBase(unittest.TestCase):
+def add_rdata(rrset, rdata):
+ '''
+ Helper function for easily adding Rdata fields to RRsets.
+ This function assumes the given rdata is of type string or bytes,
+ and corresponds to the given rrset
+ '''
+ rrset.add_rdata(isc.dns.Rdata(rrset.get_type(),
+ rrset.get_class(),
+ rdata))
+
+def create_rrset(name, rrclass, rrtype, ttl, rdatas = []):
+ '''
+ Helper method to easily create RRsets, auto-converts
+ name, rrclass, rrtype, and ttl (if possible, through their
+ respective constructors)
+ rdatas is a list of rr data strings, or bytestrings, which
+ should match the RRType of the rrset to create
+ '''
+ if type(name) != Name:
+ name = Name(name)
+ if type(rrclass) != RRClass:
+ rrclass = RRClass(rrclass)
+ if type(rrtype) != RRType:
+ rrtype = RRType(rrtype)
+ if type(ttl) != RRTTL:
+ ttl = RRTTL(ttl)
+ rrset = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ for rdata in rdatas:
+ add_rdata(rrset, rdata)
+ return rrset
+
+class SessionModuleTests(unittest.TestCase):
+ '''Tests for module-level functions in the session.py module'''
+
+ def test_foreach_rr_in_rrset(self):
+ rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n"], l)
+
+ add_rdata(rrset, "192.0.2.2")
+ add_rdata(rrset, "192.0.2.3")
+
+ # but through the generator, there should be several 1-line entries
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n",
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ "www.example.org. 3600 IN A 192.0.2.3\n",
+ ], l)
+
+ def test_convert_rrset_class(self):
+ # Converting an RRSET to a different class should work
+ # if the rdata types can be converted
+ rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
+
+ rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ str(rrset2))
+
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
+ self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 CLASS254 " +
+ "A \\# 4 c0000202\n",
+ str(rrset3))
+
+ # depending on what type of bad data is given, a number
+ # of different exceptions could be raised (TODO: i recall
+ # there was a ticket about making a better hierarchy for
+ # dns/parsing related exceptions)
+ self.assertRaises(InvalidRdataLength, convert_rrset_class,
+ rrset, RRClass.CH())
+ add_rdata(rrset, b'\xc0\x00')
+ self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
+ rrset, RRClass.IN())
+
+ def test_collect_rrsets(self):
+ '''
+ Tests the 'rrset collector' method, which collects rrsets
+ with the same name and type
+ '''
+ collected = []
+
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ]))
+ # Same name and class, different type
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "two" ]))
+ # Same class and type as an existing one, different name
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.3" ]))
+ # Same name and type as an existing one, different class
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "two" ]))
+
+ strings = [ rrset.to_text() for rrset in collected ]
+ # note + vs , in this list
+ expected = ['a.example.org. 0 IN A 192.0.2.1\n' +
+ 'a.example.org. 0 IN A 192.0.2.2\n',
+ 'a.example.org. 0 IN TXT "one"\n' +
+ 'a.example.org. 0 IN TXT "two"\n',
+ 'b.example.org. 0 IN A 192.0.2.3\n' +
+ 'b.example.org. 0 IN A 192.0.2.4\n',
+ 'a.example.org. 0 CH TXT "one"\n' +
+ 'a.example.org. 0 CH TXT "two"\n']
+
+ self.assertEqual(expected, strings)
+
+class SessionTestBase(unittest.TestCase):
'''Base class for all session related tests.
It just initializes common test parameters in its setUp() and defines
@@ -76,9 +200,17 @@ class SesseionTestBase(unittest.TestCase):
self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
REQUEST_LOADER.load([{"action": "ACCEPT"}])}
self._session = UpdateSession(self._update_msg, TEST_CLIENT4,
- ZoneConfig([], TEST_RRCLASS,
+ ZoneConfig(set(), TEST_RRCLASS,
self._datasrc_client,
self._acl_map))
+ self._session._get_update_zone()
+ self._session._create_diff()
+
+ def tearDown(self):
+ # With the Updater created in _get_update_zone, and tests
+ # doing all kinds of crazy stuff, one might get database locked
+ # errors if it doesn't clean up explicitly after each test
+ self._session = None
def check_response(self, msg, expected_rcode):
'''Perform common checks on update response message.'''
@@ -92,7 +224,66 @@ class SesseionTestBase(unittest.TestCase):
self.assertEqual(0, msg.get_rr_count(SECTION_UPDATE))
self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
-class SessionTest(SesseionTestBase):
+class TestDDNSSOA(unittest.TestCase):
+ '''unittest for the DDNS_SOA'''
+ def test_update_soa(self):
+ '''unittest for update_soa function'''
+ soa_update = DDNS_SOA()
+ soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"])
+ expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. "
+ + "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
+ expected_soa_rr.get_rdata()[0].to_text())
+ max_serial = 2 ** 32 - 1
+ soa_rdata = "%d %s"%(max_serial,"3600 1800 2419200 7200")
+ soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), 3600,
+ ["ns1.example.org. " + "admin.example.org. " +
+ soa_rdata])
+ expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. "
+ + "admin.example.org. " +
+ "1 3600 1800 2419200 7200"])
+ self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
+ expected_soa_rr.get_rdata()[0].to_text())
+
+ def test_soa_update_check(self):
+ '''unittest for soa_update_check function'''
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"])
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ soa_update = DDNS_SOA()
+ # The case of (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) in rfc 1982
+ self.assertTrue(soa_update.soa_update_check(small_soa_rr,
+ large_soa_rr))
+ self.assertFalse(soa_update.soa_update_check(large_soa_rr,
+ small_soa_rr))
+ small_serial = 1235 + 2 ** 31
+ soa_rdata = "%d %s"%(small_serial,"3600 1800 2419200 7200")
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ soa_rdata])
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200"])
+ # The case of (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1)) in rfc 1982
+ self.assertTrue(soa_update.soa_update_check(small_soa_rr,
+ large_soa_rr))
+ self.assertFalse(soa_update.soa_update_check(large_soa_rr,
+ small_soa_rr))
+
+class SessionTest(SessionTestBase):
'''Basic session tests'''
def test_handle(self):
@@ -136,7 +327,7 @@ class SessionTest(SesseionTestBase):
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
RRType.SOA())])
session = UpdateSession(msg, TEST_CLIENT4,
- ZoneConfig([(TEST_ZONE_NAME, TEST_RRCLASS)],
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
self.check_response(session.get_message(), Rcode.NOTIMP())
@@ -145,7 +336,7 @@ class SessionTest(SesseionTestBase):
'''Common test sequence for the 'notauth' test'''
msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA())])
session = UpdateSession(msg, TEST_CLIENT4,
- ZoneConfig([(TEST_ZONE_NAME, TEST_RRCLASS)],
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
self.check_response(session.get_message(), Rcode.NOTAUTH())
@@ -160,11 +351,114 @@ class SessionTest(SesseionTestBase):
# zone class doesn't match
self.check_notauth(Name('example.org'), RRClass.CH())
+ def test_update_datasrc_error(self):
+ # if the data source client raises an exception, it should result in
+ # a SERVFAIL.
+ class BadDataSourceClient:
+ def find_zone(self, name):
+ raise isc.datasrc.Error('faked exception')
+ msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.SOA())])
+ session = UpdateSession(msg, TEST_CLIENT4,
+ ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
+ TEST_RRCLASS,
+ BadDataSourceClient()))
+ self.assertEqual(UPDATE_ERROR, session.handle()[0])
+ self.check_response(session.get_message(), Rcode.SERVFAIL())
+
+ def test_foreach_rr_in_rrset(self):
+ rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n"], l)
+
+ add_rdata(rrset, "192.0.2.2")
+ add_rdata(rrset, "192.0.2.3")
+
+ # but through the generator, there should be several 1-line entries
+ l = []
+ for rr in foreach_rr(rrset):
+ l.append(str(rr))
+ self.assertEqual(["www.example.org. 3600 IN A 192.0.2.1\n",
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ "www.example.org. 3600 IN A 192.0.2.3\n",
+ ], l)
+
+ def test_convert_rrset_class(self):
+ # Converting an RRSET to a different class should work
+ # if the rdata types can be converted
+ rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ 3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
+
+ rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
+ "www.example.org. 3600 IN A 192.0.2.2\n",
+ str(rrset2))
+
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
+ self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 CLASS254 " +
+ "A \\# 4 c0000202\n",
+ str(rrset3))
+
+ # depending on what type of bad data is given, a number
+ # of different exceptions could be raised (TODO: i recall
+ # there was a ticket about making a better hierarchy for
+ # dns/parsing related exceptions)
+ self.assertRaises(InvalidRdataLength, convert_rrset_class,
+ rrset, RRClass.CH())
+ add_rdata(rrset, b'\xc0\x00')
+ self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
+ rrset, RRClass.IN())
+
+ def test_collect_rrsets(self):
+ '''
+ Tests the 'rrset collector' method, which collects rrsets
+ with the same name and type
+ '''
+ collected = []
+
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ]))
+ # Same name and class, different type
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
+ RRType.TXT(), 0, [ "two" ]))
+ # Same class and type as an existing one, different name
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.3" ]))
+ # Same name and type as an existing one, different class
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
+ RRType.TXT(), 0, [ "two" ]))
+
+ strings = [ rrset.to_text() for rrset in collected ]
+ # note + vs , in this list
+ expected = ['a.example.org. 0 IN A 192.0.2.1\n' +
+ 'a.example.org. 0 IN A 192.0.2.2\n',
+ 'a.example.org. 0 IN TXT "one"\n' +
+ 'a.example.org. 0 IN TXT "two"\n',
+ 'b.example.org. 0 IN A 192.0.2.3\n' +
+ 'b.example.org. 0 IN A 192.0.2.4\n',
+ 'a.example.org. 0 CH TXT "one"\n' +
+ 'a.example.org. 0 CH TXT "two"\n']
+
+ self.assertEqual(expected, strings)
+
def __prereq_helper(self, method, expected, rrset):
'''Calls the given method with self._datasrc_client
and the given rrset, and compares the return value.
Function does not do much but makes the code look nicer'''
- self.assertEqual(expected, method(self._datasrc_client, rrset))
+ self.assertEqual(expected, method(rrset))
def __check_prerequisite_exists_combined(self, method, rrclass, expected):
'''shared code for the checks for the very similar (but reversed
@@ -175,156 +469,102 @@ class SessionTest(SesseionTestBase):
'''
# Basic existence checks
# www.example.org should have an A, but not an MX
- rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("www.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
- rrclass, isc.dns.RRType.MX(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("www.example.org", rrclass, RRType.MX(), 0)
self.__prereq_helper(method, not expected, rrset)
# example.org should have an MX, but not an A
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- rrclass, isc.dns.RRType.MX(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("example.org", rrclass, RRType.MX(), 0)
self.__prereq_helper(method, expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, not expected, rrset)
# Also check the case where the name does not even exist
- rrset = isc.dns.RRset(isc.dns.Name("doesnotexist.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, not expected, rrset)
# Wildcard expansion should not be applied, but literal matches
# should work
- rrset = isc.dns.RRset(isc.dns.Name("foo.wildcard.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("*.wildcard.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, expected, rrset)
# Likewise, CNAME directly should match, but what it points to should
# not
- rrset = isc.dns.RRset(isc.dns.Name("cname.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("cname.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("cname.example.org"),
- rrclass, isc.dns.RRType.CNAME(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME(), 0)
self.__prereq_helper(method, expected, rrset)
# And also make sure a delegation (itself) is not treated as existing
# data
- rrset = isc.dns.RRset(isc.dns.Name("foo.sub.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, not expected, rrset)
# But the delegation data itself should match
- rrset = isc.dns.RRset(isc.dns.Name("sub.example.org"),
- rrclass, isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("sub.example.org", rrclass, RRType.NS(), 0)
self.__prereq_helper(method, expected, rrset)
# As should glue
- rrset = isc.dns.RRset(isc.dns.Name("ns.sub.example.org"),
- rrclass, isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A(), 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_exists(self):
method = self._session._UpdateSession__prereq_rrset_exists
self.__check_prerequisite_exists_combined(method,
- isc.dns.RRClass.ANY(),
+ RRClass.ANY(),
True)
def test_check_prerequisite_does_not_exist(self):
method = self._session._UpdateSession__prereq_rrset_does_not_exist
self.__check_prerequisite_exists_combined(method,
- isc.dns.RRClass.NONE(),
+ RRClass.NONE(),
False)
def test_check_prerequisite_exists_value(self):
method = self._session._UpdateSession__prereq_rrset_exists_value
- rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 0)
# empty one should not match
self.__prereq_helper(method, False, rrset)
# When the rdata is added, it should match
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.1"))
+ add_rdata(rrset, "192.0.2.1")
self.__prereq_helper(method, True, rrset)
# But adding more should not
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.2"))
+ add_rdata(rrset, "192.0.2.2")
self.__prereq_helper(method, False, rrset)
# Also test one with more than one RR
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.IN(), isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns1.example.org."))
+ add_rdata(rrset, "ns1.example.org.")
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns2.example.org."))
+ add_rdata(rrset, "ns2.example.org")
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns3.example.org."))
+ add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, True, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns4.example.org."))
+ add_rdata(rrset, "ns4.example.org.")
self.__prereq_helper(method, False, rrset)
# Repeat that, but try a different order of Rdata addition
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.IN(), isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns3.example.org."))
+ add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns2.example.org."))
+ add_rdata(rrset, "ns2.example.org.")
self.__prereq_helper(method, False, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns1.example.org."))
+ add_rdata(rrset, "ns1.example.org.")
self.__prereq_helper(method, True, rrset)
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns4.example.org."))
+ add_rdata(rrset, "ns4.example.org.")
self.__prereq_helper(method, False, rrset)
# and test one where the name does not even exist
- rrset = isc.dns.RRset(isc.dns.Name("doesnotexist.example.org"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.1"))
+ rrset = create_rrset("doesnotexist.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ])
self.__prereq_helper(method, False, rrset)
def __check_prerequisite_name_in_use_combined(self, method, rrclass,
@@ -333,51 +573,42 @@ class SessionTest(SesseionTestBase):
in behaviour) methods __prereq_name_in_use and
__prereq_name_not_in_use
'''
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("example.org", rrclass, RRType.ANY(), 0)
self.__prereq_helper(method, expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("www.example.org", rrclass, RRType.ANY(), 0)
self.__prereq_helper(method, expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("doesnotexist.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("doesnotexist.example.org", rrclass,
+ RRType.ANY(), 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("belowdelegation.sub.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("belowdelegation.sub.example.org", rrclass,
+ RRType.ANY(), 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("foo.wildcard.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("foo.wildcard.example.org", rrclass,
+ RRType.ANY(), 0)
self.__prereq_helper(method, not expected, rrset)
# empty nonterminal should not match
- rrset = isc.dns.RRset(isc.dns.Name("nonterminal.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("nonterminal.example.org", rrclass,
+ RRType.ANY(), 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = isc.dns.RRset(isc.dns.Name("empty.nonterminal.example.org"),
- rrclass, isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("empty.nonterminal.example.org", rrclass,
+ RRType.ANY(), 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_name_in_use(self):
method = self._session._UpdateSession__prereq_name_in_use
self.__check_prerequisite_name_in_use_combined(method,
- isc.dns.RRClass.ANY(),
+ RRClass.ANY(),
True)
def test_check_prerequisite_name_not_in_use(self):
method = self._session._UpdateSession__prereq_name_not_in_use
self.__check_prerequisite_name_in_use_combined(method,
- isc.dns.RRClass.NONE(),
+ RRClass.NONE(),
False)
def check_prerequisite_result(self, expected, prerequisites):
@@ -386,20 +617,60 @@ class SessionTest(SesseionTestBase):
from 'prerequisites'. Then checks if __check_prerequisites()
returns the Rcode specified in 'expected'.'''
msg = create_update_msg([TEST_ZONE_RECORD], prerequisites)
- zconfig = ZoneConfig([], TEST_RRCLASS, self._datasrc_client,
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
self._acl_map)
session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+ session._get_update_zone()
+ session._create_diff()
# compare the to_text output of the rcodes (nicer error messages)
# This call itself should also be done by handle(),
# but just for better failures, it is first called on its own
self.assertEqual(expected.to_text(),
- session._UpdateSession__check_prerequisites(self._datasrc_client,
- TEST_ZONE_NAME,
- TEST_RRCLASS).to_text())
+ session._UpdateSession__check_prerequisites().to_text())
+ # Now see if handle finds the same result
+ (result, _, _) = session.handle()
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__message.get_rcode().to_text())
+ # And that the result looks right
+ if expected == Rcode.NOERROR():
+ self.assertEqual(UPDATE_SUCCESS, result)
+ else:
+ self.assertEqual(UPDATE_ERROR, result)
+
+ def check_prescan_result(self, expected, updates, expected_soa = None):
+ '''Helper method for checking the result of a prescan check;
+ creates an update session, and fills it with the list of rrsets
+ from 'updates'. Then checks if __do_prescan()
+ returns the Rcode specified in 'expected'.'''
+ msg = create_update_msg([TEST_ZONE_RECORD], [], updates)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
+ self._acl_map)
+ session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+ session._get_update_zone()
+ session._create_diff()
+ # compare the to_text output of the rcodes (nicer error messages)
+ # This call itself should also be done by handle(),
+ # but just for better failures, it is first called on its own
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__do_prescan().to_text())
+ # If there is an expected soa, check it
+ self.assertEqual(str(expected_soa),
+ str(session._UpdateSession__added_soa))
+
+ def check_full_handle_result(self, expected, updates, prerequisites=[]):
+ '''Helper method for checking the result of a full handle;
+ creates an update session, and fills it with the list of rrsets
+ from 'updates'. Then checks if handle()
+ results in a response with rcode 'expected'.'''
+ msg = create_update_msg([TEST_ZONE_RECORD], prerequisites, updates)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self._datasrc_client,
+ self._acl_map)
+ session = UpdateSession(msg, TEST_CLIENT4, zconfig)
+
# Now see if handle finds the same result
(result, _, _) = session.handle()
- self.assertEqual(expected,
- session._UpdateSession__message.get_rcode())
+ self.assertEqual(expected.to_text(),
+ session._UpdateSession__message.get_rcode().to_text())
# And that the result looks right
if expected == Rcode.NOERROR():
self.assertEqual(UPDATE_SUCCESS, result)
@@ -414,95 +685,47 @@ class SessionTest(SesseionTestBase):
# in the specific prerequisite type tests)
# Let's first define a number of prereq's that should succeed
- rrset_exists_yes = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(0))
-
- rrset_exists_value_yes = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
- rrset_exists_value_yes.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.1"))
-
- rrset_does_not_exist_yes = isc.dns.RRset(isc.dns.Name("foo.example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(0))
-
- name_in_use_yes = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
-
- name_not_in_use_yes = isc.dns.RRset(isc.dns.Name("foo.example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
-
- rrset_exists_value_1 = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
- rrset_exists_value_1.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns1.example.org"))
- rrset_exists_value_2 = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
- rrset_exists_value_2.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns2.example.org"))
- rrset_exists_value_3 = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.NS(),
- isc.dns.RRTTL(0))
- rrset_exists_value_3.add_rdata(isc.dns.Rdata(isc.dns.RRType.NS(),
- isc.dns.RRClass.IN(),
- "ns3.example.org"))
+ rrset_exists_yes = create_rrset("example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+
+ rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.1" ])
+
+ rrset_does_not_exist_yes = create_rrset("foo.example.org",
+ RRClass.NONE(), RRType.SOA(),
+ 0)
+
+ name_in_use_yes = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+
+ name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE(),
+ RRType.ANY(), 0)
+
+ rrset_exists_value_1 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns1.example.org" ])
+ rrset_exists_value_2 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns2.example.org" ])
+ rrset_exists_value_3 = create_rrset("example.org", RRClass.IN(),
+ RRType.NS(), 0,
+ [ "ns3.example.org" ])
# and a number that should not
- rrset_exists_no = isc.dns.RRset(isc.dns.Name("foo.example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(0))
-
-
- rrset_exists_value_no = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
- rrset_exists_value_no.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.2"))
-
- rrset_does_not_exist_no = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(0))
-
- name_in_use_no = isc.dns.RRset(isc.dns.Name("foo.example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
-
- name_not_in_use_no = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.ANY(),
- isc.dns.RRTTL(0))
-
- # Create an UPDATE with all 5 'yes' prereqs
- create_update_msg([TEST_ZONE_RECORD],
- [rrset_exists_yes,
- rrset_does_not_exist_yes,
- name_in_use_yes,
- name_not_in_use_yes,
- rrset_exists_value_yes,
- ])
+ rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+
+ rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN(),
+ RRType.A(), 0, [ "192.0.2.2" ])
+
+ rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE(),
+ RRType.SOA(), 0)
+
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+ name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE(),
+ RRType.ANY(), 0)
# check 'no' result codes
self.check_prerequisite_result(Rcode.NXRRSET(),
[ rrset_exists_no ])
@@ -516,6 +739,23 @@ class SessionTest(SesseionTestBase):
[ name_not_in_use_no ])
# the 'yes' codes should result in ok
+ # individually
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_yes ] )
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_value_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_does_not_exist_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ name_in_use_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ name_not_in_use_yes ])
+ self.check_prerequisite_result(Rcode.NOERROR(),
+ [ rrset_exists_value_1,
+ rrset_exists_value_2,
+ rrset_exists_value_3])
+
+ # and together
self.check_prerequisite_result(Rcode.NOERROR(),
[ rrset_exists_yes,
rrset_exists_value_yes,
@@ -546,70 +786,705 @@ class SessionTest(SesseionTestBase):
rrset_exists_value_1])
def test_prerequisite_notzone(self):
- rrset = isc.dns.RRset(isc.dns.Name("some.other.zone."),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(0))
+ rrset = create_rrset("some.other.zone.", RRClass.ANY(), RRType.SOA(), 0)
self.check_prerequisite_result(Rcode.NOTZONE(), [ rrset ])
def test_prerequisites_formerr(self):
# test for form errors in the prerequisite section
# Class ANY, non-zero TTL
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(1))
+ rrset = create_rrset("example.org", RRClass.ANY(), RRType.SOA(), 1)
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
# Class ANY, but with rdata
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.ANY(),
- isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.ANY(),
- "\# 04 00 00 00 00"))
+ rrset = create_rrset("example.org", RRClass.ANY(), RRType.A(), 0,
+ [ b'\x00\x00\x00\x00' ])
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
# Class NONE, non-zero TTL
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.SOA(),
- isc.dns.RRTTL(1))
+ rrset = create_rrset("example.org", RRClass.NONE(), RRType.SOA(), 1)
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
# Class NONE, but with rdata
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.NONE(),
- isc.dns.RRType.A(),
- isc.dns.RRTTL(0))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.NONE(),
- "\# 04 00 00 00 00"))
+ rrset = create_rrset("example.org", RRClass.NONE(), RRType.A(), 0,
+ [ b'\x00\x00\x00\x00' ])
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
# Matching class and type, but non-zero TTL
- rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.A(),
- isc.dns.RRTTL(1))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
- isc.dns.RRClass.IN(),
- "192.0.2.1"))
+ rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 1,
+ [ "192.0.2.1" ])
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
# Completely different class
- rrset = isc.dns.RRset(isc.dns.Name("example.org"),
- isc.dns.RRClass.CH(),
- isc.dns.RRType.TXT(),
- isc.dns.RRTTL(0))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.TXT(),
- isc.dns.RRClass.CH(),
- "foo"))
+ rrset = create_rrset("example.org", RRClass.CH(), RRType.TXT(), 0,
+ [ "foo" ])
self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
-class SessionACLTest(SesseionTestBase):
+ def __prereq_helper(self, method, expected, rrset):
+ '''Calls the given method with self._datasrc_client
+ and the given rrset, and compares the return value.
+ Function does not do much but makes the code look nicer'''
+ self.assertEqual(expected, method(rrset))
+
+ def __initialize_update_rrsets(self):
+ '''Prepare a number of RRsets to be used in several update tests
+ The rrsets are stored in self'''
+ orig_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600, [ "192.0.2.1" ])
+ self.orig_a_rrset = orig_a_rrset
+
+ rrset_update_a = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.2", "192.0.2.3" ])
+ self.rrset_update_a = rrset_update_a
+
+ rrset_update_soa = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200" ])
+ self.rrset_update_soa = rrset_update_soa
+
+ rrset_update_soa_del = create_rrset("example.org", RRClass.NONE(),
+ RRType.SOA(), 0,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200" ])
+ self.rrset_update_soa_del = rrset_update_soa_del
+
+ rrset_update_soa2 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "4000 3600 1800 2419200 7200" ])
+ self.rrset_update_soa2 = rrset_update_soa2
+
+ rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+ self.rrset_update_del_name = rrset_update_del_name
+
+ rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+ self.rrset_update_del_name_apex = rrset_update_del_name_apex
+
+ rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY(),
+ RRType.A(), 0)
+ self.rrset_update_del_rrset = rrset_update_del_rrset
+
+ rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.MX(), 0)
+ self.rrset_update_del_mx_apex = rrset_update_del_mx_apex
+
+ rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.SOA(), 0)
+ self.rrset_update_del_soa_apex = rrset_update_del_soa_apex
+
+ rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY(),
+ RRType.NS(), 0)
+ self.rrset_update_del_ns_apex = rrset_update_del_ns_apex
+
+ rrset_update_del_rrset_part = create_rrset("www.example.org",
+ RRClass.NONE(), RRType.A(),
+ 0,
+ [ b'\xc0\x00\x02\x02',
+ b'\xc0\x00\x02\x03' ])
+ self.rrset_update_del_rrset_part = rrset_update_del_rrset_part
+
+ rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE(),
+ RRType.NS(), 0,
+ [ b'\x03ns1\x07example\x03org\x00',
+ b'\x03ns2\x07example\x03org\x00',
+ b'\x03ns3\x07example\x03org\x00' ])
+ self.rrset_update_del_rrset_ns = rrset_update_del_rrset_ns
+
+ rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE(),
+ RRType.MX(), 0,
+ [ b'\x00\x0a\x04mail\x07example\x03org\x00' ])
+ self.rrset_update_del_rrset_mx = rrset_update_del_rrset_mx
+
+ def test_acl_before_prereq(self):
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
+ RRType.ANY(), 0)
+
+ # Test a prerequisite that would fail
+ self.check_full_handle_result(Rcode.NXDOMAIN(), [], [ name_in_use_no ])
+
+ # Change ACL so that it would be denied
+ self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
+ REQUEST_LOADER.load([{"action": "REJECT"}])}
+
+ # The prerequisite should now not be reached; it should fail on the
+ # ACL
+ self.check_full_handle_result(Rcode.REFUSED(), [], [ name_in_use_no ])
+
+ def test_prescan(self):
+ '''Test whether the prescan succeeds on data that is ok, and whether
+ if notices the SOA if present'''
+ # prepare a set of correct update statements
+ self.__initialize_update_rrsets()
+
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ # check if soa is noticed
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa ],
+ self.rrset_update_soa)
+
+ # Other types of succesful prechecks
+ self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa2 ],
+ self.rrset_update_soa2)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name_apex ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_mx_apex ])
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+
+ # and check a few permutations of the above
+ # all of them (with one of the soas)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [
+ self.rrset_update_a,
+ self.rrset_update_soa,
+ self.rrset_update_del_name,
+ self.rrset_update_del_name_apex,
+ self.rrset_update_del_rrset,
+ self.rrset_update_del_mx_apex,
+ self.rrset_update_del_rrset_part
+ ],
+ self.rrset_update_soa)
+
+ # Two soas. Should we reject or simply use the last?
+ # (RFC is not really explicit on this, but between the lines I read
+ # use the last)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa,
+ self.rrset_update_soa2 ],
+ self.rrset_update_soa2)
+ self.check_prescan_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa2,
+ self.rrset_update_soa ],
+ self.rrset_update_soa)
+
+ self.check_prescan_result(Rcode.NOERROR(),
+ [
+ self.rrset_update_del_mx_apex,
+ self.rrset_update_del_name,
+ self.rrset_update_del_name_apex,
+ self.rrset_update_del_rrset_part,
+ self.rrset_update_a,
+ self.rrset_update_del_rrset,
+ self.rrset_update_soa
+ ],
+ self.rrset_update_soa)
+
+ def test_prescan_failures(self):
+ '''Test whether prescan fails on bad data'''
+ # out of zone data
+ rrset = create_rrset("different.zone", RRClass.ANY(), RRType.TXT(), 0)
+ self.check_prescan_result(Rcode.NOTZONE(), [ rrset ])
+
+ # forbidden type, zone class
+ rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # non-zero TTL, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 1)
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # non-zero Rdata, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 0,
+ [ "foo" ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # forbidden type, class ANY
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.AXFR(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # non-zero TTL, class NONE
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.TXT(), 1)
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ # forbidden type, class NONE
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.AXFR(), 0,
+ [ b'\x00' ])
+ self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+
+ def __check_inzone_data(self, expected_result, name, rrtype,
+ expected_rrset = None):
+ '''Does a find on TEST_ZONE for the given rrset's name and type,
+ then checks if the result matches the expected result.
+ If so, and if expected_rrset is given, they are compared as
+ well.'''
+ _, finder = self._datasrc_client.find_zone(TEST_ZONE_NAME)
+ result, found_rrset, _ = finder.find(name, rrtype,
+ finder.NO_WILDCARD |
+ finder.FIND_GLUE_OK)
+ self.assertEqual(expected_result, result)
+ # Sigh. Need rrsets.compare() again.
+ # To be sure, compare name, class, type, and ttl
+ if expected_rrset is not None:
+ self.assertEqual(expected_rrset.get_name(), found_rrset.get_name())
+ self.assertEqual(expected_rrset.get_class(), found_rrset.get_class())
+ self.assertEqual(expected_rrset.get_type(), found_rrset.get_type())
+ self.assertEqual(expected_rrset.get_ttl().to_text(),
+ found_rrset.get_ttl().to_text())
+ expected_rdata =\
+ [ rdata.to_text() for rdata in expected_rrset.get_rdata() ]
+ found_rdata =\
+ [ rdata.to_text() for rdata in found_rrset.get_rdata() ]
+ expected_rdata.sort()
+ found_rdata.sort()
+ self.assertEqual(expected_rdata, found_rdata)
+
+ def test_update_add_delete_rrset(self):
+ '''
+ Tests a sequence of related add and delete updates. Some other
+ cases are tested by later tests.
+ '''
+ self.__initialize_update_rrsets()
+
+ # initially, the www should only contain one rr
+ # (set to self.orig_a_rrset)
+
+ # during this test, we will extend it at some point
+ extended_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.1",
+ "192.0.2.2",
+ "192.0.2.3" ])
+
+ # Sanity check, make sure original data is really there before updates
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # Add two rrs
+ self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ extended_a_rrset)
+
+ # Adding the same RRsets should not make a difference.
+ self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ extended_a_rrset)
+
+ # Now delete those two, and we should end up with the original RRset
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # 'Deleting' them again should make no difference
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_part ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ # But deleting the entire rrset, independent of its contents, should
+ # work
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Check that if we update the SOA, it is updated to our value
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_soa2 ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ self.rrset_update_soa2)
+
+ def test_glue_deletions(self):
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("ns.sub.example.org."),
+ RRType.A())
+
+ # See that we can delete glue
+ rrset_delete_glue = create_rrset("ns.sub.example.org.",
+ RRClass.ANY(),
+ RRType.A(),
+ 0)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset_delete_glue ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("ns.sub.example.org."),
+ RRType.A())
+
+ # Check that we don't accidentally delete a delegation if we
+ # try to delete non-existent glue
+ rrset_delete_nonexistent_glue = create_rrset("foo.sub.example.org.",
+ RRClass.ANY(),
+ RRType.A(),
+ 0)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset_delete_nonexistent_glue ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("sub.example.org."),
+ RRType.NS())
+
+ def test_update_add_new_data(self):
+ '''
+ This tests adds data where none is present
+ '''
+ # Add data at a completely new name
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new.example.org"),
+ RRType.A())
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1", "192.0.2.2" ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new.example.org"),
+ RRType.A(),
+ rrset)
+
+ # Also try a name where data is present, but none of this
+ # specific type
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("new.example.org"),
+ RRType.TXT())
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT(),
+ 3600, [ "foo" ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new.example.org"),
+ RRType.TXT(),
+ rrset)
+
+ def test_update_add_new_data_interspersed(self):
+ '''
+ This tests adds data where none is present, similar to
+ test_update_add_new_data, but this time the second RRset
+ is put into the record between the two RRs of the first
+ RRset.
+ '''
+ # Add data at a completely new name
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new_a.example.org"),
+ RRType.A())
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("new_txt.example.org"),
+ RRType.TXT())
+
+ rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1" ])
+
+ rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT(),
+ 3600, [ "foo" ])
+
+ rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.2" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ rrset1, rrset2, rrset3 ])
+
+ # The update should have merged rrset1 and rrset3
+ rrset_merged = create_rrset("new_a.example.org", TEST_RRCLASS,
+ RRType.A(), 3600,
+ [ "192.0.2.1", "192.0.2.2" ])
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new_a.example.org"),
+ RRType.A(),
+ rrset_merged)
+
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("new_txt.example.org"),
+ RRType.TXT(),
+ rrset2)
+
+ def test_update_delete_name(self):
+ '''
+ Tests whether deletion of every RR for a name works
+ '''
+ self.__initialize_update_rrsets()
+
+ # First check it is there
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Delete the entire name
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ # Should still be gone after pointless second delete
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_apex_special_cases(self):
+ '''
+ Tests a few special cases when deleting data from the apex
+ '''
+ self.__initialize_update_rrsets()
+
+ # the original SOA
+ orig_soa_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600,
+ [ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1234 3600 1800 2419200 7200" ])
+ # At some point, the SOA SERIAL will be auto-incremented
+ incremented_soa_rrset_01 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1235 3600 1800 2419200 7200" ])
+ incremented_soa_rrset_02 = create_rrset("example.org", TEST_RRCLASS,
+ RRType.SOA(), 3600, ["ns1.example.org. " +
+ "admin.example.org. " +
+ "1236 3600 1800 2419200 7200" ])
+
+ # We will delete some of the NS records
+ orig_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.NS(), 3600,
+ [ "ns1.example.org.",
+ "ns2.example.org.",
+ "ns3.example.org." ])
+
+ # Sanity check, make sure original data is really there before updates
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+ # We will delete the MX record later in this test, so let's make
+ # sure that it exists (we do not care about its value)
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ # Check that we cannot delete the SOA record by direct deletion
+ # both by name+type and by full rrset
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_soa_apex,
+ self.rrset_update_soa_del ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ incremented_soa_rrset_01)
+
+ # If we delete everything at the apex, the SOA and NS rrsets should be
+ # untouched (but serial will be incremented)
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name_apex ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.SOA(),
+ incremented_soa_rrset_02)
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+ # but the MX should be gone
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ # Deleting the NS rrset by name and type only, it should also be left
+ # untouched
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_ns_apex ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ orig_ns_rrset)
+
+ def test_update_apex_special_case_ns_rrset(self):
+ # If we delete the NS at the apex specifically, it should still
+ # keep one record
+ self.__initialize_update_rrsets()
+ # When we are done, we should have a reduced NS rrset
+ short_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
+ RRType.NS(), 3600,
+ [ "ns3.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_ns ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ short_ns_rrset)
+
+ def test_update_apex_special_case_ns_rrset2(self):
+ # If we add new NS records, then delete all existing ones, it
+ # should not keep any
+ self.__initialize_update_rrsets()
+ new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS(), 3600,
+ [ "newns1.example.org", "newns2.example.org" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ new_ns,
+ self.rrset_update_del_rrset_ns ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.NS(),
+ new_ns)
+
+ def test_update_delete_normal_rrset_at_apex(self):
+ '''
+ Tests a number of 'normal rrset' deletes at the apex
+ '''
+
+ # MX should simply be deleted
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset_mx ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
+ isc.dns.Name("example.org"),
+ RRType.MX())
+
+ def test_update_add_then_delete_rrset(self):
+ # If we add data, then delete the whole rrset, added data should
+ # be gone as well
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_a,
+ self.rrset_update_del_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_add_then_delete_name(self):
+ # If we add data, then delete the entire name, added data should
+ # be gone as well
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_a,
+ self.rrset_update_del_name ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+
+ def test_update_delete_then_add_rrset(self):
+ # If we delete an entire rrset, then add something there again,
+ # the addition should be done
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_rrset,
+ self.rrset_update_a ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.rrset_update_a)
+
+ def test_update_delete_then_add_rrset(self):
+ # If we delete an entire name, then add something there again,
+ # the addition should be done
+ self.__initialize_update_rrsets()
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A())
+ self.check_full_handle_result(Rcode.NOERROR(),
+ [ self.rrset_update_del_name,
+ self.rrset_update_a ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.rrset_update_a)
+
+ def test_update_cname_special_cases(self):
+ self.__initialize_update_rrsets()
+
+ # Sanity check
+ orig_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "www.example.org." ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ orig_cname_rrset)
+
+ # If we try to add data where a cname is preset
+ rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A(),
+ 3600, [ "192.0.2.1" ])
+
+ self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ orig_cname_rrset)
+
+ # But updating the cname itself should work
+ new_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "mail.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
+ isc.dns.Name("cname.example.org"),
+ RRType.A(),
+ new_cname_rrset)
+
+ self.__initialize_update_rrsets()
+
+ # Likewise, adding a cname where other data is
+ # present should do nothing either
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+ new_cname_rrset = create_rrset("www.example.org", TEST_RRCLASS,
+ RRType.CNAME(), 3600,
+ [ "mail.example.org." ])
+ self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
+ isc.dns.Name("www.example.org"),
+ RRType.A(),
+ self.orig_a_rrset)
+
+ def test_update_bad_class(self):
+ rrset = create_rrset("example.org.", RRClass.CH(), RRType.TXT(), 0,
+ [ "foo" ])
+ self.check_full_handle_result(Rcode.FORMERR(), [ rrset ])
+
+ def test_uncaught_exception(self):
+ def my_exc():
+ raise Exception("foo")
+ self._session._UpdateSession__update_soa = my_exc
+ self.assertEqual(Rcode.SERVFAIL().to_text(),
+ self._session._UpdateSession__do_update().to_text())
+
+class SessionACLTest(SessionTestBase):
'''ACL related tests for update session.'''
def test_update_acl_check(self):
'''Test for various ACL checks.
@@ -619,7 +1494,7 @@ class SessionACLTest(SesseionTestBase):
'''
# create a separate session, with default (empty) ACL map.
session = UpdateSession(self._update_msg,
- TEST_CLIENT4, ZoneConfig([], TEST_RRCLASS,
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
self._datasrc_client))
# then the request should be rejected.
self.assertEqual((UPDATE_ERROR, None, None), session.handle())
@@ -648,7 +1523,7 @@ class SessionACLTest(SesseionTestBase):
# If the message doesn't contain TSIG, it doesn't match the ACCEPT
# ACL entry, and the request should be rejected.
session = UpdateSession(self._update_msg,
- TEST_CLIENT4, ZoneConfig([], TEST_RRCLASS,
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
self._datasrc_client,
acl_map))
self.assertEqual((UPDATE_ERROR, None, None), session.handle())
@@ -657,7 +1532,7 @@ class SessionACLTest(SesseionTestBase):
# If the message contains TSIG, it should match the ACCEPT
# ACL entry, and the request should be granted.
session = UpdateSession(create_update_msg(tsig_key=TEST_TSIG_KEY),
- TEST_CLIENT4, ZoneConfig([], TEST_RRCLASS,
+ TEST_CLIENT4, ZoneConfig(set(), TEST_RRCLASS,
self._datasrc_client,
acl_map))
self.assertEqual((UPDATE_SUCCESS, TEST_ZONE_NAME, TEST_RRCLASS),
diff --git a/src/lib/python/isc/ddns/tests/zone_config_tests.py b/src/lib/python/isc/ddns/tests/zone_config_tests.py
index 4efd1c1..7facb48 100644
--- a/src/lib/python/isc/ddns/tests/zone_config_tests.py
+++ b/src/lib/python/isc/ddns/tests/zone_config_tests.py
@@ -55,7 +55,7 @@ class ZoneConfigTest(unittest.TestCase):
'''Some basic tests for the ZoneConfig class.'''
def setUp(self):
self.__datasrc_client = FakeDataSourceClient()
- self.zconfig = ZoneConfig([(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)],
+ self.zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self.__datasrc_client)
def test_find_zone(self):
@@ -87,34 +87,27 @@ class ZoneConfigTest(unittest.TestCase):
TEST_RRCLASS)))
# zone class doesn't match (but zone name matches)
self.__datasrc_client.set_find_result(DataSourceClient.SUCCESS)
- zconfig = ZoneConfig([(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)],
+ zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
RRClass.CH(), self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
# similar to the previous case, but also in the secondary list
- zconfig = ZoneConfig([(TEST_ZONE_NAME, TEST_RRCLASS)],
+ zconfig = ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
RRClass.CH(), self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
# check some basic tests varying the secondary list.
# empty secondary list doesn't cause any disruption.
- zconfig = ZoneConfig([], TEST_RRCLASS, self.__datasrc_client)
+ zconfig = ZoneConfig(set(), TEST_RRCLASS, self.__datasrc_client)
self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
- # adding some mulitle tuples, including subdomainof the test zone name,
- # and the same zone name but a different class
- zconfig = ZoneConfig([(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
+ # adding some mulitle tuples, including subdomain of the test zone
+ # name, and the same zone name but a different class
+ zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
(Name('example'), TEST_RRCLASS),
(Name('sub.example.org'), TEST_RRCLASS),
- (TEST_ZONE_NAME, RRClass.CH())],
- TEST_RRCLASS, self.__datasrc_client)
- self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
- self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
- # secondary zone list has a duplicate entry, which is just
- # (effecitivey) ignored
- zconfig = ZoneConfig([(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
- (TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)],
+ (TEST_ZONE_NAME, RRClass.CH())},
TEST_RRCLASS, self.__datasrc_client)
self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
@@ -122,7 +115,7 @@ class ZoneConfigTest(unittest.TestCase):
class ACLConfigTest(unittest.TestCase):
def setUp(self):
self.__datasrc_client = FakeDataSourceClient()
- self.__zconfig = ZoneConfig([(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)],
+ self.__zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self.__datasrc_client)
def test_get_update_acl(self):
diff --git a/src/lib/python/isc/ddns/zone_config.py b/src/lib/python/isc/ddns/zone_config.py
index 388770c..848eac1 100644
--- a/src/lib/python/isc/ddns/zone_config.py
+++ b/src/lib/python/isc/ddns/zone_config.py
@@ -22,6 +22,9 @@ ZONE_NOTFOUND = -1 # Zone isn't found in find_zone()
ZONE_PRIMARY = 0 # Primary zone
ZONE_SECONDARY = 1 # Secondary zone
+# The default ACL if unspecifed on construction of ZoneConfig.
+DEFAULT_ACL = REQUEST_LOADER.load([{"action": "REJECT"}])
+
class ZoneConfig:
'''A temporary helper class to encapsulate zone related configuration.
@@ -38,7 +41,7 @@ class ZoneConfig:
'''Constructor.
Parameters:
- - secondaries: a list of 2-element tuples. Each element is a pair
+ - secondaries: a set of 2-element tuples. Each element is a pair
of isc.dns.Name and isc.dns.RRClass, and identifies a single
secondary zone.
- datasrc_class: isc.dns.RRClass object. Specifies the RR class
@@ -53,12 +56,10 @@ class ZoneConfig:
ACL will be applied to all zones, which is to reject any requests.
'''
- self.__secondaries = set()
- for (zname, zclass) in secondaries:
- self.__secondaries.add((zname, zclass))
+ self.__secondaries = secondaries
self.__datasrc_class = datasrc_class
self.__datasrc_client = datasrc_client
- self.__default_acl = REQUEST_LOADER.load([{"action": "REJECT"}])
+ self.__default_acl = DEFAULT_ACL
self.__acl_map = acl_map
def find_zone(self, zone_name, zone_class):
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 170eee6..ec29b7a 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -17,6 +17,7 @@ check-local:
chmod +x $(abs_builddir)/log_console.py
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -28,6 +29,7 @@ endif
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done ; \
for pytest in $(PYTESTS_GEN) ; do \
@@ -36,5 +38,6 @@ endif
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/server_common/Makefile.am b/src/lib/python/isc/server_common/Makefile.am
index a9eca2e..d89df2f 100644
--- a/src/lib/python/isc/server_common/Makefile.am
+++ b/src/lib/python/isc/server_common/Makefile.am
@@ -1,6 +1,7 @@
SUBDIRS = tests
-python_PYTHON = __init__.py tsig_keyring.py
+python_PYTHON = __init__.py tsig_keyring.py auth_command.py dns_tcp.py
+python_PYTHON += logger.py
pythondir = $(pyexecdir)/isc/server_common
diff --git a/src/lib/python/isc/server_common/auth_command.py b/src/lib/python/isc/server_common/auth_command.py
new file mode 100644
index 0000000..eb9c892
--- /dev/null
+++ b/src/lib/python/isc/server_common/auth_command.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This module is a utility to create some intermodule command(s) for Auth.'''
+
+from isc.dns import *
+import isc.log
+from isc.config.ccsession import create_command
+from isc.log_messages.server_common_messages import *
+from isc.server_common.logger import logger
+
+AUTH_MODULE_NAME = 'Auth'
+
+def auth_loadzone_command(module_cc, zone_name, zone_class):
+ '''Create a 'loadzone' command with a given zone for Auth server.
+
+ This function checks the Auth module configuration to see if it
+ servers a given zone via an in-memory data source on top of SQLite3
+ data source, and, if so, generate an inter-module command for Auth
+ to force it to reload the zone.
+
+ Parameters:
+ module_cc (CCSession): a CC session that can get access to auth module
+ configuration as a remote configuration
+ zone_name (isc.dns.Name): the zone name to be possibly reloaded
+ zone_class (isc.dns.RRClass): the RR class of the zone to be possibly
+ reloaded.
+
+ Return: a CC command message for the reload if the zone is found;
+ otherwise None.
+
+ '''
+ # Note: this function was originally a dedicated subroutine of xfrin,
+ # but was moved here so it can be shared by some other modules
+ # (specifically, by ddns). It's expected that we'll soon fundamentally
+ # revisit the whole data source related configuration, at which point
+ # this function should be substantially modified if not completely
+ # deprecated (which is a more likely scenario). For this reason, the
+ # corresponding tests were still kept in xfrin.
+
+ datasources, is_default =\
+ module_cc.get_remote_config_value(AUTH_MODULE_NAME, "datasources")
+ if is_default:
+ return None
+ for d in datasources:
+ if "type" not in d:
+ continue
+ try:
+ if "class" in d:
+ dclass = RRClass(d["class"])
+ else:
+ dclass = RRClass("IN")
+ except InvalidRRClass as err:
+ logger.info(PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR, err)
+ continue
+
+ if d["type"].lower() == "memory" and dclass == zone_class:
+ for zone in d["zones"]:
+ if "filetype" not in zone:
+ continue
+ if "origin" not in zone:
+ continue
+ if "filetype" not in zone:
+ continue
+ try:
+ name = Name(zone["origin"])
+ except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape,
+ TooLongName, IncompleteName):
+ logger.info(PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR,
+ err)
+ continue
+
+ if zone["filetype"].lower() == "sqlite3" and name == zone_name:
+ param = {"origin": zone_name.to_text(),
+ "class": zone_class.to_text(),
+ "datasrc": d["type"]}
+ return create_command("loadzone", param)
+ return None
diff --git a/src/lib/python/isc/server_common/dns_tcp.py b/src/lib/python/isc/server_common/dns_tcp.py
new file mode 100644
index 0000000..3b78d0d
--- /dev/null
+++ b/src/lib/python/isc/server_common/dns_tcp.py
@@ -0,0 +1,280 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Utility for handling DNS transactions over TCP.
+
+This module defines a few convenient utility classes for handling DNS
+transactions via a TCP socket.
+
+"""
+
+import isc.log
+from isc.server_common.logger import logger
+from isc.log_messages.server_common_messages import *
+from isc.ddns.logger import ClientFormatter
+import errno
+import socket
+import struct
+
+class DNSTCPSendBuffer:
+ '''A composite buffer for a DNS message sent over TCP.
+
+ This class encapsulates binary data supposed to be a complete DNS
+ message, taking into account the 2-byte length field preceeding the
+ actual data.
+
+ An object of this class is constructed with a binary object for the
+ DNS message data (in wire-format), conceptually "appended" to the
+ 2-byte length field. The length is automatically calculated and
+ converted to the wire-format data in the network byte order.
+
+ Its get_data() method returns a binary object corresponding to the
+ consecutive region of the conceptual buffer starting from the specified
+ position. The returned region may not necessarily contain all remaining
+ data from the specified position; this class can internally hold multiple
+ separate binary objects to represent the conceptual buffer, and,
+ in that case, get_data() identifies the object that contains the
+ specified position of data, and returns the longest consecutive region
+ from that position. So the caller must call get_data(), incrementing
+ the position as it transmits the data, until it gets None.
+
+ This class is primarily intended to be a private utility for the
+ DNSTCPContext class, but can be used by other general applications
+ that need to send DNS messages over TCP in their own way.
+
+ '''
+ def __init__(self, data):
+ '''Consructor.
+
+ Parameter:
+ data (binary): A binary sequence that is supposed to be a
+ complete DNS message in the wire format. It must not
+ exceed 65535 bytes in length; otherwise ValueError will be
+ raised. This class does not check any further validity on
+ the data as a DNS message.
+
+ '''
+ self.__data_size = len(data)
+ self.__len_size = 2 # fixed length
+ if self.__data_size > 0xffff:
+ raise ValueError('Too large data for DNS/TCP, size: ' +
+ str(self.__data_size))
+ self.__lenbuf = struct.pack('H', socket.htons(self.__data_size))
+ self.__databuf = data
+
+ def get_total_len(self):
+ '''Return the total length of the buffer, including the length field.
+
+ '''
+ return self.__data_size + self.__len_size
+
+ def get_data(self, pos):
+ '''Return a portion of data from a specified position.
+
+ Parameter:
+ pos (int): The position in the TCP DNS message data (including
+ the 2-byte length field) from which the data are to be returned.
+
+ Return:
+ A Python binary object that corresponds to a part of the TCP
+ DNS message data starting at the specified position. It may
+ or may not contain all remaining data from that position.
+ If the given position is beyond the end of the enrire data,
+ None will be returned.
+
+ '''
+ if pos >= self.__len_size:
+ pos -= self.__len_size
+ if pos >= self.__data_size:
+ return None
+ return self.__databuf[pos:]
+ return self.__lenbuf[pos:]
+
+class DNSTCPContextError(Exception):
+ '''An exception raised against logic errors in DNSTCPContext.
+
+ This is raised only when the context class is used in an unexpected way,
+ that is for a caller's bug.
+
+ '''
+ pass
+
+class DNSTCPContext:
+ '''Context of a TCP connection used for DNS transactions.
+
+ This class offers the following services:
+ - Handle the initial 2-byte length field internally. The user of
+ this class only has to deal with the bare DNS message (just like
+      the one transmitted over UDP).
+ - Asynchronous I/O. It supports the non blocking operation mode,
+ where method calls never block. The caller is told whether it's
+ ongoing and it should watch the socket or it's fully completed.
+ - Socket error handling: it internally catches socket related exceptions
+ and handle them in an appropriate way. A fatal error will be reported
+ to the caller in the form of a normal return value. The application
+ of this class can therefore assume it's basically exception free.
+
+ Notes:
+ - the initial implementation only supports non blocking mode, but
+ it's intended to be extended so it can work in both blocking or
+ non blocking mode as we see the need for it.
+ - the initial implementation only supports send operations on an
+ already connected socket, but the intent is to extend this class
+ so it can handle receive or connect operations.
+
+ '''
+
+ # Result codes used in send()/send_ready() methods.
+ SEND_DONE = 1
+ SENDING = 2
+ CLOSED = 3
+
+ def __init__(self, sock):
+ '''Constructor.
+
+ Parameter:
+ sock (Python socket): the socket to be used for the transaction.
+ It must represent a TCP socket; otherwise DNSTCPContextError
+ will be raised. It's also expected to be connected, but it's
+ not checked on construction; a subsequent send operation would
+ fail.
+
+ '''
+ if sock.proto != socket.IPPROTO_TCP:
+ raise DNSTCPContextError('not a TCP socket, proto: ' +
+ str(sock.proto))
+ sock.setblocking(False)
+ self.__sock = sock
+ self.__send_buffer = None
+ self.__remote_addr = sock.getpeername() # record it for logging
+
+ def send(self, data):
+ '''Send a DNS message.
+
+ In the non blocking mode, it sends as much data as possible via
+ the underlying TCP socket until it would block or all data are sent
+ out, and returns the corresponding result code. This method
+ therefore doesn't block in this mode.
+
+ Note: the initial implementation only works in the non blocking
+ mode.
+
+ This method must not be called once an error is detected and
+ CLOSED is returned or a prior send attempt is ongoing (with
+ the result code of SENDING); otherwise DNSTCPContextError is
+ raised.
+
+ Parameter:
+ data (binary): A binary sequence that is supposed to be a
+ complete DNS message in the wire format. It must meet
+ the assumption that DNSTCPSendBuffer requires.
+
+ Return:
+ An integer constant representing the result:
+ - SEND_DONE All data have been sent out successfully.
+ - SENDING All writable data has been sent out, and further
+ attempt would block at the moment. The caller is expected
+ to detect it when the underlying socket is writable again
+ and call send_ready() to continue the send.
+ - CLOSED A network error happened before the send operation is
+ completed. The underlying socket has been closed, and this
+ context object will be unusable.
+
+ '''
+ if self.__sock is None:
+ raise DNSTCPContextError('send() called after close')
+ if self.__send_buffer is not None:
+ raise DNSTCPContextError('duplicate send()')
+
+ self.__send_buffer = DNSTCPSendBuffer(data)
+ self.__send_marker = 0
+ return self.__do_send()
+
+ def send_ready(self):
+ '''Resume sending a DNS message.
+
+ This method is expected to be called followed by a send() call or
+ another send_ready() call that resulted in SENDING, when the caller
+ detects the underlying socket becomes writable. It works as
+ send() except that it continues the send operation from the suspended
+ position of the data at the time of the previous call.
+
+ This method must not be called once an error is detected and
+ CLOSED is returned or a send() method hasn't been called to
+ start the operation; otherwise DNSTCPContextError is raised.
+
+ Return: see send().
+
+ '''
+ if self.__sock is None:
+ raise DNSTCPContextError('send() called after close')
+ if self.__send_buffer is None:
+ raise DNSTCPContextError('send_ready() called before send')
+
+ return self.__do_send()
+
+ def __do_send(self):
+ while True:
+ data = self.__send_buffer.get_data(self.__send_marker)
+ if data is None:
+ # send complete; clear the internal buffer for next possible
+ # send.
+ logger.debug(logger.DBGLVL_TRACE_DETAIL,
+ PYSERVER_COMMON_DNS_TCP_SEND_DONE,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker)
+ self.__send_buffer = None
+ self.__send_marker = 0
+ return self.SEND_DONE
+ try:
+ cc = self.__sock.send(data)
+ except socket.error as ex:
+ total_len = self.__send_buffer.get_total_len()
+ if ex.errno == errno.EAGAIN:
+ logger.debug(logger.DBGLVL_TRACE_DETAIL,
+ PYSERVER_COMMON_DNS_TCP_SEND_PENDING,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker, total_len)
+ return self.SENDING
+ logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_ERROR,
+ ClientFormatter(self.__remote_addr),
+ self.__send_marker, total_len, ex)
+ self.__sock.close()
+ self.__sock = None
+ return self.CLOSED
+ self.__send_marker += cc
+
+ def close(self):
+ '''Close the socket.
+
+ This method closes the underlying socket. Once called, the context
+ object is effectively useless; any further method call would result
+ in a DNSTCPContextError exception.
+
+ The underlying socket will be automatically (and implicitly) closed
+ when this object is deallocated, but Python seems to expect socket
+ objects should be explicitly closed before deallocation. So it's
+ generally advisable for the user of this object to call this method
+    explicitly when it doesn't need the context.
+
+ This method can be called more than once or can be called after
+ other I/O related methods have returned CLOSED; it's compatible
+ with the close() method of the Python socket class.
+
+ '''
+ if self.__sock is None:
+ return
+ self.__sock.close()
+        self.__sock = None # prevent further operation
diff --git a/src/lib/python/isc/server_common/logger.py b/src/lib/python/isc/server_common/logger.py
new file mode 100644
index 0000000..7451e05
--- /dev/null
+++ b/src/lib/python/isc/server_common/logger.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Common definitions regarding logging for the server_common package.'''
+
+import isc.log
+
+logger = isc.log.Logger("server_common")
diff --git a/src/lib/python/isc/server_common/server_common_messages.mes b/src/lib/python/isc/server_common/server_common_messages.mes
index b32205c..9eab129 100644
--- a/src/lib/python/isc/server_common/server_common_messages.mes
+++ b/src/lib/python/isc/server_common/server_common_messages.mes
@@ -21,6 +21,35 @@
# have that at this moment. So when adding a message, make sure that
# the name is not already used in src/lib/config/config_messages.mes
+% PYSERVER_COMMON_AUTH_CONFIG_NAME_PARSER_ERROR Invalid name when parsing Auth configuration: %1
+There was an invalid name when parsing Auth configuration.
+
+% PYSERVER_COMMON_AUTH_CONFIG_RRCLASS_ERROR Invalid RRClass when parsing Auth configuration: %1
+There was an invalid RR class when parsing Auth configuration.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_DONE completed sending TCP message to %1 (%2 bytes in total)
+Debug message. A complete DNS message has been successfully
+transmitted over a TCP connection, possibly after multiple send
+operations. The destination address and the total size of the message
+(including the 2-byte length field) are shown in the log message.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4
+A DNS message has been attempted to be sent out over a TCP connection,
+but it failed due to some network error. Although it's not expected
+to happen too often, it can still happen for various reasons. The
+administrator may want to examine the cause of the failure, which is
+included in the log message, to see if it requires some action to
+be taken at the server side. When this message is logged, the
+corresponding TCP connection was closed immediately after the error
+was detected.
+
+% PYSERVER_COMMON_DNS_TCP_SEND_PENDING sent part TCP message to %1 (up to %2/%3 bytes)
+Debug message. A part of DNS message has been transmitted over a TCP
+connection, and it's suspended because further attempt would block.
+The destination address and the total size of the message that has
+been transmitted so far (including the 2-byte length field) are shown
+in the log message.
+
% PYSERVER_COMMON_TSIG_KEYRING_DEINIT Deinitializing global TSIG keyring
A debug message noting that the global TSIG keyring is being removed from
memory. Most programs don't do that, they just exit, which is OK.
diff --git a/src/lib/python/isc/server_common/tests/Makefile.am b/src/lib/python/isc/server_common/tests/Makefile.am
index 4829edc..82cd854 100644
--- a/src/lib/python/isc/server_common/tests/Makefile.am
+++ b/src/lib/python/isc/server_common/tests/Makefile.am
@@ -1,5 +1,5 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = tsig_keyring_test.py
+PYTESTS = tsig_keyring_test.py dns_tcp_test.py
EXTRA_DIST = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
@@ -20,5 +20,6 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/server_common/tests/dns_tcp_test.py b/src/lib/python/isc/server_common/tests/dns_tcp_test.py
new file mode 100644
index 0000000..7e74c04
--- /dev/null
+++ b/src/lib/python/isc/server_common/tests/dns_tcp_test.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Tests for isc.server_common.dns_tcp'''
+
+import isc.log
+from isc.server_common.dns_tcp import *
+import socket
+import errno
+import unittest
+
+def check_length_field(assert_eq, len_data, expected_len):
+ # Examine the "length field" part of the data. It should be 2-byte field,
+ # and (in our implementation) always given as a separate chunk of data.
+ # The 16-bit length value of the actual data should be stored in the
+ # network byte order.
+ len_high = (expected_len >> 8) & 0x00ff
+ len_low = (expected_len & 0x00ff)
+ assert_eq(2, len(len_data))
+ assert_eq(len_high, len_data[0])
+ assert_eq(len_low, len_data[1])
+
+class BufferTest(unittest.TestCase):
+ def check_length_field(self, buf, expected_len):
+ '''Common subtest for the main tests that checks the length buffer.'''
+ check_length_field(self.assertEqual, buf.get_data(0), expected_len)
+
+ # Confirm the get_data(1) returns the latter half of the (partial)
+ # buffer.
+ self.assertEqual(1, len(buf.get_data(1)))
+ self.assertEqual(expected_len & 0x00ff, buf.get_data(1)[0])
+
+ def test_small_data(self):
+ # The smallest size (in practice) of data: that of the header field.
+ expected_data = b'x' * 12
+ buf = DNSTCPSendBuffer(expected_data)
+ self.check_length_field(buf, 12)
+
+ self.assertEqual(expected_data, buf.get_data(2))
+ self.assertEqual(b'x' * 11, buf.get_data(3))
+ self.assertEqual(None, buf.get_data(14))
+
+ def test_large_data(self):
+ # Test with an arbitrarily large size of data.
+ buf = DNSTCPSendBuffer(b'x' * 65534)
+ self.check_length_field(buf, 65534)
+ self.assertEqual(b'x' * 65534, buf.get_data(2))
+ self.assertEqual(b'x' * 2, buf.get_data(65534))
+ self.assertEqual(None, buf.get_data(65536))
+
+ def test_largest_data(self):
+ # This is the largest possible size of DNS message.
+ buf = DNSTCPSendBuffer(b'y' * 65535)
+ self.check_length_field(buf, 65535)
+ self.assertEqual(b'y', buf.get_data(65536))
+ self.assertEqual(None, buf.get_data(65537))
+
+ def test_too_large_data(self):
+ # The maximum possible size of a valid DNS message is 65535.
+ # Beyond that, the buffer construction should result in an exception.
+ self.assertRaises(ValueError, DNSTCPSendBuffer, b'0' * 65536)
+
+ def test_empty_data(self):
+ # Unusual, but it's not rejected
+ buf = DNSTCPSendBuffer(b'')
+ self.check_length_field(buf, 0)
+ self.assertEqual(None, buf.get_data(2))
+
+ def test_get_total_len(self):
+ self.assertEqual(14, DNSTCPSendBuffer(b'x' * 12).get_total_len())
+ self.assertEqual(2, DNSTCPSendBuffer(b'').get_total_len())
+ self.assertEqual(65537, DNSTCPSendBuffer(b'X' * 65535).get_total_len())
+
+class FakeSocket:
+ '''Emulating python socket w/o involving IO while allowing inspection.'''
+ def __init__(self, proto=socket.IPPROTO_TCP):
+ self._setblocking_val = None # record the latest value of setblocking()
+ self._closed = False # set to True on close()
+ self._sent_data = [] # record the transmitted data in send()
+ self._send_buflen = None # capacity of the faked "send buffer";
+ # None means infinity, -1 means "closed"
+ self._send_cc = 0 # waterline of the send buffer
+ self.proto = proto # protocol (should be TCP, but can be faked)
+
+ def setblocking(self, on):
+ self._setblocking_val = on
+
+ def close(self):
+ self._closed = True
+
+ def send(self, data):
+ # Calculate the available space in the "send buffer"
+ if self._send_buflen == -1:
+ raise socket.error(errno.EPIPE, "Broken pipe")
+ elif self._send_buflen is None:
+ available_space = len(data)
+ else:
+ available_space = self._send_buflen - self._send_cc
+ if available_space == 0:
+ # if there's no space, (assuming it's nonblocking mode) raise
+ # EAGAIN.
+ raise socket.error(errno.EAGAIN,
+ "Resource temporarily unavailable")
+ # determine the sendable part of the data, record it, update "buffer".
+ cc = min(available_space, len(data))
+ self._sent_data.append(data[:cc])
+ self._send_cc += cc
+ return cc
+
+ def make_send_ready(self):
+ # pretend that the accrued data has been cleared, making room in
+ # the send buffer.
+ self._send_cc = 0
+
+ def getpeername(self):
+ '''Return faked remote address'''
+ return ('2001:db8::1', 53000, 0, 0)
+
+class ContextTest(unittest.TestCase):
+ def setUp(self):
+ self.__sock = FakeSocket()
+ # there should be no setblocking value on the fake socket by default.
+ self.assertEqual(None, self.__sock._setblocking_val)
+ self.__ctx = DNSTCPContext(self.__sock)
+ # dummy data that has the same length as the DNS header section:
+ self.__test_data = b'x' * 12
+
+ def test_initialization(self):
+ # Creating a context (in setUp) should make the socket non-blocking.
+ self.assertFalse(self.__sock._setblocking_val)
+
+ # Only a TCP socket is accepted.
+ self.assertRaises(DNSTCPContextError, DNSTCPContext,
+ FakeSocket(proto=socket.IPPROTO_UDP))
+
+ def test_send_all(self):
+ # By default, a single send() call will send out all data by 2
+ # send() calls: one for the 2-byte length data and the other for the
+ # actual data.
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[0],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data, self.__sock._sent_data[1])
+
+ def test_send_divided(self):
+ # set the "send buffer" of the fake socket to 7 (half of the size of
+ # len + data).
+ self.__sock._send_buflen = 7
+
+ # The initial send() can only send out the half of the data in
+ # two calls to socket.send(): the first one for the length field,
+ # and the other is for the first 5 bytes of the data
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[0],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data[:5], self.__sock._sent_data[1])
+
+ # "flush" the send buffer of the fake socket
+ self.__sock.make_send_ready()
+
+ # send_ready() can now complete the send. The remaining data should
+ # have been passed.
+ self.assertEqual(DNSTCPContext.SEND_DONE, self.__ctx.send_ready())
+ self.assertEqual(3, len(self.__sock._sent_data))
+ self.assertEqual(self.__test_data[5:], self.__sock._sent_data[2])
+
+ def test_send_multi(self):
+ # On a successful completion of send, another send can be done.
+ for i in (0, 2):
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.assertEqual(i + 2, len(self.__sock._sent_data))
+ check_length_field(self.assertEqual, self.__sock._sent_data[i],
+ len(self.__test_data))
+ self.assertEqual(self.__test_data, self.__sock._sent_data[i + 1])
+
+ def test_send_reset(self):
+ # the connection will be "reset" before the initial send.
+ # send() should return CLOSED, and the underlying socket should be
+ # closed.
+ self.__sock._send_buflen = -1
+ self.assertEqual(DNSTCPContext.CLOSED,
+ self.__ctx.send(self.__test_data))
+ self.assertTrue(self.__sock._closed)
+
+ # Once closed, send() cannot be called any more
+ self.assertRaises(DNSTCPContextError, self.__ctx.send,
+ self.__test_data)
+ # Calling close() is okay (it's NO-OP)
+ self.__ctx.close()
+
+ def test_send_divided_reset(self):
+ # Similar to send_reset, but send() succeeds, and then the connection
+ # will be "reset".
+ self.__sock._send_buflen = 7
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.__sock._send_buflen = -1
+ self.assertEqual(DNSTCPContext.CLOSED, self.__ctx.send_ready())
+ self.assertTrue(self.__sock._closed)
+
+ # Once closed, send_ready() cannot be called any more
+ self.assertRaises(DNSTCPContextError, self.__ctx.send_ready)
+
+ def test_duplicate_send(self):
+ # send() cannot be called until it's completed
+ self.__sock._send_buflen = 7
+ self.assertEqual(DNSTCPContext.SENDING,
+ self.__ctx.send(self.__test_data))
+ self.assertRaises(DNSTCPContextError, self.__ctx.send,
+ self.__test_data)
+
+ def test_skip_send(self):
+ # send_ready() cannot be called before send().
+ self.assertRaises(DNSTCPContextError, self.__ctx.send_ready)
+
+ def test_close(self):
+ self.assertEqual(DNSTCPContext.SEND_DONE,
+ self.__ctx.send(self.__test_data))
+ self.__ctx.close()
+ self.assertTrue(self.__sock._closed)
+
+ # Duplicate close is just ignored, and the socket is still closed.
+ self.__ctx.close()
+ self.assertTrue(self.__sock._closed)
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/server_common/tsig_keyring.py b/src/lib/python/isc/server_common/tsig_keyring.py
index 308cfd4..de3b759 100644
--- a/src/lib/python/isc/server_common/tsig_keyring.py
+++ b/src/lib/python/isc/server_common/tsig_keyring.py
@@ -20,10 +20,10 @@ tsig_keys module.
import isc.dns
import isc.log
+from isc.server_common.logger import logger
from isc.log_messages.server_common_messages import *
updater = None
-logger = isc.log.Logger("server_common")
class Unexpected(Exception):
"""
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index 2d3f937..ea51967 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -24,7 +24,8 @@ But for now, it lives here.
 """
 import isc.dns
+from isc.datasrc import ZoneFinder
 import isc.log
 from isc.log_messages.libxfrin_messages import *
class NoSuchZone(Exception):
@@ -119,7 +121,7 @@ class Diff:
else:
self.__buffer = []
- def __check_commited(self):
+ def __check_committed(self):
"""
This checks if the diff is already commited or broken. If it is, it
raises ValueError. This check is for methods that need to work only on
@@ -169,7 +171,7 @@ class Diff:
- in single_update_mode if any later rr is of type SOA (both for
addition and deletion)
"""
- self.__check_commited()
+ self.__check_committed()
if rr.get_rdata_count() != 1:
raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
'it holds ' + str(rr.get_rdata_count()))
@@ -179,9 +181,13 @@ class Diff:
str(self.__updater.get_class()))
if self.__single_update_mode:
if operation == 'add':
- self.__append_with_soa_check(self.__additions, operation, rr)
+ if not self._remove_rr_from_deletions(rr):
+ self.__append_with_soa_check(self.__additions, operation,
+ rr)
elif operation == 'delete':
- self.__append_with_soa_check(self.__deletions, operation, rr)
+ if not self._remove_rr_from_additions(rr):
+ self.__append_with_soa_check(self.__deletions, operation,
+ rr)
else:
self.__buffer.append((operation, rr))
if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
@@ -298,7 +304,7 @@ class Diff:
else:
raise ValueError('Unknown operation ' + operation)
- self.__check_commited()
+ self.__check_committed()
# First, compact the data
self.compact()
try:
@@ -330,7 +336,7 @@ class Diff:
This might raise isc.datasrc.Error.
"""
- self.__check_commited()
+ self.__check_committed()
# Push the data inside the data source
self.apply()
# Make sure they are visible.
@@ -376,3 +382,205 @@ class Diff:
raise ValueError("Separate buffers requested in single-update mode")
else:
return (self.__deletions, self.__additions)
+
+ def find(self, name, rrtype,
+ options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)):
+ """
+ Calls the find() method in the ZoneFinder associated with this
+ Diff's ZoneUpdater, i.e. the find() on the zone as it was on the
+ moment this Diff object got created.
+ See the ZoneFinder documentation for a full description.
+ Note that the result does not include changes made in this Diff
+ instance so far.
+ Options default to NO_WILDCARD and FIND_GLUE_OK.
+ Raises a ValueError if the Diff has been committed already
+ """
+ self.__check_committed()
+ return self.__updater.find(name, rrtype, options)
+
+ def find_all(self, name,
+ options=(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK)):
+ """
+ Calls the find_all() method in the ZoneFinder associated with this
+ Diff's ZoneUpdater, i.e. the find_all() on the zone as it was on the
+ moment this Diff object got created.
+ See the ZoneFinder documentation for a full description.
+ Note that the result does not include changes made in this Diff
+ instance so far.
+ Options default to NO_WILDCARD and FIND_GLUE_OK.
+ Raises a ValueError if the Diff has been committed already
+ """
+ self.__check_committed()
+ return self.__updater.find_all(name, options)
+
+ def __remove_rr_from_buffer(self, buf, rr):
+ '''Helper for common code in remove_rr_from_deletions() and
+ remove_rr_from_additions();
+ returns the result of the removal operation on the given buffer
+ '''
+ def same_rr(a, b):
+ # Consider two rr's the same if name, type, and rdata match
+ # Note that at this point it should have been checked that
+ # the rr in the buffer and the given rr have exactly one rdata
+ return a.get_name() == b.get_name() and\
+ a.get_type() == b.get_type() and\
+ a.get_rdata()[0] == b.get_rdata()[0]
+ if rr.get_type() == isc.dns.RRType.SOA():
+ return buf
+ else:
+ return [ op for op in buf if not same_rr(op[1], rr)]
+
+ def _remove_rr_from_deletions(self, rr):
+ '''
+ Removes the given rr from the currently buffered deletions;
+ returns True if anything is removed, False if the RR was not present.
+ This method is protected; it is not meant to be called from anywhere
+ but the add_data() method. It is not private for easier testing.
+ '''
+ orig_size = len(self.__deletions)
+ self.__deletions = self.__remove_rr_from_buffer(self.__deletions, rr)
+ return len(self.__deletions) != orig_size
+
+ def _remove_rr_from_additions(self, rr):
+ '''
+ Removes the given rr from the currently buffered additions;
+ returns True if anything is removed, False if the RR was not present.
+ This method is protected; it is not meant to be called from anywhere
+ but the delete_data() method. It is not private for easier testing.
+ '''
+ orig_size = len(self.__additions)
+ self.__additions = self.__remove_rr_from_buffer(self.__additions, rr)
+ return len(self.__additions) != orig_size
+
+ def __get_name_from_additions(self, name):
+ '''
+ Returns a list of all rrs in the additions queue that have the given
+ Name.
+ This method is private; it is only meant to be called from the
+ find_all_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__additions if rr.get_name() == name ]
+
+ def __get_name_from_deletions(self, name):
+ '''
+ Returns a list of all rrs in the deletions queue that have the given
+ Name
+ This method is private; it is only meant to be called from the
+ find_all_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__deletions if rr.get_name() == name ]
+
+ def __get_name_type_from_additions(self, name, rrtype):
+ '''
+ Returns a list of the rrs in the additions queue that
+ have the given name and type
+ This method is private; it is only meant to be called from the
+ find_updated() method.
+ '''
+ return [ rr for (_, rr) in self.__additions\
+ if rr.get_name() == name and rr.get_type() == rrtype ]
+
+ def __get_name_type_from_deletions(self, name, rrtype):
+ '''
+ Returns a list of the rdatas of the rrs in the deletions queue that
+ have the given name and type
+ This method is private; it is only meant to be called from the
+ find_updated() method.
+ '''
+ return [ rr.get_rdata()[0] for (_, rr) in self.__deletions\
+ if rr.get_name() == name and rr.get_type() == rrtype ]
+
+ def find_updated(self, name, rrtype):
+ '''
+ Returns the result of find(), but with current updates applied, i.e.
+ as if this diff has been committed. Only performs additional
+ processing in the case find() returns SUCCESS, NXDOMAIN, or NXRRSET;
+ in all other cases, the results are returned directly.
+ Any RRs in the current deletions buffer are removed from the result,
+ and RRs in the current additions buffer are added to the result.
+ If the result was SUCCESS, but every RR in it is removed due to
+ deletions, and there is nothing in the additions, the rcode is changed
+ to NXRRSET.
+ If the result was NXDOMAIN or NXRRSET, and there are rrs in the
+ additions buffer, the result is changed to SUCCESS.
+ '''
+ if not self.__single_update_mode:
+ raise ValueError("find_updated() can only be used in " +
+ "single-update mode")
+ result, rrset, flags = self.find(name, rrtype)
+
+ added_rrs = self.__get_name_type_from_additions(name, rrtype)
+ deleted_rrs = self.__get_name_type_from_deletions(name, rrtype)
+
+ if result == ZoneFinder.SUCCESS:
+ new_rrset = isc.dns.RRset(name, self.__updater.get_class(),
+ rrtype, rrset.get_ttl())
+ for rdata in rrset.get_rdata():
+ if rdata not in deleted_rrs:
+ new_rrset.add_rdata(rdata)
+ # If all data has been deleted, and there is nothing to add
+ # we cannot really know whether it is NXDOMAIN or NXRRSET,
+ # NXRRSET seems safest (we could find out, but it would require
+ # another search on the name which is probably not worth the
+ # trouble)
+ if new_rrset.get_rdata_count() == 0 and len(added_rrs) == 0:
+ result = ZoneFinder.NXRRSET
+ new_rrset = None
+ elif (result == ZoneFinder.NXDOMAIN or result == ZoneFinder.NXRRSET)\
+ and len(added_rrs) > 0:
+ new_rrset = isc.dns.RRset(name, self.__updater.get_class(),
+ rrtype, added_rrs[0].get_ttl())
+ # There was no data in the zone, but there is data now
+ result = ZoneFinder.SUCCESS
+ else:
+ # Can't reliably handle other cases, just return the original
+ # data
+ return result, rrset, flags
+
+ for rr in added_rrs:
+ # Can only be 1-rr RRsets at this point
+ new_rrset.add_rdata(rr.get_rdata()[0])
+
+ return result, new_rrset, flags
+
+ def find_all_updated(self, name):
+ '''
+ Returns the result of find_all(), but with current updates applied,
+ i.e. as if this diff has been committed. Only performs additional
+ processing in the case find() returns SUCCESS or NXDOMAIN;
+ in all other cases, the results are returned directly.
+ Any RRs in the current deletions buffer are removed from the result,
+ and RRs in the current additions buffer are added to the result.
+ If the result was SUCCESS, but every RR in it is removed due to
+ deletions, and there is nothing in the additions, the rcode is changed
+ to NXDOMAIN.
+ If the result was NXDOMAIN, and there are rrs in the additions buffer,
+ the result is changed to SUCCESS.
+ '''
+ if not self.__single_update_mode:
+ raise ValueError("find_all_updated can only be used in " +
+ "single-update mode")
+ result, rrsets, flags = self.find_all(name)
+ new_rrsets = []
+ added_rrs = self.__get_name_from_additions(name)
+ if result == ZoneFinder.SUCCESS and\
+ (flags & ZoneFinder.RESULT_WILDCARD == 0):
+ deleted_rrs = self.__get_name_from_deletions(name)
+ for rr in rrsets:
+ if rr not in deleted_rrs:
+ new_rrsets.append(rr)
+ if len(new_rrsets) == 0 and len(added_rrs) == 0:
+ result = ZoneFinder.NXDOMAIN
+ elif result == ZoneFinder.NXDOMAIN and\
+ len(added_rrs) > 0:
+ result = ZoneFinder.SUCCESS
+ else:
+ # Can't reliably handle other cases, just return the original
+ # data
+ return result, rrsets, flags
+ for rr in added_rrs:
+ if rr.get_name() == name:
+ new_rrsets.append(rr)
+ return result, new_rrsets, flags
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
index 416d62b..459efc3 100644
--- a/src/lib/python/isc/xfrin/tests/Makefile.am
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -20,5 +20,6 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 6c545d2..906406f 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,7 +15,7 @@
import isc.log
import unittest
-import isc.datasrc
+from isc.datasrc import ZoneFinder
from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
from isc.xfrin.diff import Diff, NoSuchZone
@@ -48,6 +48,13 @@ class DiffTest(unittest.TestCase):
self.__broken_called = False
self.__warn_called = False
self.__should_replace = False
+ self.__find_called = False
+ self.__find_name = None
+ self.__find_type = None
+ self.__find_options = None
+ self.__find_all_called = False
+ self.__find_all_name = None
+ self.__find_all_options = None
# Some common values
self.__rrclass = RRClass.IN()
self.__type = RRType.A()
@@ -70,6 +77,31 @@ class DiffTest(unittest.TestCase):
self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
'192.0.2.2'))
+ # Also create a few other (valid) rrsets
+ # A SOA record
+ self.__rrset_soa = RRset(Name('example.org.'), self.__rrclass,
+ RRType.SOA(), RRTTL(3600))
+ self.__rrset_soa.add_rdata(Rdata(RRType.SOA(), self.__rrclass,
+ "ns1.example.org. " +
+ "admin.example.org. " +
+ "1233 3600 1800 2419200 7200"))
+ # A few single-rr rrsets that together would form a multi-rr rrset
+ self.__rrset3 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset3.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "one"))
+ self.__rrset4 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset4.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "two"))
+ self.__rrset5 = RRset(Name('c.example.org.'), self.__rrclass,
+ RRType.TXT(), self.__ttl)
+ self.__rrset5.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "three"))
+ self.__rrset6 = RRset(Name('d.example.org.'), self.__rrclass,
+ RRType.A(), self.__ttl)
+ self.__rrset6.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.1"))
+ self.__rrset7 = RRset(Name('d.example.org.'), self.__rrclass,
+ RRType.A(), self.__ttl)
+ self.__rrset7.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+
def __mock_compact(self):
"""
This can be put into the diff to hook into its compact method and see
@@ -156,6 +188,23 @@ class DiffTest(unittest.TestCase):
return self
+ def find(self, name, rrtype, options=None):
+ self.__find_called = True
+ self.__find_name = name
+ self.__find_type = rrtype
+ self.__find_options = options
+ # Doesn't really matter what is returned, as long
+ # as the test can check that it's passed along
+ return "find_return"
+
+ def find_all(self, name, options=None):
+ self.__find_all_called = True
+ self.__find_all_name = name
+ self.__find_all_options = options
+ # Doesn't really matter what is returned, as long
+ # as the test can check that it's passed along
+ return "find_all_return"
+
def test_create(self):
"""
This test the case when the diff is successfuly created. It just
@@ -265,6 +314,9 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff.commit)
self.assertRaises(ValueError, diff.add_data, self.__rrset2)
self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.find, Name('foo.example.org.'),
+ RRType.A())
+ self.assertRaises(ValueError, diff.find_all, Name('foo.example.org.'))
diff.apply = orig_apply
self.assertRaises(ValueError, diff.apply)
# This one does not state it should raise, so check it doesn't
@@ -498,6 +550,17 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff_single.get_buffer)
self.assertEqual(([], []), diff_single.get_single_update_buffers())
+ def test_finds_single(self):
+ '''
+ Test that find_updated() and find_all_updated() can only be used
+ in single-update-mode.
+ '''
+ diff_multi = Diff(self, Name('example.org.'), single_update_mode=False)
+ self.assertRaises(ValueError, diff_multi.find_updated,
+ Name('example.org.'), RRType.A())
+ self.assertRaises(ValueError, diff_multi.find_all_updated,
+ Name('example.org.'))
+
def test_single_update_mode(self):
'''
Test single-update mode. In this mode, updates and deletes can
@@ -506,55 +569,47 @@ class DiffTest(unittest.TestCase):
and it must be the first change.
'''
- # First create some RRsets to play with
- soa = RRset(Name('example.org.'), self.__rrclass, RRType.SOA(),
- RRTTL(3600))
- soa.add_rdata(Rdata(soa.get_type(), soa.get_class(),
- "ns.example.org. foo.example.org. 1234 28800 "+
- "7200 604800 3600"))
-
- a = RRset(Name('www.example.org.'), self.__rrclass, RRType.A(),
- RRTTL(3600))
- a.add_rdata(Rdata(a.get_type(), a.get_class(),
- "192.0.2.1"))
-
- a2 = RRset(Name('www.example.org.'), self.__rrclass, RRType.A(),
- RRTTL(3600))
- a2.add_rdata(Rdata(a2.get_type(), a2.get_class(),
- "192.0.2.2"))
-
# full rrset for A (to check compact())
- a_1_2 = RRset(Name('www.example.org.'), self.__rrclass, RRType.A(),
- RRTTL(3600))
- a_1_2.add_rdata(Rdata(a_1_2.get_type(), a_1_2.get_class(),
- "192.0.2.1"))
- a_1_2.add_rdata(Rdata(a_1_2.get_type(), a_1_2.get_class(),
- "192.0.2.2"))
+ txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT(),
+ RRTTL(3600))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "one"))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "two"))
+ txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "three"))
+ a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A(),
+ RRTTL(3600))
+ a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.1"))
+ a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.2"))
diff = Diff(self, Name('example.org.'), single_update_mode=True)
# adding a first should fail
self.assertRaises(ValueError, diff.add_data, a)
# But soa should work
- diff.add_data(soa)
+ diff.add_data(self.__rrset_soa)
# And then A should as well
- diff.add_data(a)
- diff.add_data(a2)
+ diff.add_data(self.__rrset3)
+ diff.add_data(self.__rrset4)
+ diff.add_data(self.__rrset5)
# But another SOA should fail again
- self.assertRaises(ValueError, diff.add_data, soa)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset_soa)
# Same for delete
- self.assertRaises(ValueError, diff.delete_data, a)
- diff.delete_data(soa)
- diff.delete_data(a)
- diff.delete_data(a2)
- self.assertRaises(ValueError, diff.delete_data, soa)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset6)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset6)
+ diff.delete_data(self.__rrset7)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset_soa)
# Not compacted yet, so the buffers should be as we
# filled them
(delbuf, addbuf) = diff.get_single_update_buffers()
- self.assertEqual([('delete', soa), ('delete', a), ('delete', a2)], delbuf)
- self.assertEqual([('add', soa), ('add', a), ('add', a2)], addbuf)
+ self.assertEqual([('delete', self.__rrset_soa),
+ ('delete', self.__rrset6),
+ ('delete', self.__rrset7)], delbuf)
+ self.assertEqual([('add', self.__rrset_soa),
+ ('add', self.__rrset3),
+ ('add', self.__rrset4),
+ ('add', self.__rrset5)], addbuf)
# Compact should compact the A records in both buffers
diff.compact()
@@ -563,18 +618,18 @@ class DiffTest(unittest.TestCase):
self.assertEqual(2, len(delbuf))
self.assertEqual(2, len(delbuf[0]))
self.assertEqual('delete', delbuf[0][0])
- self.assertEqual(soa.to_text(), delbuf[0][1].to_text())
+ self.assertEqual(self.__rrset_soa.to_text(), delbuf[0][1].to_text())
self.assertEqual(2, len(delbuf[1]))
self.assertEqual('delete', delbuf[1][0])
- self.assertEqual(a_1_2.to_text(), delbuf[1][1].to_text())
+ self.assertEqual(a.to_text(), delbuf[1][1].to_text())
self.assertEqual(2, len(addbuf))
self.assertEqual(2, len(addbuf[0]))
self.assertEqual('add', addbuf[0][0])
- self.assertEqual(soa.to_text(), addbuf[0][1].to_text())
+ self.assertEqual(self.__rrset_soa.to_text(), addbuf[0][1].to_text())
self.assertEqual(2, len(addbuf[1]))
self.assertEqual('add', addbuf[1][0])
- self.assertEqual(a_1_2.to_text(), addbuf[1][1].to_text())
+ self.assertEqual(txt.to_text(), addbuf[1][1].to_text())
# Apply should reset the buffers
diff.apply()
@@ -587,6 +642,450 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff.add_data, a)
self.assertRaises(ValueError, diff.delete_data, a)
+ def test_add_delete_same(self):
+ '''
+ Test that if a record is added, then deleted, it is not added to
+ both buffers, but removed from the additions, and vice versa
+ '''
+ diff = Diff(self, Name('example.org.'), single_update_mode=True)
+ # Need SOA records first
+ diff.delete_data(self.__rrset_soa)
+ diff.add_data(self.__rrset_soa)
+
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.add_data(self.__rrset1)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(2, len(additions))
+
+ diff.delete_data(self.__rrset1)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.delete_data(self.__rrset2)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(2, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ diff.add_data(self.__rrset2)
+ deletions, additions = diff.get_single_update_buffers()
+ self.assertEqual(1, len(deletions))
+ self.assertEqual(1, len(additions))
+
+ def test_find(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+ rrtype = RRType.A()
+
+ self.assertFalse(self.__find_called)
+ self.assertEqual(None, self.__find_name)
+ self.assertEqual(None, self.__find_type)
+ self.assertEqual(None, self.__find_options)
+
+ self.assertEqual("find_return", diff.find(name, rrtype))
+
+ self.assertTrue(self.__find_called)
+ self.assertEqual(name, self.__find_name)
+ self.assertEqual(rrtype, self.__find_type)
+ self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK,
+ self.__find_options)
+
+ def test_find_options(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('foo.example.org.')
+ rrtype = RRType.TXT()
+ options = ZoneFinder.NO_WILDCARD
+
+ self.assertEqual("find_return", diff.find(name, rrtype, options))
+
+ self.assertTrue(self.__find_called)
+ self.assertEqual(name, self.__find_name)
+ self.assertEqual(rrtype, self.__find_type)
+ self.assertEqual(options, self.__find_options)
+
+ def test_find_all(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+
+ self.assertFalse(self.__find_all_called)
+ self.assertEqual(None, self.__find_all_name)
+ self.assertEqual(None, self.__find_all_options)
+
+ self.assertEqual("find_all_return", diff.find_all(name))
+
+ self.assertTrue(self.__find_all_called)
+ self.assertEqual(name, self.__find_all_name)
+ self.assertEqual(ZoneFinder.NO_WILDCARD | ZoneFinder.FIND_GLUE_OK,
+ self.__find_all_options)
+
+ def test_find_all_options(self):
+ diff = Diff(self, Name('example.org.'))
+ name = Name('www.example.org.')
+ options = ZoneFinder.NO_WILDCARD
+
+ self.assertFalse(self.__find_all_called)
+ self.assertEqual(None, self.__find_all_name)
+ self.assertEqual(None, self.__find_all_options)
+
+ self.assertEqual("find_all_return", diff.find_all(name, options))
+
+ self.assertTrue(self.__find_all_called)
+ self.assertEqual(name, self.__find_all_name)
+ self.assertEqual(options, self.__find_all_options)
+
+ def __common_remove_rr_from_buffer(self, diff, add_method, remove_method,
+ op_str, buf_nr):
+ add_method(self.__rrset_soa)
+ add_method(self.__rrset2)
+ add_method(self.__rrset3)
+ add_method(self.__rrset4)
+
+ # sanity check
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset2,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove one
+ self.assertTrue(remove_method(self.__rrset2))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # SOA should not be removed
+ self.assertFalse(remove_method(self.__rrset_soa))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3,
+ self.__rrset4 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove another
+ self.assertTrue(remove_method(self.__rrset4))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ # remove nonexistent should return False
+ self.assertFalse(remove_method(self.__rrset4))
+ buf = diff.get_single_update_buffers()[buf_nr]
+ expected = [ (op_str, str(rr)) for rr in [ self.__rrset_soa,
+ self.__rrset3 ] ]
+ result = [ (op, str(rr)) for (op, rr) in buf ]
+ self.assertEqual(expected, result)
+
+ def test_remove_rr_from_additions(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ self.__common_remove_rr_from_buffer(diff, diff.add_data,
+ diff._remove_rr_from_additions,
+ 'add', 1)
+
+ def test_remove_rr_from_deletions(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ self.__common_remove_rr_from_buffer(diff, diff.delete_data,
+ diff._remove_rr_from_deletions,
+ 'delete', 0)
+
+ def __create_find(self, result, rrset, flags):
+ '''
+ Overwrites the local find() method with a method that returns
+ the tuple (result, rrset, flags)
+ '''
+ def new_find(name, rrtype, fflags):
+ return (result, rrset, flags)
+ self.find = new_find
+
+ def __create_find_all(self, result, rrsets, flags):
+ '''
+ Overwrites the local find_all() method with a method that returns
+ the tuple (result, rrsets, flags)
+ '''
+ def new_find_all(name, fflags):
+ return (result, rrsets, flags)
+ self.find_all = new_find_all
+
+ def __check_find_call(self, method, query_rrset, expected_rcode,
+ expected_rdatas=None):
+ '''
+ Helper for find tests; calls the given method with the name and
+ type of the given rrset. Checks for the given rcode.
+ If expected_rdatas is not none, the result name, and type are
+ checked to match the given rrset ones, and the rdatas are checked
+ to be equal.
+ The given method must have the same arguments and return type
+ as find()
+ '''
+ result, rrset, _ = method(query_rrset.get_name(),
+ query_rrset.get_type())
+ self.assertEqual(expected_rcode, result)
+ if expected_rdatas is not None:
+ self.assertEqual(query_rrset.get_name(), rrset.get_name())
+ self.assertEqual(query_rrset.get_type(), rrset.get_type())
+ if expected_rdatas is not None:
+ self.assertEqual(expected_rdatas, rrset.get_rdata())
+ else:
+ self.assertEqual(None, rrset)
+
+ def __check_find_all_call(self, method, query_rrset, expected_rcode,
+ expected_rrs=[]):
+ '''
+ Helper for find tests; calls the given method with the name and
+ type of the given rrset. Checks for the given rcode.
+ The resulting list of rrsets is checked to be equal to the given
+ expected_rrs list (empty by default).
+ The given method must have the same arguments and return type
+ as find_all()
+ '''
+ result, rrsets, _ = method(query_rrset.get_name())
+ self.assertEqual(expected_rcode, result)
+ # We have no real equality function for rrsets, but since
+ # the rrsets in question are themselves returns, pointer equality
+ # works as well
+ self.assertEqual(expected_rrs, rrsets)
+
+ def test_find_updated_existing_data(self):
+ '''
+ Tests whether existent data is updated with the additions and
+ deletions from the Diff
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find(ZoneFinder.SUCCESS, self.__rrset3, 0)
+
+ # sanity check
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # check that normal find also returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Adding another should have it returned in the find_updated
+ diff.add_data(self.__rrset4)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata() +
+ self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Adding a different type should have no effect
+ diff.add_data(self.__rrset2)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata() +
+ self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Deleting 3 now should result in only 4 being updated
+ diff.delete_data(self.__rrset3)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset4.get_rdata())
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ # Deleting 4 now should result in empty rrset
+ diff.delete_data(self.__rrset4)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXRRSET)
+
+ # check that normal find still returns the original data
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+
+ def test_find_updated_nonexistent_data(self):
+ '''
+ Test whether added data for a query that would originally result
+ in NXDOMAIN works
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find(ZoneFinder.NXDOMAIN, None, 0)
+
+ # Sanity check
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Add data and see it is returned
+ diff.add_data(self.__rrset3)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Add unrelated data, result should be the same
+ diff.add_data(self.__rrset2)
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, self.__rrset3.get_rdata())
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ # Remove, result should now be NXDOMAIN again
+ diff.delete_data(self.__rrset3)
+ result, rrset, _ = diff.find_updated(self.__rrset3.get_name(),
+ self.__rrset3.get_type())
+ self.__check_find_call(diff.find_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_call(diff.find, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+
+ def test_find_updated_other(self):
+ '''
+ Test that any other ZoneFinder.result code is directly
+ passed on.
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+
+ # Add and delete some data to make sure it's not used
+ diff.add_data(self.__rrset_soa)
+ diff.add_data(self.__rrset3)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset2)
+
+ for rcode in [ ZoneFinder.DELEGATION,
+ ZoneFinder.CNAME,
+ ZoneFinder.DNAME ]:
+ # override the actual find method
+ self.__create_find(rcode, None, 0)
+ self.__check_find_call(diff.find, self.__rrset3, rcode)
+ self.__check_find_call(diff.find_updated, self.__rrset3, rcode)
+
+ def test_find_all_existing_data(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ # override the actual find method
+ self.__create_find_all(ZoneFinder.SUCCESS, [ self.__rrset3 ], 0)
+
+ # Sanity check
+ result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name())
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual([self.__rrset3], rrsets)
+
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # Add a second rr with different type at same name
+ add_rrset = RRset(self.__rrset3.get_name(), self.__rrclass,
+ RRType.A(), self.__ttl)
+ add_rrset.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ diff.add_data(add_rrset)
+
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS,
+ [self.__rrset3, add_rrset])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # Remove original one
+ diff.delete_data(self.__rrset3)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.SUCCESS, [add_rrset])
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ # And remove new one, result should then become NXDOMAIN
+ diff.delete_data(add_rrset)
+ result, rrsets, _ = diff.find_all_updated(self.__rrset3.get_name())
+
+ self.assertEqual(ZoneFinder.NXDOMAIN, result)
+ self.assertEqual([ ], rrsets)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ ZoneFinder.SUCCESS, [self.__rrset3])
+
+ def test_find_all_nonexistent_data(self):
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset_soa)
+
+ self.__create_find_all(ZoneFinder.NXDOMAIN, [], 0)
+
+ # Sanity check
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Adding data should change the result
+ diff.add_data(self.__rrset2)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.SUCCESS, [ self.__rrset2 ])
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Adding data at other name should not
+ diff.add_data(self.__rrset3)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.SUCCESS, [ self.__rrset2 ])
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ # Deleting it should revert to original
+ diff.delete_data(self.__rrset2)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ ZoneFinder.NXDOMAIN)
+
+ def test_find_all_other_results(self):
+ '''
+ Any result code other than SUCCESS and NXDOMAIN should cause
+ the results to be passed on directly
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+
+ # Add and delete some data to make sure it's not used
+ diff.add_data(self.__rrset_soa)
+ diff.add_data(self.__rrset3)
+ diff.delete_data(self.__rrset_soa)
+ diff.delete_data(self.__rrset2)
+
+ for rcode in [ ZoneFinder.NXRRSET,
+ ZoneFinder.DELEGATION,
+ ZoneFinder.CNAME,
+ ZoneFinder.DNAME ]:
+ # override the actual find method
+ self.__create_find_all(rcode, [], 0)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset2,
+ rcode)
+ self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
+ rcode)
+ self.__check_find_all_call(diff.find_all, self.__rrset2,
+ rcode)
+ self.__check_find_all_call(diff.find_all, self.__rrset3,
+ rcode)
if __name__ == "__main__":
isc.log.init("bind10")
diff --git a/src/lib/resolve/tests/recursive_query_unittest.cc b/src/lib/resolve/tests/recursive_query_unittest.cc
index a8b8057..02721f1 100644
--- a/src/lib/resolve/tests/recursive_query_unittest.cc
+++ b/src/lib/resolve/tests/recursive_query_unittest.cc
@@ -175,6 +175,10 @@ protected:
resolver_.reset();
}
+ void SetUp() {
+ callback_.reset(new ASIOCallBack(this));
+ }
+
// Send a test UDP packet to a mock server
void sendUDP(const int family) {
ScopedAddrInfo sai(resolveAddress(family, IPPROTO_UDP, false));
@@ -190,7 +194,7 @@ protected:
if (cc != sizeof(test_data)) {
isc_throw(IOError, "unexpected sendto result: " << cc);
}
- io_service_->run();
+ io_service_.run();
}
// Send a test TCP packet to a mock server
@@ -210,7 +214,7 @@ protected:
if (cc != sizeof(test_data)) {
isc_throw(IOError, "unexpected send result: " << cc);
}
- io_service_->run();
+ io_service_.run();
}
// Receive a UDP packet from a mock server; used for testing
@@ -233,10 +237,10 @@ protected:
// The IO service queue should have a RecursiveQuery object scheduled
// to run at this point. This call will cause it to begin an
// async send, then return.
- io_service_->run_one();
+ io_service_.run_one();
// ... and this one will block until the send has completed
- io_service_->run_one();
+ io_service_.run_one();
// Now we attempt to recv() whatever was sent.
// XXX: there's no guarantee the receiving socket can immediately get
@@ -326,9 +330,7 @@ protected:
// Set up empty DNS Service
// Set up an IO Service queue without any addresses
void setDNSService() {
- io_service_.reset(new IOService());
- callback_.reset(new ASIOCallBack(this));
- dns_service_.reset(new DNSService(*io_service_, callback_.get(), NULL,
+ dns_service_.reset(new DNSService(io_service_, callback_.get(), NULL,
NULL));
}
@@ -491,12 +493,10 @@ private:
static_cast<const uint8_t*>(io_message.getData()),
static_cast<const uint8_t*>(io_message.getData()) +
io_message.getDataSize());
- io_service_->stop();
+ io_service_.stop();
}
protected:
- // We use a pointer for io_service_, because for some tests we
- // need to recreate a new one within one onstance of this class
- scoped_ptr<IOService> io_service_;
+ IOService io_service_;
scoped_ptr<DNSService> dns_service_;
scoped_ptr<isc::nsas::NameserverAddressStore> nsas_;
isc::cache::ResolverCache cache_;
@@ -513,24 +513,26 @@ RecursiveQueryTest::RecursiveQueryTest() :
dns_service_(NULL), callback_(NULL), callback_protocol_(0),
callback_native_(-1), resolver_(new isc::util::unittests::TestResolver())
{
- io_service_.reset(new IOService());
- setDNSService(true, true);
nsas_.reset(new isc::nsas::NameserverAddressStore(resolver_));
}
TEST_F(RecursiveQueryTest, v6UDPSend) {
+ setDNSService(true, true);
doTest(AF_INET6, IPPROTO_UDP);
}
TEST_F(RecursiveQueryTest, v6TCPSend) {
+ setDNSService(true, true);
doTest(AF_INET6, IPPROTO_TCP);
}
TEST_F(RecursiveQueryTest, v4UDPSend) {
+ setDNSService(true, true);
doTest(AF_INET, IPPROTO_UDP);
}
TEST_F(RecursiveQueryTest, v4TCPSend) {
+ setDNSService(true, true);
doTest(AF_INET, IPPROTO_TCP);
}
@@ -643,7 +645,7 @@ TEST_F(RecursiveQueryTest, forwarderSend) {
// to the same port as the actual server
uint16_t port = boost::lexical_cast<uint16_t>(TEST_CLIENT_PORT);
- MockServer server(*io_service_);
+ MockServer server(io_service_);
RecursiveQuery rq(*dns_service_,
*nsas_, cache_,
singleAddress(TEST_IPV4_ADDR, port),
@@ -766,7 +768,7 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) {
// Prepare the server
bool done(true);
- MockServerStop server(*io_service_, &done);
+ MockServerStop server(io_service_, &done);
// Do the answer
const uint16_t port = boost::lexical_cast<uint16_t>(TEST_CLIENT_PORT);
@@ -784,7 +786,7 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) {
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
- io_service_->run();
+ io_service_.run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
}
@@ -800,7 +802,7 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) {
// Prepare the server
bool done1(true);
- MockServerStop server(*io_service_, &done1);
+ MockServerStop server(io_service_, &done1);
MessagePtr answer(new Message(Message::RENDER));
@@ -819,7 +821,7 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) {
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
- io_service_->run();
+ io_service_.run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
}
@@ -834,7 +836,7 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) {
// Prepare the server
bool done(true);
- MockServerStop server(*io_service_, &done);
+ MockServerStop server(io_service_, &done);
MessagePtr answer(new Message(Message::RENDER));
@@ -854,7 +856,7 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) {
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
- io_service_->run();
+ io_service_.run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
}
@@ -869,7 +871,7 @@ TEST_F(RecursiveQueryTest, lowtimeouts) {
// Prepare the server
bool done(true);
- MockServerStop server(*io_service_, &done);
+ MockServerStop server(io_service_, &done);
MessagePtr answer(new Message(Message::RENDER));
@@ -889,7 +891,7 @@ TEST_F(RecursiveQueryTest, lowtimeouts) {
boost::shared_ptr<MockResolverCallback> callback(new MockResolverCallback(&server));
query.forward(ConstMessagePtr(&query_message), answer, buffer, &server, callback);
// Run the test
- io_service_->run();
+ io_service_.run();
EXPECT_EQ(callback->result, MockResolverCallback::FAILURE);
}
@@ -903,7 +905,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendOk) {
setDNSService(true, false);
bool done;
- MockServerStop server(*io_service_, &done);
+ MockServerStop server(io_service_, &done);
vector<pair<string, uint16_t> > empty_vector;
RecursiveQuery rq(*dns_service_, *nsas_, cache_, empty_vector,
empty_vector, 10000, 0);
@@ -912,7 +914,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendOk) {
OutputBufferPtr buffer(new OutputBuffer(0));
MessagePtr answer(new Message(Message::RENDER));
rq.resolve(q, answer, buffer, &server);
- io_service_->run();
+ io_service_.run();
// Check that the answer we got matches the one we wanted
EXPECT_EQ(Rcode::NOERROR(), answer->getRcode());
@@ -929,7 +931,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendNXDOMAIN) {
setDNSService(true, false);
bool done;
- MockServerStop server(*io_service_, &done);
+ MockServerStop server(io_service_, &done);
vector<pair<string, uint16_t> > empty_vector;
RecursiveQuery rq(*dns_service_, *nsas_, cache_, empty_vector,
empty_vector, 10000, 0);
@@ -938,7 +940,7 @@ TEST_F(RecursiveQueryTest, DISABLED_recursiveSendNXDOMAIN) {
OutputBufferPtr buffer(new OutputBuffer(0));
MessagePtr answer(new Message(Message::RENDER));
rq.resolve(q, answer, buffer, &server);
- io_service_->run();
+ io_service_.run();
// Check that the answer we got matches the one we wanted
EXPECT_EQ(Rcode::NXDOMAIN(), answer->getRcode());
@@ -1012,7 +1014,7 @@ TEST_F(RecursiveQueryTest, CachedNS) {
OutputBufferPtr buffer(new OutputBuffer(0));
MessagePtr answer(new Message(Message::RENDER));
// The server is here so we have something to pass there
- MockServer server(*io_service_);
+ MockServer server(io_service_);
rq.resolve(q, answer, buffer, &server);
// We don't need to run the service in this test. We are interested only
// in the place it starts resolving at
diff --git a/src/lib/testutils/socket_request.h b/src/lib/testutils/socket_request.h
index 5c76d30..0ae15f3 100644
--- a/src/lib/testutils/socket_request.h
+++ b/src/lib/testutils/socket_request.h
@@ -55,7 +55,7 @@ public:
/// \param expect_port The port which is expected to be requested. If
/// the application requests a different port, it is considered
/// a failure.
- /// \param expeted_app The share name for which all the requests should
+ /// \param expected_app The share name for which all the requests should
/// be made. This is not the usual app_name - the requestSocket does
/// not fall back to this value if its share_name is left empty, if
/// you want to check the code relies on the requestor to use the
diff --git a/src/lib/testutils/testdata/rwtest.sqlite3 b/src/lib/testutils/testdata/rwtest.sqlite3
index 24afc2c..558bc3f 100644
Binary files a/src/lib/testutils/testdata/rwtest.sqlite3 and b/src/lib/testutils/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am
index c2b3020..fad2465 100644
--- a/src/lib/util/Makefile.am
+++ b/src/lib/util/Makefile.am
@@ -4,6 +4,7 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/exceptions -I$(top_builddir)/src/lib/exceptions
AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DLOCKFILE_DIR=\"${localstatedir}/${PACKAGE_NAME}\"
AM_CXXFLAGS = $(B10_CXXFLAGS)
lib_LTLIBRARIES = libutil.la
@@ -12,6 +13,9 @@ libutil_la_SOURCES += locks.h lru_list.h
libutil_la_SOURCES += strutil.h strutil.cc
libutil_la_SOURCES += buffer.h io_utilities.h
libutil_la_SOURCES += time_utilities.h time_utilities.cc
+libutil_la_SOURCES += interprocess_sync.h
+libutil_la_SOURCES += interprocess_sync_file.h interprocess_sync_file.cc
+libutil_la_SOURCES += interprocess_sync_null.h interprocess_sync_null.cc
libutil_la_SOURCES += range_utilities.h
libutil_la_SOURCES += hash/sha1.h hash/sha1.cc
libutil_la_SOURCES += encode/base16_from_binary.h
diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h
index 1263636..7e88108 100644
--- a/src/lib/util/buffer.h
+++ b/src/lib/util/buffer.h
@@ -206,8 +206,8 @@ public:
/// If specified buffer is too short, it will be expanded
/// using vector::resize() method.
///
- /// @param Reference to a buffer (data will be stored there).
- /// @param Size specified number of bytes to read in a vector.
+ /// @param data Reference to a buffer (data will be stored there).
+ /// @param len Size specified number of bytes to read in a vector.
///
void readVector(std::vector<uint8_t>& data, size_t len) {
if (position_ + len > len_) {
diff --git a/src/lib/util/interprocess_sync.h b/src/lib/util/interprocess_sync.h
new file mode 100644
index 0000000..e4fa7af
--- /dev/null
+++ b/src/lib/util/interprocess_sync.h
@@ -0,0 +1,149 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __INTERPROCESS_SYNC_H__
+#define __INTERPROCESS_SYNC_H__
+
+#include <string>
+
+namespace isc {
+namespace util {
+
+class InterprocessSyncLocker; // forward declaration
+
+/// \brief Interprocess Sync Class
+///
+/// This class specifies an interface for mutual exclusion among
+/// co-operating processes. This is an abstract class and a real
+/// implementation such as InterprocessSyncFile should be used
+/// in code. Usage is as follows:
+///
+/// 1. Client instantiates a sync object of an implementation (such as
+/// InterprocessSyncFile).
+/// 2. Client then creates an automatic (stack) object of
+/// InterprocessSyncLocker around the sync object. Such an object
+/// destroys itself and releases any acquired lock when it goes out of scope.
+/// 3. Client calls lock() method on the InterprocessSyncLocker.
+/// 4. Client performs task that needs mutual exclusion.
+/// 5. Client frees lock with unlock(), or simply returns from the basic
+/// block which forms the scope for the InterprocessSyncLocker.
+///
+/// NOTE: All implementations of InterprocessSync should keep the
+/// is_locked_ member variable updated whenever their
+/// lock()/tryLock()/unlock() implementations are called.
+class InterprocessSync {
+ // InterprocessSyncLocker is the only code outside this class that
+ // should be allowed to call the lock(), tryLock() and unlock()
+ // methods.
+ friend class InterprocessSyncLocker;
+
+public:
+ /// \brief Constructor
+ ///
+ /// Creates an interprocess synchronization object
+ ///
+ /// \param task_name Name of the synchronization task. This has to be
+ /// identical among the various processes that need to be
+ /// synchronized for the same task.
+ InterprocessSync(const std::string& task_name) :
+ task_name_(task_name), is_locked_(false)
+ {}
+
+ /// \brief Destructor
+ virtual ~InterprocessSync() {}
+
+protected:
+ /// \brief Acquire the lock (blocks if something else has acquired a
+ /// lock on the same task name)
+ ///
+ /// \return Returns true if the lock was acquired, false otherwise.
+ virtual bool lock() = 0;
+
+ /// \brief Try to acquire a lock (doesn't block)
+ ///
+ /// \return Returns true if the lock was acquired, false otherwise.
+ virtual bool tryLock() = 0;
+
+ /// \brief Release the lock
+ ///
+ /// \return Returns true if the lock was released, false otherwise.
+ virtual bool unlock() = 0;
+
+ const std::string task_name_; ///< The task name
+ bool is_locked_; ///< Is the lock taken?
+};
+
+/// \brief Interprocess Sync Locker Class
+///
+/// This class is used for making automatic stack objects to manage
+/// locks that are released automatically when the block is exited
+/// (RAII). It is meant to be used along with InterprocessSync objects. See
+/// the description of InterprocessSync.
+class InterprocessSyncLocker {
+public:
+ /// \brief Constructor
+ ///
+ /// Creates a lock manager around an interprocess synchronization object
+ ///
+ /// \param sync The sync object which has to be locked/unlocked by
+ /// this locker object.
+ InterprocessSyncLocker(InterprocessSync& sync) :
+ sync_(sync)
+ {}
+
+ /// \brief Destructor
+ ~InterprocessSyncLocker() {
+ if (isLocked())
+ unlock();
+ }
+
+ /// \brief Acquire the lock (blocks if something else has acquired a
+ /// lock on the same task name)
+ ///
+ /// \return Returns true if the lock was acquired, false otherwise.
+ bool lock() {
+ return (sync_.lock());
+ }
+
+ /// \brief Try to acquire a lock (doesn't block)
+ ///
+ /// \return Returns true if a new lock could be acquired, false
+ /// otherwise.
+ bool tryLock() {
+ return (sync_.tryLock());
+ }
+
+ /// \brief Check if the lock is taken
+ ///
+ /// \return Returns true if a lock is currently acquired, false
+ /// otherwise.
+ bool isLocked() const {
+ return (sync_.is_locked_);
+ }
+
+ /// \brief Release the lock
+ ///
+ /// \return Returns true if the lock was released, false otherwise.
+ bool unlock() {
+ return (sync_.unlock());
+ }
+
+protected:
+ InterprocessSync& sync_; ///< Ref to underlying sync object
+};
+
+} // namespace util
+} // namespace isc
+
+#endif // __INTERPROCESS_SYNC_H__
diff --git a/src/lib/util/interprocess_sync_file.cc b/src/lib/util/interprocess_sync_file.cc
new file mode 100644
index 0000000..d045449
--- /dev/null
+++ b/src/lib/util/interprocess_sync_file.cc
@@ -0,0 +1,130 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "interprocess_sync_file.h"
+
+#include <string>
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+namespace isc {
+namespace util {
+
+InterprocessSyncFile::~InterprocessSyncFile() {
+ if (fd_ != -1) {
+ // This will also release any applied locks.
+ close(fd_);
+ // The lockfile will continue to exist, and we must not delete
+ // it.
+ }
+}
+
+bool
+InterprocessSyncFile::do_lock(int cmd, short l_type) {
+ // Open lock file only when necessary (i.e., here). This is so that
+ // if a default InterprocessSync object is replaced with another
+ // implementation, it doesn't attempt any opens.
+ if (fd_ == -1) {
+ std::string lockfile_path = LOCKFILE_DIR;
+
+ const char* const env = getenv("B10_FROM_BUILD");
+ if (env != NULL) {
+ lockfile_path = env;
+ }
+
+ const char* const env2 = getenv("B10_FROM_BUILD_LOCALSTATEDIR");
+ if (env2 != NULL) {
+ lockfile_path = env2;
+ }
+
+ const char* const env3 = getenv("B10_LOCKFILE_DIR_FROM_BUILD");
+ if (env3 != NULL) {
+ lockfile_path = env3;
+ }
+
+ lockfile_path += "/" + task_name_ + "_lockfile";
+
+ // Open the lockfile lazily on the first lock attempt (fd_ == -1 above)
+ // so the file access checks are not repeated on every lock operation.
+ const mode_t mode = umask(0111);
+ fd_ = open(lockfile_path.c_str(), O_CREAT | O_RDWR, 0660);
+ umask(mode);
+
+ if (fd_ == -1) {
+ isc_throw(InterprocessSyncFileError,
+ "Unable to use interprocess sync lockfile: " +
+ lockfile_path);
+ }
+ }
+
+ struct flock lock;
+
+ memset(&lock, 0, sizeof (lock));
+ lock.l_type = l_type;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 1;
+
+ return (fcntl(fd_, cmd, &lock) == 0);
+}
+
+bool
+InterprocessSyncFile::lock() {
+ if (is_locked_) {
+ return (true);
+ }
+
+ if (do_lock(F_SETLKW, F_WRLCK)) {
+ is_locked_ = true;
+ return (true);
+ }
+
+ return (false);
+}
+
+bool
+InterprocessSyncFile::tryLock() {
+ if (is_locked_) {
+ return (true);
+ }
+
+ if (do_lock(F_SETLK, F_WRLCK)) {
+ is_locked_ = true;
+ return (true);
+ }
+
+ return (false);
+}
+
+bool
+InterprocessSyncFile::unlock() {
+ if (!is_locked_) {
+ return (true);
+ }
+
+ if (do_lock(F_SETLKW, F_UNLCK)) {
+ is_locked_ = false;
+ return (true);
+ }
+
+ return (false);
+}
+
+} // namespace util
+} // namespace isc
diff --git a/src/lib/util/interprocess_sync_file.h b/src/lib/util/interprocess_sync_file.h
new file mode 100644
index 0000000..fd8da1b
--- /dev/null
+++ b/src/lib/util/interprocess_sync_file.h
@@ -0,0 +1,91 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __INTERPROCESS_SYNC_FILE_H__
+#define __INTERPROCESS_SYNC_FILE_H__
+
+#include <util/interprocess_sync.h>
+#include <exceptions/exceptions.h>
+
+namespace isc {
+namespace util {
+
+/// \brief InterprocessSyncFileError
+///
+/// Exception that is thrown if it's not possible to open the
+/// lock file.
+class InterprocessSyncFileError : public Exception {
+public:
+ InterprocessSyncFileError(const char* file, size_t line,
+ const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+/// \brief File-based Interprocess Sync Class
+///
+/// This class specifies a concrete implementation for a file-based
+/// interprocess synchronization mechanism. Please see the
+/// InterprocessSync class documentation for usage.
+///
+/// An InterprocessSyncFileError exception may be thrown if there is an
+/// issue opening the lock file.
+///
+/// Lock files are created typically in the local state directory
+/// (var). They are typically named like "<task_name>_lockfile".
+/// This implementation opens lock files lazily (only when
+/// necessary). It also leaves the lock files lying around as multiple
+/// processes may have locks on them.
+class InterprocessSyncFile : public InterprocessSync {
+public:
+ /// \brief Constructor
+ ///
+ /// Creates a file-based interprocess synchronization object
+ ///
+ /// \param task_name Name of the synchronization task. This has to be
+ /// identical among the various processes that need to be
+ /// synchronized for the same task.
+ InterprocessSyncFile(const std::string& task_name) :
+ InterprocessSync(task_name), fd_(-1)
+ {}
+
+ /// \brief Destructor
+ virtual ~InterprocessSyncFile();
+
+protected:
+ /// \brief Acquire the lock (blocks if something else has acquired a
+ /// lock on the same task name)
+ ///
+ /// \return Returns true if the lock was acquired, false otherwise.
+ bool lock();
+
+ /// \brief Try to acquire a lock (doesn't block)
+ ///
+ /// \return Returns true if the lock was acquired, false otherwise.
+ bool tryLock();
+
+ /// \brief Release the lock
+ ///
+ /// \return Returns true if the lock was released, false otherwise.
+ bool unlock();
+
+private:
+ bool do_lock(int cmd, short l_type);
+
+ int fd_; ///< The descriptor for the open file
+};
+
+} // namespace util
+} // namespace isc
+
+#endif // __INTERPROCESS_SYNC_FILE_H__
diff --git a/src/lib/util/interprocess_sync_null.cc b/src/lib/util/interprocess_sync_null.cc
new file mode 100644
index 0000000..5355d57
--- /dev/null
+++ b/src/lib/util/interprocess_sync_null.cc
@@ -0,0 +1,42 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "interprocess_sync_null.h"
+
+namespace isc {
+namespace util {
+
+InterprocessSyncNull::~InterprocessSyncNull() {
+}
+
+bool
+InterprocessSyncNull::lock() {
+ is_locked_ = true;
+ return (true);
+}
+
+bool
+InterprocessSyncNull::tryLock() {
+ is_locked_ = true;
+ return (true);
+}
+
+bool
+InterprocessSyncNull::unlock() {
+ is_locked_ = false;
+ return (true);
+}
+
+} // namespace util
+} // namespace isc
diff --git a/src/lib/util/interprocess_sync_null.h b/src/lib/util/interprocess_sync_null.h
new file mode 100644
index 0000000..6ac0322
--- /dev/null
+++ b/src/lib/util/interprocess_sync_null.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __INTERPROCESS_SYNC_NULL_H__
+#define __INTERPROCESS_SYNC_NULL_H__
+
+#include <util/interprocess_sync.h>
+
+namespace isc {
+namespace util {
+
+/// \brief Null Interprocess Sync Class
+///
+/// This class specifies a concrete implementation for a null (no effect)
+/// interprocess synchronization mechanism. Please see the
+/// InterprocessSync class documentation for usage.
+class InterprocessSyncNull : public InterprocessSync {
+public:
+ /// \brief Constructor
+ ///
+ /// Creates a null interprocess synchronization object
+ ///
+ /// \param task_name Name of the synchronization task. This has to be
+ /// identical among the various processes that need to be
+ /// synchronized for the same task.
+ InterprocessSyncNull(const std::string& task_name) :
+ InterprocessSync(task_name)
+ {}
+
+ /// \brief Destructor
+ virtual ~InterprocessSyncNull();
+
+protected:
+ /// \brief Acquire the lock (never blocks)
+ ///
+ /// \return Always returns true
+ bool lock();
+
+ /// \brief Try to acquire a lock (doesn't block)
+ ///
+ /// \return Always returns true
+ bool tryLock();
+
+ /// \brief Release the lock
+ ///
+ /// \return Always returns true
+ bool unlock();
+};
+
+} // namespace util
+} // namespace isc
+
+#endif // __INTERPROCESS_SYNC_NULL_H__
diff --git a/src/lib/util/tests/Makefile.am b/src/lib/util/tests/Makefile.am
index 37dfc5e..cf1e5a5 100644
--- a/src/lib/util/tests/Makefile.am
+++ b/src/lib/util/tests/Makefile.am
@@ -28,6 +28,8 @@ run_unittests_SOURCES += filename_unittest.cc
run_unittests_SOURCES += hex_unittest.cc
run_unittests_SOURCES += io_utilities_unittest.cc
run_unittests_SOURCES += lru_list_unittest.cc
+run_unittests_SOURCES += interprocess_sync_file_unittest.cc
+run_unittests_SOURCES += interprocess_sync_null_unittest.cc
run_unittests_SOURCES += qid_gen_unittest.cc
run_unittests_SOURCES += random_number_generator_unittest.cc
run_unittests_SOURCES += sha1_unittest.cc
diff --git a/src/lib/util/tests/interprocess_sync_file_unittest.cc b/src/lib/util/tests/interprocess_sync_file_unittest.cc
new file mode 100644
index 0000000..9a1b025
--- /dev/null
+++ b/src/lib/util/tests/interprocess_sync_file_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "util/interprocess_sync_file.h"
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+using namespace std;
+
+namespace isc {
+namespace util {
+
+namespace {
+unsigned char
+parentReadLockedState (int fd) {
+ unsigned char locked = 0xff;
+
+ fd_set rfds;
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+
+ // We use select() here to wait for new data on the input end of
+ // the pipe. We wait for 5 seconds (an arbitrary value) for input
+ // data, and continue if no data is available. This is done so
+ // that read() is not blocked due to some issue in the child
+ // process (and the tests continue running).
+
+ struct timeval tv;
+ tv.tv_sec = 5;
+ tv.tv_usec = 0;
+
+ const int nfds = select(fd + 1, &rfds, NULL, NULL, &tv);
+ EXPECT_EQ(1, nfds);
+
+ if (nfds == 1) {
+ // Read status
+ ssize_t bytes_read = read(fd, &locked, sizeof(locked));
+ EXPECT_EQ(sizeof(locked), bytes_read);
+ }
+
+ return (locked);
+}
+
+TEST(InterprocessSyncFileTest, TestLock) {
+ InterprocessSyncFile sync("test");
+ InterprocessSyncLocker locker(sync);
+
+ EXPECT_FALSE(locker.isLocked());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.isLocked());
+
+ int fds[2];
+
+ // Here, we check that a lock has been taken by forking and
+ // checking from the child that a lock exists. This has to be
+ // done from a separate process as we test by trying to lock the
+ // range again on the lock file. The lock attempt would pass if
+ // done from the same process for the granted range. The lock
+ // attempt must fail to pass our check.
+
+ EXPECT_EQ(0, pipe(fds));
+
+ if (fork() == 0) {
+ unsigned char locked = 0;
+ // Child writes to pipe
+ close(fds[0]);
+
+ InterprocessSyncFile sync2("test");
+ InterprocessSyncLocker locker2(sync2);
+
+ if (!locker2.tryLock()) {
+ EXPECT_FALSE(locker2.isLocked());
+ locked = 1;
+ } else {
+ EXPECT_TRUE(locker2.isLocked());
+ }
+
+ ssize_t bytes_written = write(fds[1], &locked, sizeof(locked));
+ EXPECT_EQ(sizeof(locked), bytes_written);
+
+ close(fds[1]);
+ exit(0);
+ } else {
+ // Parent reads from pipe
+ close(fds[1]);
+
+ const unsigned char locked = parentReadLockedState(fds[0]);
+
+ close(fds[0]);
+
+ EXPECT_EQ(1, locked);
+ }
+
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_FALSE(locker.isLocked());
+
+ EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test_lockfile"));
+}
+
+TEST(InterprocessSyncFileTest, TestMultipleFilesDirect) {
+ InterprocessSyncFile sync("test1");
+ InterprocessSyncLocker locker(sync);
+
+ EXPECT_TRUE(locker.lock());
+
+ InterprocessSyncFile sync2("test2");
+ InterprocessSyncLocker locker2(sync2);
+ EXPECT_TRUE(locker2.lock());
+ EXPECT_TRUE(locker2.unlock());
+
+ EXPECT_TRUE(locker.unlock());
+
+ EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test1_lockfile"));
+ EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test2_lockfile"));
+}
+
+TEST(InterprocessSyncFileTest, TestMultipleFilesForked) {
+ InterprocessSyncFile sync("test1");
+ InterprocessSyncLocker locker(sync);
+
+ EXPECT_TRUE(locker.lock());
+
+ int fds[2];
+
+ EXPECT_EQ(0, pipe(fds));
+
+ if (fork() == 0) {
+ unsigned char locked = 0xff;
+ // Child writes to pipe
+ close(fds[0]);
+
+ InterprocessSyncFile sync2("test2");
+ InterprocessSyncLocker locker2(sync2);
+
+ if (locker2.tryLock()) {
+ locked = 0;
+ }
+
+ ssize_t bytes_written = write(fds[1], &locked, sizeof(locked));
+ EXPECT_EQ(sizeof(locked), bytes_written);
+
+ close(fds[1]);
+ exit(0);
+ } else {
+ // Parent reads from pipe
+ close(fds[1]);
+
+ const unsigned char locked = parentReadLockedState(fds[0]);
+
+ close(fds[0]);
+
+ EXPECT_EQ(0, locked);
+ }
+
+ EXPECT_TRUE(locker.unlock());
+
+ EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test1_lockfile"));
+ EXPECT_EQ (0, unlink(TEST_DATA_TOPBUILDDIR "/test2_lockfile"));
+}
+}
+
+} // namespace util
+} // namespace isc
diff --git a/src/lib/util/tests/interprocess_sync_null_unittest.cc b/src/lib/util/tests/interprocess_sync_null_unittest.cc
new file mode 100644
index 0000000..70e2b07
--- /dev/null
+++ b/src/lib/util/tests/interprocess_sync_null_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "util/interprocess_sync_null.h"
+#include <gtest/gtest.h>
+
+using namespace std;
+
+namespace isc {
+namespace util {
+
+TEST(InterprocessSyncNullTest, TestNull) {
+ InterprocessSyncNull sync("test1");
+ InterprocessSyncLocker locker(sync);
+
+ // Check if the is_locked_ flag is set correctly during lock().
+ EXPECT_FALSE(locker.isLocked());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.isLocked());
+
+ // lock() must always return true (this is called 4 times, just an
+ // arbitrary number)
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.lock());
+
+ // Check if the is_locked_ flag is set correctly during unlock().
+ EXPECT_TRUE(locker.isLocked());
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_FALSE(locker.isLocked());
+
+ // unlock() must always return true (this is called 4 times, just an
+ // arbitrary number)
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.unlock());
+
+ // Check if the is_locked_ flag is set correctly during tryLock().
+ EXPECT_FALSE(locker.isLocked());
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.isLocked());
+
+ // tryLock() must always return true (this is called 4 times, just an
+ // arbitrary number)
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.tryLock());
+
+ // Random order (should all return true)
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.lock());
+ EXPECT_TRUE(locker.tryLock());
+ EXPECT_TRUE(locker.unlock());
+ EXPECT_TRUE(locker.unlock());
+}
+
+} // namespace util
+} // namespace isc
diff --git a/src/lib/util/tests/run_unittests.cc b/src/lib/util/tests/run_unittests.cc
index a2181cf..8789a9c 100644
--- a/src/lib/util/tests/run_unittests.cc
+++ b/src/lib/util/tests/run_unittests.cc
@@ -14,10 +14,12 @@
#include <gtest/gtest.h>
#include <util/unittests/run_all.h>
+#include <stdlib.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
+ setenv("B10_LOCKFILE_DIR_FROM_BUILD", TEST_DATA_TOPBUILDDIR, 1);
return (isc::util::unittests::run_all());
}
diff --git a/tests/tools/perfdhcp/Makefile.am b/tests/tools/perfdhcp/Makefile.am
index 656836d..6ebc00f 100644
--- a/tests/tools/perfdhcp/Makefile.am
+++ b/tests/tools/perfdhcp/Makefile.am
@@ -6,6 +6,12 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
AM_LDFLAGS = $(CLOCK_GETTIME_LDFLAGS)
AM_LDFLAGS += -lm
if USE_STATIC_LINK
@@ -14,8 +20,21 @@ endif
lib_LTLIBRARIES = libperfdhcp++.la
libperfdhcp___la_SOURCES = command_options.cc command_options.h
+libperfdhcp___la_SOURCES += localized_option.h
+libperfdhcp___la_SOURCES += perf_pkt6.cc perf_pkt6.h
+libperfdhcp___la_SOURCES += perf_pkt4.cc perf_pkt4.h
+libperfdhcp___la_SOURCES += pkt_transform.cc pkt_transform.h
+
libperfdhcp___la_CXXFLAGS = $(AM_CXXFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+libperfdhcp___la_CXXFLAGS += -Wno-unused-parameter
+endif
+
libperfdhcp___la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+libperfdhcp___la_LIBADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la
+libperfdhcp___la_LIBADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
pkglibexec_PROGRAMS = perfdhcp
perfdhcp_SOURCES = perfdhcp.c
diff --git a/tests/tools/perfdhcp/command_options.cc b/tests/tools/perfdhcp/command_options.cc
index 7b62076..5b4f424 100644
--- a/tests/tools/perfdhcp/command_options.cc
+++ b/tests/tools/perfdhcp/command_options.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/tests/tools/perfdhcp/command_options.h b/tests/tools/perfdhcp/command_options.h
index 033d29a..9196857 100644
--- a/tests/tools/perfdhcp/command_options.h
+++ b/tests/tools/perfdhcp/command_options.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/tests/tools/perfdhcp/localized_option.h b/tests/tools/perfdhcp/localized_option.h
new file mode 100644
index 0000000..5374684
--- /dev/null
+++ b/tests/tools/perfdhcp/localized_option.h
@@ -0,0 +1,123 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOCALIZED_OPTION_H
+#define __LOCALIZED_OPTION_H
+
+#include <dhcp/pkt6.h>
+
+namespace isc {
+namespace perfdhcp {
+
+/// \brief DHCP option at specific offset
+///
+/// This class represents DHCP option with data placed at specified
+/// offset in DHCP message.
+/// Objects of this type are intended to be used when DHCP packets
+/// are created from templates (e.g. read from template file).
+/// Such packets have number of options with contents that have to be
+/// replaced before sending: e.g. DUID can be randomized.
+/// If option of this type is added to \ref PerfPkt6 options collection,
+/// \ref perfdhcp::PerfPkt6 will call \ref getOffset on this object
+/// to retrieve user-defined option position and replace contents of
+/// the output buffer at this offset before packet is sent to the server.
+/// (\see perfdhcp::PerfPkt6::rawPack).
+/// In order to read on-wire data from incoming packet client class
+/// has to specify options of \ref perfdhcp::LocalizedOption type
+/// with expected offsets of these options in a packet. The
+/// \ref perfdhcp::PerfPkt6 will use offsets to read fragments
+/// of packet and store them in options' buffers.
+/// (\see perfdhcp::PerfPkt6::rawUnpack).
+///
+class LocalizedOption : public dhcp::Option {
+public:
+ /// \brief Constructor, sets default (0) option offset
+ ///
+ /// \param u specifies universe (V4 or V6)
+ /// \param type option type (0-255 for V4 and 0-65535 for V6)
+ /// \param data content of the option
+ LocalizedOption(dhcp::Option::Universe u,
+ uint16_t type,
+ const dhcp::OptionBuffer& data) :
+ dhcp::Option(u, type, data),
+ offset_(0) {
+ }
+
+
+ /// \brief Constructor, used to create localized option from buffer
+ ///
+ /// \param u specifies universe (V4 or V6)
+ /// \param type option type (0-255 for V4 and 0-65535 for V6)
+ /// \param data content of the option
+ /// \param offset location of option in a packet (zero is default)
+ LocalizedOption(dhcp::Option::Universe u,
+ uint16_t type,
+ const dhcp::OptionBuffer& data,
+ const size_t offset) :
+ dhcp::Option(u, type, data),
+ offset_(offset) {
+ }
+
+ /// \brief Constructor, sets default (0) option offset
+ ///
+    /// This constructor is similar to the previous one, but it does not take
+    /// the whole vector<uint8_t>, but rather a subset of it.
+ ///
+ /// \param u specifies universe (V4 or V6)
+ /// \param type option type (0-255 for V4 and 0-65535 for V6)
+ /// \param first iterator to the first element that should be copied
+ /// \param last iterator to the next element after the last one
+ /// to be copied.
+ LocalizedOption(dhcp::Option::Universe u,
+ uint16_t type,
+ dhcp::OptionBufferConstIter first,
+ dhcp::OptionBufferConstIter last) :
+ dhcp::Option(u, type, first, last),
+ offset_(0) {
+ }
+
+
+ /// \brief Constructor, used to create option from buffer iterators
+ ///
+    /// This constructor is similar to the previous one, but it does not take
+    /// the whole vector<uint8_t>, but rather a subset of it.
+ ///
+ /// \param u specifies universe (V4 or V6)
+ /// \param type option type (0-255 for V4 and 0-65535 for V6)
+ /// \param first iterator to the first element that should be copied
+ /// \param last iterator to the next element after the last one
+ /// to be copied.
+ /// \param offset offset of option in a packet (zero is default)
+ LocalizedOption(dhcp::Option::Universe u,
+ uint16_t type,
+ dhcp::OptionBufferConstIter first,
+ dhcp::OptionBufferConstIter last, const size_t offset) :
+ dhcp::Option(u, type, first, last),
+ offset_(offset) {
+ }
+
+ /// \brief Returns offset of an option in a DHCP packet.
+ ///
+ /// \return option offset in a packet
+ size_t getOffset() const { return offset_; };
+
+private:
+ size_t offset_; ///< Offset of DHCP option in a packet
+};
+
+
+} // namespace perfdhcp
+} // namespace isc
+
+#endif // __LOCALIZED_OPTION_H
diff --git a/tests/tools/perfdhcp/perf_pkt4.cc b/tests/tools/perfdhcp/perf_pkt4.cc
new file mode 100644
index 0000000..3f733af
--- /dev/null
+++ b/tests/tools/perfdhcp/perf_pkt4.cc
@@ -0,0 +1,62 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dhcp/libdhcp++.h>
+#include <dhcp/dhcp6.h>
+
+#include "perf_pkt4.h"
+#include "pkt_transform.h"
+
+using namespace std;
+using namespace isc;
+using namespace dhcp;
+
+namespace isc {
+namespace perfdhcp {
+
+PerfPkt4::PerfPkt4(const uint8_t* buf,
+ size_t len,
+ size_t transid_offset,
+ uint32_t transid) :
+ Pkt4(buf, len),
+ transid_offset_(transid_offset) {
+ setTransid(transid);
+}
+
+bool
+PerfPkt4::rawPack() {
+ return (PktTransform::pack(dhcp::Option::V4,
+ data_,
+ options_,
+ getTransidOffset(),
+ getTransid(),
+ bufferOut_));
+}
+
+bool
+PerfPkt4::rawUnpack() {
+ uint32_t transid = getTransid();
+ bool res = PktTransform::unpack(dhcp::Option::V4,
+ data_,
+ options_,
+ getTransidOffset(),
+ transid);
+ if (res) {
+ setTransid(transid);
+ }
+ return (res);
+}
+
+} // namespace perfdhcp
+} // namespace isc
diff --git a/tests/tools/perfdhcp/perf_pkt4.h b/tests/tools/perfdhcp/perf_pkt4.h
new file mode 100644
index 0000000..f4cc440
--- /dev/null
+++ b/tests/tools/perfdhcp/perf_pkt4.h
@@ -0,0 +1,113 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PERF_PKT4_H
+#define __PERF_PKT4_H
+
+#include <time.h>
+#include <boost/shared_ptr.hpp>
+#include <dhcp/pkt4.h>
+
+#include "localized_option.h"
+
+namespace isc {
+namespace perfdhcp {
+
+/// \brief PerfPkt4 (DHCPv4 packet)
+///
+/// This class extends the functionality of \ref isc::dhcp::Pkt4 by adding the
+/// ability to specify an options offset in the DHCP message and to override
+/// options' contents. This is particularly useful when we create a packet
+/// object using a template file (i.e. do not build it dynamically). The client
+/// class should read data from the template file and pass it to this class in
+/// a buffer.
+///
+/// The contents of such a packet can be later partially replaced, notably the
+/// selected options and the transaction ID. (The transaction ID and its
+/// offset in the template file are passed via the constructor.)
+///
+/// In order to replace contents of the options, the client class has to
+/// create a collection of \ref LocalizedOption, adding them using
+/// \ref dhcp::Pkt4::addOption.
+///
+/// \note If you don't use template files simply use constructors
+/// inherited from parent class and isc::dhcp::Option type instead
+
+class PerfPkt4 : public dhcp::Pkt4 {
+public:
+
+ /// Localized option pointer type.
+ typedef boost::shared_ptr<LocalizedOption> LocalizedOptionPtr;
+
+ /// \brief Constructor, used to create messages from packet
+ /// template files.
+ ///
+ /// Creates a new DHCPv4 message using the provided buffer.
+ /// The transaction ID and its offset are specified via this
+ /// constructor. The transaction ID is stored in outgoing message
+ /// when client class calls \ref PerfPkt4::rawPack. Transaction id
+ /// offset value is used for incoming and outgoing messages to
+ /// identify transaction ID field's position in incoming and outgoing
+ /// messages.
+ ///
+ /// \param buf buffer holding contents of the message (this can
+ /// be directly read from template file).
+ /// \param len length of the data in the buffer.
+ /// \param transid_offset transaction id offset in a message.
+ /// \param transid transaction id to be stored in outgoing message.
+ PerfPkt4(const uint8_t* buf,
+ size_t len,
+ size_t transid_offset = 1,
+ uint32_t transid = 0);
+
+ /// \brief Returns transaction id offset in packet buffer
+ ///
+ /// \return Transaction ID offset in packet buffer
+ size_t getTransidOffset() const { return transid_offset_; };
+
+ /// \brief Prepares on-wire format from raw buffer.
+ ///
+ /// The method copies the buffer provided in the constructor to the
+ /// output buffer and replaces the transaction ID and selected
+ /// options with new data.
+ ///
+ /// \note Use this method to prepare an on-wire DHCPv4 message
+ /// when you use template packets that require replacement
+ /// of selected options' contents before sending.
+ ///
+    /// \return false if the pack operation failed.
+ bool rawPack();
+
+ /// \brief Handles limited binary packet parsing for packets with
+ /// custom offsets of options and transaction ID
+ ///
+ /// This method handles the parsing of packets that have custom offsets
+ /// of options or transaction ID. Use
+ /// \ref isc::dhcp::Pkt4::addOption to specify which options to parse.
+ /// Options should be of the \ref isc::perfdhcp::LocalizedOption
+ /// type with offset values provided. Each added option will
+ /// be updated with actual data read from the binary packet buffer.
+ ///
+ /// \return false If unpack operation failed.
+ bool rawUnpack();
+
+private:
+ size_t transid_offset_; ///< transaction id offset
+
+};
+
+} // namespace perfdhcp
+} // namespace isc
+
+#endif // __PERF_PKT4_H
diff --git a/tests/tools/perfdhcp/perf_pkt6.cc b/tests/tools/perfdhcp/perf_pkt6.cc
new file mode 100644
index 0000000..24cfb93
--- /dev/null
+++ b/tests/tools/perfdhcp/perf_pkt6.cc
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <exceptions/exceptions.h>
+#include <dhcp/libdhcp++.h>
+#include <dhcp/dhcp6.h>
+
+#include "perf_pkt6.h"
+#include "pkt_transform.h"
+
+using namespace std;
+using namespace isc;
+using namespace dhcp;
+
+namespace isc {
+namespace perfdhcp {
+
+PerfPkt6::PerfPkt6(const uint8_t* buf,
+ size_t len,
+ size_t transid_offset,
+ uint32_t transid) :
+ Pkt6(buf, len, Pkt6::UDP),
+ transid_offset_(transid_offset) {
+ setTransid(transid);
+}
+
+bool
+PerfPkt6::rawPack() {
+ return (PktTransform::pack(dhcp::Option::V6,
+ data_,
+ options_,
+ getTransidOffset(),
+ getTransid(),
+ bufferOut_));
+}
+
+bool
+PerfPkt6::rawUnpack() {
+ uint32_t transid = getTransid();
+ bool res = PktTransform::unpack(dhcp::Option::V6,
+ data_,
+ options_,
+ getTransidOffset(),
+ transid);
+ if (res) {
+ setTransid(transid);
+ }
+ return (res);
+}
+
+} // namespace perfdhcp
+} // namespace isc
diff --git a/tests/tools/perfdhcp/perf_pkt6.h b/tests/tools/perfdhcp/perf_pkt6.h
new file mode 100644
index 0000000..94fe47b
--- /dev/null
+++ b/tests/tools/perfdhcp/perf_pkt6.h
@@ -0,0 +1,113 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PERF_PKT6_H
+#define __PERF_PKT6_H
+
+#include <time.h>
+#include <boost/shared_ptr.hpp>
+#include <dhcp/pkt6.h>
+
+#include "localized_option.h"
+
+namespace isc {
+namespace perfdhcp {
+
+/// \brief PerfPkt6 (DHCPv6 packet)
+///
+/// This class extends the functionality of \ref isc::dhcp::Pkt6 by
+/// adding the ability to specify an options offset in the DHCP message
+/// and so override the options' contents. This is particularly useful when we
+/// create a packet object using a template file (i.e. do not build it
+/// dynamically). The client class should read the data from the template file
+/// and pass it to this class as a buffer.
+///
+/// The contents of such packet can be later partially replaced: in particular,
+/// selected options and the transaction ID can be altered. (The transaction
+/// ID and its offset in the template file is passed via the constructor.)
+///
+/// In order to replace the contents of options, the client class has to
+/// create a collection of \ref LocalizedOption by adding them using
+/// \ref dhcp::Pkt6::addOption.
+///
+/// \note If you don't use template files, simply use constructors
+/// inherited from parent class and the \ref isc::dhcp::Option type instead.
+
+class PerfPkt6 : public dhcp::Pkt6 {
+public:
+
+ /// Localized option pointer type.
+ typedef boost::shared_ptr<LocalizedOption> LocalizedOptionPtr;
+
+ /// \brief Constructor, used to create messages from packet
+ /// template files.
+ ///
+ /// Creates a new DHCPv6 message using the provided buffer.
+ /// The transaction ID and its offset are specified via this
+ /// constructor. The transaction ID is stored in outgoing message
+ /// when client class calls \ref PerfPkt6::rawPack. Transaction id
+ /// offset value is used for incoming and outgoing messages to
+ /// identify transaction ID field's position in incoming and outgoing
+ /// messages.
+ ///
+ /// \param buf buffer holding contents of the message (this can
+ /// be directly read from template file).
+ /// \param len length of the data in the buffer.
+ /// \param transid_offset transaction id offset in a message.
+ /// \param transid transaction id to be stored in outgoing message.
+ PerfPkt6(const uint8_t* buf,
+ size_t len,
+ size_t transid_offset = 1,
+ uint32_t transid = 0);
+
+ /// \brief Returns transaction id offset in packet buffer
+ ///
+ /// \return Transaction ID offset in the packet buffer.
+ size_t getTransidOffset() const { return transid_offset_; };
+
+ /// \brief Prepares on-wire format from raw buffer
+ ///
+ /// The method copies the buffer provided in constructor to the
+ /// output buffer and replaces the transaction ID and selected
+ /// options with new data.
+ ///
+ /// \note Use this method to prepare an on-wire DHCPv6 message
+ /// when you use template packets that require replacement
+ /// of selected options' contents before sending.
+ ///
+    /// \return false if the pack operation failed.
+ bool rawPack();
+
+ /// \brief Handles limited binary packet parsing for packets with
+ /// custom offsets of options and transaction id
+ ///
+    /// This method handles the parsing of packets that have custom offsets
+    /// of options or transaction ID. Use
+    /// \ref isc::dhcp::Pkt6::addOption to specify which options to parse.
+ /// Options should be of the \ref isc::perfdhcp::LocalizedOption
+ /// type with offset values provided. Each added option will
+ /// be updated with actual data read from the binary packet buffer.
+ ///
+ /// \return false if unpack operation failed.
+ bool rawUnpack();
+
+private:
+ size_t transid_offset_; ///< transaction id offset
+
+};
+
+} // namespace perfdhcp
+} // namespace isc
+
+#endif // __PERF_PKT6_H
diff --git a/tests/tools/perfdhcp/pkt_transform.cc b/tests/tools/perfdhcp/pkt_transform.cc
new file mode 100644
index 0000000..5ed39bf
--- /dev/null
+++ b/tests/tools/perfdhcp/pkt_transform.cc
@@ -0,0 +1,222 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+
+#include <exceptions/exceptions.h>
+#include <dhcp/option.h>
+#include <dhcp/libdhcp++.h>
+#include <dhcp/dhcp6.h>
+
+#include "pkt_transform.h"
+#include "localized_option.h"
+
+using namespace std;
+using namespace isc;
+using namespace dhcp;
+
+namespace isc {
+namespace perfdhcp {
+
+bool
+PktTransform::pack(const Option::Universe universe,
+ const OptionBuffer& in_buffer,
+ const Option::OptionCollection& options,
+ const size_t transid_offset,
+ const uint32_t transid,
+ util::OutputBuffer& out_buffer) {
+
+ // Always override the packet if function is called.
+ out_buffer.clear();
+ // Write whole buffer to output buffer.
+ out_buffer.writeData(&in_buffer[0], in_buffer.size());
+
+ uint8_t transid_len = (universe == Option::V6) ? 3 : 4;
+
+ if ((transid_offset + transid_len >= in_buffer.size()) ||
+ (transid_offset == 0)) {
+ cout << "Failed to build packet: provided transaction id offset: "
+ << transid_offset << " is out of bounds (expected 1.."
+ << in_buffer.size()-1 << ")." << endl;
+ return (false);
+ }
+
+ try {
+ size_t offset_ptr = transid_offset;
+ if (universe == Option::V4) {
+ out_buffer.writeUint8At(transid >> 24 & 0xFF, offset_ptr++);
+ }
+ out_buffer.writeUint8At(transid >> 16 & 0xFF, offset_ptr++);
+ out_buffer.writeUint8At(transid >> 8 & 0xFF, offset_ptr++);
+ out_buffer.writeUint8At(transid & 0xFF, offset_ptr++);
+
+ // We already have packet template stored in output buffer
+ // but still some options have to be updated if client
+ // specified them along with their offsets in the buffer.
+ PktTransform::packOptions(in_buffer, options, out_buffer);
+ } catch (const isc::BadValue& e) {
+ cout << "Building packet failed: " << e.what() << endl;
+ return (false);
+ }
+ return (true);
+}
+
+bool
+PktTransform::unpack(const Option::Universe universe,
+ const OptionBuffer& in_buffer,
+ const Option::OptionCollection& options,
+ const size_t transid_offset,
+ uint32_t& transid) {
+
+ uint8_t transid_len = (universe == Option::V6) ? 3 : 4;
+
+ // Validate transaction id offset.
+ if ((transid_offset + transid_len + 1 > in_buffer.size()) ||
+ (transid_offset == 0)) {
+ cout << "Failed to parse packet: provided transaction id offset: "
+ << transid_offset << " is out of bounds (expected 1.."
+ << in_buffer.size()-1 << ")." << endl;
+ return (false);
+ }
+
+ // Read transaction id from the buffer.
+    // For DHCPv6 the transaction id is 3 bytes long, so the high byte
+ // of transid will be zero.
+ OptionBufferConstIter it = in_buffer.begin() + transid_offset;
+ transid = 0;
+ for (int i = 0; i < transid_len; ++i, ++it) {
+        // Read the next byte and shift it left to its position in
+        // transid (shift by the number of bytes remaining after it).
+ transid += *it << (transid_len - i - 1) * 8;
+ }
+
+ try {
+ PktTransform::unpackOptions(in_buffer, options);
+ } catch (const isc::BadValue& e) {
+ cout << "Packet parsing failed: " << e.what() << endl;
+ return (false);
+ }
+
+ return (true);
+}
+
+void
+PktTransform::packOptions(const OptionBuffer& in_buffer,
+ const Option::OptionCollection& options,
+ util::OutputBuffer& out_buffer) {
+ try {
+ // If there are any options on the list, we will use provided
+ // options offsets to override them in the output buffer
+ // with new contents.
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end(); ++it) {
+ // Get options with their position (offset).
+ boost::shared_ptr<LocalizedOption> option =
+ boost::dynamic_pointer_cast<LocalizedOption>(it->second);
+ if (option == NULL) {
+ isc_throw(isc::BadValue, "option is null");
+ }
+ uint32_t offset = option->getOffset();
+ if ((offset == 0) ||
+ (offset + option->len() > in_buffer.size())) {
+ isc_throw(isc::BadValue,
+ "option offset for option: " << option->getType()
+ << " is out of bounds (expected 1.."
+ << in_buffer.size() - option->len() << ")");
+ }
+
+ // Create temporary buffer to store option contents.
+ util::OutputBuffer buf(option->len());
+ // Pack option contents into temporary buffer.
+ option->pack(buf);
+ // OutputBuffer class has nice functions that write
+ // data at the specified position so we can use it to
+ // inject contents of temporary buffer to output buffer.
+ const uint8_t *buf_data =
+ static_cast<const uint8_t*>(buf.getData());
+ for (int i = 0; i < buf.getLength(); ++i) {
+ out_buffer.writeUint8At(buf_data[i], offset + i);
+ }
+ }
+ }
+ catch (const Exception&) {
+ isc_throw(isc::BadValue, "failed to pack options into buffer.");
+ }
+}
+
+void
+PktTransform::unpackOptions(const OptionBuffer& in_buffer,
+ const Option::OptionCollection& options) {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end(); ++it) {
+
+ boost::shared_ptr<LocalizedOption> option =
+ boost::dynamic_pointer_cast<LocalizedOption>(it->second);
+ if (option == NULL) {
+ isc_throw(isc::BadValue, "option is null");
+ }
+ size_t opt_pos = option->getOffset();
+ if (opt_pos == 0) {
+ isc_throw(isc::BadValue, "failed to unpack packet from raw buffer "
+ "(Option position not specified)");
+ } else if (opt_pos + option->getHeaderLen() > in_buffer.size()) {
+ isc_throw(isc::BadValue,
+ "failed to unpack options from from raw buffer "
+ "(Option position out of bounds)");
+ }
+
+ size_t offset = opt_pos;
+ size_t offset_step = 1;
+ uint16_t opt_type = 0;
+ if (option->getUniverse() == Option::V6) {
+ offset_step = 2;
+ // For DHCPv6 option type is in first two octets.
+ opt_type = in_buffer[offset] * 256 + in_buffer[offset + 1];
+ } else {
+ // For DHCPv4 option type is in first octet.
+ opt_type = in_buffer[offset];
+ }
+ // Check if we got expected option type.
+ if (opt_type != option->getType()) {
+ isc_throw(isc::BadValue,
+ "failed to unpack option from raw buffer "
+ "(option type mismatch)");
+ }
+
+ // Get option length which is supposed to be after option type.
+ offset += offset_step;
+ uint16_t opt_len = in_buffer[offset] * 256 + in_buffer[offset + 1];
+ if (option->getUniverse() == Option::V6) {
+ opt_len = in_buffer[offset] * 256 + in_buffer[offset + 1];
+ } else {
+ opt_len = in_buffer[offset];
+ }
+
+ // Check if packet is not truncated.
+ if (offset + option->getHeaderLen() + opt_len > in_buffer.size()) {
+ isc_throw(isc::BadValue,
+ "failed to unpack option from raw buffer "
+ "(option truncated)");
+ }
+
+ // Seek to actual option data and replace it.
+ offset += offset_step;
+ option->setData(in_buffer.begin() + offset,
+ in_buffer.begin() + offset + opt_len);
+ }
+}
+
+
+} // namespace perfdhcp
+} // namespace isc
diff --git a/tests/tools/perfdhcp/pkt_transform.h b/tests/tools/perfdhcp/pkt_transform.h
new file mode 100644
index 0000000..7fb19f4
--- /dev/null
+++ b/tests/tools/perfdhcp/pkt_transform.h
@@ -0,0 +1,139 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PKT_TRANSFORM_H
+#define __PKT_TRANSFORM_H
+
+#include <dhcp/option.h>
+
+#include "localized_option.h"
+
+namespace isc {
+namespace perfdhcp {
+
+/// \brief Read and write raw data to DHCP packets.
+///
+/// This class provides static functions to read/write raw data from/to the
+/// packet buffer. When reading data with the unpack() method, the
+/// corresponding options objects are updated. When writing to the packet
+/// buffer with pack(), options objects carry input data to be written.
+///
+/// This class is used both by \ref PerfPkt4 and
+/// \ref PerfPkt6 classes in case DHCP packets are created
+/// from template files. In this case, some of the template
+/// packet's options are replaced before sending it to the
+/// server. Offset of specific options are provided from the
+/// command line by the perfdhcp tool user, and passed in an
+/// options collection.
+class PktTransform {
+public:
+
+ /// \brief Prepares on-wire format from raw buffer.
+ ///
+ /// The method copies the input buffer and options contents
+ /// to the output buffer. The input buffer must contain whole
+ /// initial packet data. Parts of this data will be
+ /// overriden by options data specified in an options
+ /// collection. Such options must have their offsets within
+ /// a packet specified (see \ref LocalizedOption to find out
+ /// how to specify options offset).
+ ///
+ /// \note The specified options must fit into the size of the
+ /// initial packet data. A call to this method will fail
+ /// if the option's offset + its size is beyond the packet's size.
+ ///
+ /// \param universe Universe used, V4 or V6
+ /// \param in_buffer Input buffer holding initial packet
+ /// data, this can be directly read from template file
+ /// \param options Options collection with offsets
+ /// \param transid_offset offset of transaction id in a packet,
+ /// transaction ID will be written to output buffer at this
+ /// offset
+ /// \param transid Transaction ID value
+ /// \param out_buffer Output buffer holding "packed" data
+ ///
+ /// \return false, if pack operation failed.
+ static bool pack(const dhcp::Option::Universe universe,
+ const dhcp::OptionBuffer& in_buffer,
+ const dhcp::Option::OptionCollection& options,
+ const size_t transid_offset,
+ const uint32_t transid,
+ util::OutputBuffer& out_buffer);
+
+ /// \brief Handles selective binary packet parsing.
+ ///
+ /// This method handles the parsing of packets that have non-default
+ /// options or transaction ID offsets. The client class has to use
+ /// \ref isc::dhcp::Pkt6::addOption to specify which options to parse.
+ /// Each option should be of the \ref isc::perfdhcp::LocalizedOption
+ /// type with the offset value specified.
+ ///
+ /// \param universe universe used, V4 or V6
+ /// \param in_buffer input buffer to be parsed
+ /// \param options options collection with options offsets
+ /// \param transid_offset offset of transaction id in input buffer
+ /// \param transid transaction id value read from input buffer
+ ///
+ /// \return false, if unpack operation failed.
+ static bool unpack(const dhcp::Option::Universe universe,
+ const dhcp::OptionBuffer& in_buffer,
+ const dhcp::Option::OptionCollection& options,
+ const size_t transid_offset,
+ uint32_t& transid);
+
+private:
+ /// \brief Replaces contents of options in a buffer.
+ ///
+ /// The method uses a localized options collection to
+ /// replace parts of packet data (e.g. data read
+ /// from template file).
+ /// This private method is called from \ref PktTransform::pack
+ ///
+ /// \param in_buffer input buffer holding initial packet data.
+ /// \param out_buffer output buffer with "packed" options.
+ /// \param options options collection with actual data and offsets.
+ ///
+ /// \throw isc::Unexpected if options update failed.
+ static void packOptions(const dhcp::OptionBuffer& in_buffer,
+ const dhcp::Option::OptionCollection& options,
+ util::OutputBuffer& out_buffer);
+
+ /// \brief Reads contents of specified options from buffer.
+ ///
+ /// The method reads options data from the input buffer
+ /// and stores it in options objects. Offsets of the options
+ /// must be specified (see \ref LocalizedOption to find out how to specify
+ /// the option offset).
+ /// This private method is called by \ref PktTransform::unpack.
+ ///
+ /// \note This method iterates through all options in an
+ /// options collection, checks the offset of the option
+ /// in input buffer and reads data from the buffer to
+ /// update the option's buffer. If the provided options collection
+ /// is empty, a call to this method will have no effect.
+ ///
+ /// \param universe universe used, V4 or V6
+ /// \param in_buffer input buffer to be parsed.
+ /// \param options options collection with their offsets
+ /// in input buffer specified.
+ ///
+ /// \throw isc::Unexpected if options unpack failed.
+ static void unpackOptions(const dhcp::OptionBuffer& in_buffer,
+ const dhcp::Option::OptionCollection& options);
+};
+
+} // namespace perfdhcp
+} // namespace isc
+
+#endif // __PKT_TRANSFORM_H
diff --git a/tests/tools/perfdhcp/tests/Makefile.am b/tests/tools/perfdhcp/tests/Makefile.am
index c94ecba..d4034b3 100644
--- a/tests/tools/perfdhcp/tests/Makefile.am
+++ b/tests/tools/perfdhcp/tests/Makefile.am
@@ -15,14 +15,28 @@ if HAVE_GTEST
TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += command_options_unittest.cc
+run_unittests_SOURCES += perf_pkt6_unittest.cc
+run_unittests_SOURCES += perf_pkt4_unittest.cc
+run_unittests_SOURCES += localized_option_unittest.cc
run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/command_options.cc
+run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/pkt_transform.cc
+run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/perf_pkt6.cc
+run_unittests_SOURCES += $(top_builddir)/tests/tools/perfdhcp/perf_pkt4.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some of the
+# Boost headers when compiling with clang.
+run_unittests_CXXFLAGS = -Wno-unused-parameter
+endif
+
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
diff --git a/tests/tools/perfdhcp/tests/command_options_unittest.cc b/tests/tools/perfdhcp/tests/command_options_unittest.cc
index c92edd0..8e1053d 100644
--- a/tests/tools/perfdhcp/tests/command_options_unittest.cc
+++ b/tests/tools/perfdhcp/tests/command_options_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/tests/tools/perfdhcp/tests/localized_option_unittest.cc b/tests/tools/perfdhcp/tests/localized_option_unittest.cc
new file mode 100644
index 0000000..e51560e
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/localized_option_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <gtest/gtest.h>
+
+#include <dhcp/option.h>
+#include <dhcp/dhcp6.h>
+
+#include "../localized_option.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::perfdhcp;
+
+namespace {
+
+TEST(LocalizedOptionTest, Constructor) {
+ OptionBuffer opt_buf;
+ // Create option with default offset.
+ boost::scoped_ptr<LocalizedOption> opt1(new LocalizedOption(Option::V6,
+ D6O_CLIENTID,
+ opt_buf));
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+ EXPECT_EQ(D6O_CLIENTID, opt1->getType());
+ EXPECT_EQ(0, opt1->getOffset());
+
+ // Create option with non-default offset.
+ boost::scoped_ptr<LocalizedOption> opt2(new LocalizedOption(Option::V6,
+ D6O_CLIENTID,
+ opt_buf,
+ 40));
+ EXPECT_EQ(40, opt2->getOffset());
+}
+
+}
diff --git a/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc b/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc
new file mode 100644
index 0000000..3863faa
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/perf_pkt4_unittest.cc
@@ -0,0 +1,384 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/dhcp4.h>
+
+#include "../localized_option.h"
+#include "../perf_pkt4.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+using namespace isc::perfdhcp;
+
+typedef PerfPkt4::LocalizedOptionPtr LocalizedOptionPtr;
+
+namespace {
+
+// A dummy MAC address, padded with 0s
+const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+// Let's use some creative test content here (128 chars + \0)
+const uint8_t dummyFile[] = "Lorem ipsum dolor sit amet, consectetur "
+ "adipiscing elit. Proin mollis placerat metus, at "
+ "lacinia orci ornare vitae. Mauris amet.";
+
+// Yet another type of test content (64 chars + \0)
+const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
+ "adipiscing elit posuere.";
+
+class PerfPkt4Test : public ::testing::Test {
+public:
+ PerfPkt4Test() {
+ }
+
+ /// \brief Returns buffer with sample DHCPDISCOVER message.
+ ///
+ /// This method creates buffer containing on-wire data of
+ /// DHCPDISCOVER message. This buffer is used by tests below
+ /// to create DHCPv4 test packets.
+ ///
+ /// \return vector containing on-wire data
+ std::vector<uint8_t>& capture() {
+
+ // That is only part of the header. It contains all "short" fields,
+ // larger fields are constructed separately.
+ uint8_t hdr[] = {
+ 1, 6, 6, 13, // op, htype, hlen, hops,
+ 0x12, 0x34, 0x56, 0x78, // transaction-id
+ 0, 42, 0x80, 0x00, // 42 secs, BROADCAST flags
+ 192, 0, 2, 1, // ciaddr
+ 1, 2, 3, 4, // yiaddr
+ 192, 0, 2, 255, // siaddr
+ 255, 255, 255, 255, // giaddr
+ };
+
+ uint8_t v4Opts[] = {
+ DHO_HOST_NAME, 3, 0, 1, 2, // Host name option.
+ DHO_BOOT_SIZE, 3, 10, 11, 12, // Boot file size option
+ DHO_MERIT_DUMP, 3, 20, 21, 22, // Merit dump file
+ DHO_DHCP_MESSAGE_TYPE, 1, 1, // DHCP message type.
+ 128, 3, 30, 31, 32,
+ 254, 3, 40, 41, 42,
+ };
+
+ // Initialize the vector with the header fields defined above.
+ static std::vector<uint8_t> buf(hdr, hdr + sizeof(hdr));
+
+ // If this is a first call to this function. Initialize
+ // remaining data.
+ if (buf.size() == sizeof(hdr)) {
+
+ // Append the large header fields.
+ std::copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN,
+ back_inserter(buf));
+ std::copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN,
+ back_inserter(buf));
+ std::copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN,
+ back_inserter(buf));
+
+ // Append magic cookie.
+ buf.push_back(0x63);
+ buf.push_back(0x82);
+ buf.push_back(0x53);
+ buf.push_back(0x63);
+
+ // Append options.
+ std::copy(v4Opts, v4Opts + sizeof(v4Opts), back_inserter(buf));
+ }
+ return buf;
+ }
+};
+
+TEST_F(PerfPkt4Test, Constructor) {
+ // Initialize some dummy payload.
+ uint8_t data[250];
+ for (int i = 0; i < 250; ++i) {
+ data[i] = i;
+ }
+
+ // Test constructor to be used for incoming messages.
+ // Use default (1) offset value and don't specify transaction id.
+ const size_t offset_transid[] = { 1, 10 };
+ boost::scoped_ptr<PerfPkt4> pkt1(new PerfPkt4(data,
+ sizeof(data),
+ offset_transid[0]));
+ EXPECT_EQ(1, pkt1->getTransidOffset());
+
+ // Test constructor to be used for outgoing messages.
+ // Use non-zero offset and specify transaction id.
+ const uint32_t transid = 0x010203;
+ boost::scoped_ptr<PerfPkt4> pkt2(new PerfPkt4(data, sizeof(data),
+ offset_transid[1],
+ transid));
+ EXPECT_EQ(transid, pkt2->getTransid());
+ EXPECT_EQ(offset_transid[1], pkt2->getTransidOffset());
+
+ // Test default constructor. Transaction id offset is expected to be 1.
+ boost::scoped_ptr<PerfPkt4> pkt3(new PerfPkt4(data, sizeof(data)));
+ EXPECT_EQ(1, pkt3->getTransidOffset());
+}
+
+TEST_F(PerfPkt4Test, RawPack) {
+ // Create new packet.
+ std::vector<uint8_t> buf = capture();
+ boost::scoped_ptr<PerfPkt4> pkt(new PerfPkt4(&buf[0], buf.size()));
+
+ // Initialize options data.
+ uint8_t buf_hostname[] = { DHO_HOST_NAME, 3, 4, 5, 6 };
+ uint8_t buf_boot_filesize[] = { DHO_BOOT_SIZE, 3, 1, 2, 3 };
+ OptionBuffer vec_hostname(buf_hostname + 2,
+ buf_hostname + sizeof(buf_hostname));
+ OptionBuffer vec_boot_filesize(buf_boot_filesize + 2,
+ buf_boot_filesize + sizeof(buf_boot_filesize));
+
+ // Create options objects.
+ const size_t offset_hostname = 240;
+ LocalizedOptionPtr pkt_hostname(new LocalizedOption(Option::V4,
+ DHO_HOST_NAME,
+ vec_hostname,
+ offset_hostname));
+ const size_t offset_boot_filesize = 245;
+ LocalizedOptionPtr pkt_boot_filesize(new LocalizedOption(Option::V4,
+ DHO_BOOT_SIZE,
+ vec_boot_filesize,
+ offset_boot_filesize));
+
+ // Try to add options to packet.
+ ASSERT_NO_THROW(pkt->addOption(pkt_boot_filesize));
+ ASSERT_NO_THROW(pkt->addOption(pkt_hostname));
+
+ // We have valid options added with valid offsets so
+ // pack operation should succeed.
+ ASSERT_TRUE(pkt->rawPack());
+
+ // Buffer should now contain new values of DHO_HOST_NAME and
+ // DHO_BOOT_SIZE options.
+ util::OutputBuffer pkt_output = pkt->getBuffer();
+ ASSERT_EQ(buf.size(), pkt_output.getLength());
+ const uint8_t* out_buf_data =
+ static_cast<const uint8_t*>(pkt_output.getData());
+
+ // Check if options we read from buffer is valid.
+ EXPECT_EQ(0, memcmp(buf_hostname,
+ out_buf_data + offset_hostname,
+ sizeof(buf_hostname)));
+ EXPECT_EQ(0, memcmp(buf_boot_filesize,
+ out_buf_data + offset_boot_filesize,
+ sizeof(buf_boot_filesize)));
+}
+
+TEST_F(PerfPkt4Test, RawUnpack) {
+ // Create new packet.
+ std::vector<uint8_t> buf = capture();
+ boost::scoped_ptr<PerfPkt4> pkt(new PerfPkt4(&buf[0], buf.size()));
+
+ // Create options (existing in the packet) and specify their offsets.
+ const size_t offset_merit = 250;
+ LocalizedOptionPtr opt_merit(new LocalizedOption(Option::V4,
+ DHO_MERIT_DUMP,
+ OptionBuffer(),
+ offset_merit));
+
+ const size_t offset_msg_type = 255;
+ LocalizedOptionPtr opt_msg_type(new LocalizedOption(Option::V4,
+ DHO_DHCP_MESSAGE_TYPE,
+ OptionBuffer(),
+ offset_msg_type));
+ // Addition should be successful
+ ASSERT_NO_THROW(pkt->addOption(opt_merit));
+ ASSERT_NO_THROW(pkt->addOption(opt_msg_type));
+
+ // Option fit to packet boundaries and offsets are valid,
+ // so this should unpack successfully.
+ ASSERT_TRUE(pkt->rawUnpack());
+
+ // At this point we should have updated options data (read from buffer).
+ // Let's try to retrieve them.
+ opt_merit = boost::dynamic_pointer_cast<LocalizedOption>
+ (pkt->getOption(DHO_MERIT_DUMP));
+ opt_msg_type = boost::dynamic_pointer_cast<LocalizedOption>
+ (pkt->getOption(DHO_DHCP_MESSAGE_TYPE));
+ ASSERT_TRUE(opt_merit);
+ ASSERT_TRUE(opt_msg_type);
+
+ // Get first option payload.
+ OptionBuffer opt_merit_data = opt_merit->getData();
+
+ // Define reference data.
+ uint8_t buf_merit[] = { 20, 21, 22 };
+
+ // Validate first option data.
+ ASSERT_EQ(sizeof(buf_merit), opt_merit_data.size());
+ EXPECT_TRUE(std::equal(opt_merit_data.begin(),
+ opt_merit_data.end(),
+ buf_merit));
+
+ // Get second option payload.
+ OptionBuffer opt_msg_type_data = opt_msg_type->getData();
+
+ // Expect one byte of message type payload.
+ ASSERT_EQ(1, opt_msg_type_data.size());
+ EXPECT_EQ(1, opt_msg_type_data[0]);
+}
+
+TEST_F(PerfPkt4Test, InvalidOptions) {
+ // Create new packet.
+ std::vector<uint8_t> buf = capture();
+ boost::scoped_ptr<PerfPkt4> pkt1(new PerfPkt4(&buf[0], buf.size()));
+
+ // Create option with invalid offset.
+ // This option is at offset 250 (not 251).
+ const size_t offset_merit = 251;
+ LocalizedOptionPtr opt_merit(new LocalizedOption(Option::V4,
+ DHO_MERIT_DUMP,
+ OptionBuffer(),
+ offset_merit));
+ ASSERT_NO_THROW(pkt1->addOption(opt_merit));
+
+ cout << "Testing unpack of invalid options. "
+ << "This may produce spurious errors." << endl;
+
+ // Unpack is expected to fail because it is supposed to read
+ // option type from buffer and match it with DHO_MERIT_DUMP.
+ // It will not match because option is shifted by one byte.
+ ASSERT_FALSE(pkt1->rawUnpack());
+
+ // Create another packet.
+ boost::scoped_ptr<PerfPkt4> pkt2(new PerfPkt4(&buf[0], buf.size()));
+
+ // Create DHO_DHCP_MESSAGE_TYPE option that has the wrong offset.
+ // With this offset, option goes beyond packet size (268).
+ const size_t offset_msg_type = 266;
+ LocalizedOptionPtr opt_msg_type(new LocalizedOption(Option::V4,
+ DHO_DHCP_MESSAGE_TYPE,
+ OptionBuffer(1, 2),
+ offset_msg_type));
+ // Adding option is expected to be successful because no
+ // offset validation takes place at this point.
+ ASSERT_NO_THROW(pkt2->addOption(opt_msg_type));
+
+ // This is expected to fail because option is out of bounds.
+ ASSERT_FALSE(pkt2->rawPack());
+}
+
+TEST_F(PerfPkt4Test, TruncatedPacket) {
+ // Get the whole packet and truncate it to 249 bytes.
+ std::vector<uint8_t> buf = capture();
+ buf.resize(249);
+ boost::scoped_ptr<PerfPkt4> pkt(new PerfPkt4(&buf[0], buf.size()));
+
+ // Option DHO_BOOT_SIZE is now truncated because whole packet
+ // is truncated. This option ends at 249 while last index of
+ // truncated packet is now 248.
+ const size_t offset_boot_filesize = 245;
+ LocalizedOptionPtr opt_boot_filesize(new LocalizedOption(Option::V4,
+ DHO_BOOT_SIZE,
+ OptionBuffer(3, 1),
+ offset_boot_filesize));
+ ASSERT_NO_THROW(pkt->addOption(opt_boot_filesize));
+
+ cout << "Testing pack and unpack of options in truncated "
+ << "packet. This may produce spurious errors." << endl;
+
+ // Both pack and unpack are expected to fail because
+ // added option is out of bounds.
+ EXPECT_FALSE(pkt->rawUnpack());
+ EXPECT_FALSE(pkt->rawPack());
+}
+
+TEST_F(PerfPkt4Test, PackTransactionId) {
+ // Create dummy packet that consists of zeros.
+ std::vector<uint8_t> buf(268, 0);
+
+ const size_t offset_transid[] = { 10, 265 };
+ const uint32_t transid = 0x0102;
+ // Initialize transaction id 0x00000102 at offset 10.
+ boost::scoped_ptr<PerfPkt4> pkt1(new PerfPkt4(&buf[0], buf.size(),
+ offset_transid[0],
+ transid));
+
+ // Pack will inject transaction id at offset 10 into the
+ // packet buffer.
+ ASSERT_TRUE(pkt1->rawPack());
+
+ // Get packet's output buffer and make sure it has valid size.
+ util::OutputBuffer out_buf = pkt1->getBuffer();
+ ASSERT_EQ(buf.size(), out_buf.getLength());
+ const uint8_t *out_buf_data =
+ static_cast<const uint8_t*>(out_buf.getData());
+
+ // Initialize reference data for transaction id.
+ const uint8_t ref_data[] = { 0, 0, 1, 2 };
+
+ // Expect that reference transaction id matches what we have
+ // read from buffer.
+ EXPECT_EQ(0, memcmp(ref_data, out_buf_data + offset_transid[0], 4));
+
+ cout << "Testing pack with invalid transaction id offset. "
+ << "This may produce spurious errors" << endl;
+
+ // Create packet with invalid transaction id offset.
+ // Packet length is 268, transaction id is 4 bytes long so last byte of
+ // transaction id is out of bounds.
+ boost::scoped_ptr<PerfPkt4> pkt2(new PerfPkt4(&buf[0], buf.size(),
+ offset_transid[1],
+ transid));
+ EXPECT_FALSE(pkt2->rawPack());
+}
+
+TEST_F(PerfPkt4Test, UnpackTransactionId) {
+ // Initialize packet data, length 268, zeros only.
+ std::vector<uint8_t> in_data(268, 0);
+
+ // Assume that transaction id is at offset 100.
+ // Fill 4 bytes at offset 100 with dummy transaction id.
+ for (int i = 100; i < 104; ++i) {
+ in_data[i] = i - 99;
+ }
+
+ // Create packet from initialized buffer.
+ const size_t offset_transid[] = { 100, 270 };
+ boost::scoped_ptr<PerfPkt4> pkt1(new PerfPkt4(&in_data[0],
+ in_data.size(),
+ offset_transid[0]));
+ ASSERT_TRUE(pkt1->rawUnpack());
+
+ // Get unpacked transaction id and compare with reference.
+ EXPECT_EQ(0x01020304, pkt1->getTransid());
+
+ // Create packet with transaction id at invalid offset.
+ boost::scoped_ptr<PerfPkt4> pkt2(new PerfPkt4(&in_data[0],
+ in_data.size(),
+ offset_transid[1]));
+
+ cout << "Testing unpack of transaction id at invalid offset. "
+ << "This may produce spurious errors." << endl;
+
+ // Unpack is supposed to fail because transaction id is at
+ // out of bounds offset.
+ EXPECT_FALSE(pkt2->rawUnpack());
+}
+
+}
diff --git a/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc b/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc
new file mode 100644
index 0000000..de134cc
--- /dev/null
+++ b/tests/tools/perfdhcp/tests/perf_pkt6_unittest.cc
@@ -0,0 +1,327 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/dhcp6.h>
+
+#include "../localized_option.h"
+#include "../perf_pkt6.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::perfdhcp;
+
+typedef PerfPkt6::LocalizedOptionPtr LocalizedOptionPtr;
+
+namespace {
+
+class PerfPkt6Test : public ::testing::Test {
+public:
+ PerfPkt6Test() {
+ }
+
+ /// \brief Returns captured SOLICIT packet.
+ ///
+ /// Captured SOLICIT packet with transid=0x3d79fb and options: client-id,
+ /// in_na, dns-server, elapsed-time, option-request
+ /// This code was autogenerated
+ /// (see src/bin/dhcp6/tests/iface_mgr_unittest.c),
+ /// but we spent some time to make it less ugly than it used to be.
+ ///
+ /// \return pointer to Pkt6 that represents received SOLICIT
+ PerfPkt6* capture() {
+ uint8_t data[98];
+ data[0] = 1;
+ data[1] = 1; data[2] = 2; data[3] = 3; data[4] = 0;
+ data[5] = 1; data[6] = 0; data[7] = 14; data[8] = 0;
+ data[9] = 1; data[10] = 0; data[11] = 1; data[12] = 21;
+ data[13] = 158; data[14] = 60; data[15] = 22; data[16] = 0;
+ data[17] = 30; data[18] = 140; data[19] = 155; data[20] = 115;
+ data[21] = 73; data[22] = 0; data[23] = 3; data[24] = 0;
+ data[25] = 40; data[26] = 0; data[27] = 0; data[28] = 0;
+ data[29] = 1; data[30] = 255; data[31] = 255; data[32] = 255;
+ data[33] = 255; data[34] = 255; data[35] = 255; data[36] = 255;
+ data[37] = 255; data[38] = 0; data[39] = 5; data[40] = 0;
+ data[41] = 24; data[42] = 32; data[43] = 1; data[44] = 13;
+ data[45] = 184; data[46] = 0; data[47] = 1; data[48] = 0;
+ data[49] = 0; data[50] = 0; data[51] = 0; data[52] = 0;
+ data[53] = 0; data[54] = 0; data[55] = 0; data[56] = 18;
+ data[57] = 52; data[58] = 255; data[59] = 255; data[60] = 255;
+ data[61] = 255; data[62] = 255; data[63] = 255; data[64] = 255;
+ data[65] = 255; data[66] = 0; data[67] = 23; data[68] = 0;
+ data[69] = 16; data[70] = 32; data[71] = 1; data[72] = 13;
+ data[73] = 184; data[74] = 0; data[75] = 1; data[76] = 0;
+ data[77] = 0; data[78] = 0; data[79] = 0; data[80] = 0;
+ data[81] = 0; data[82] = 0; data[83] = 0; data[84] = 221;
+ data[85] = 221; data[86] = 0; data[87] = 8; data[88] = 0;
+ data[89] = 2; data[90] = 0; data[91] = 100; data[92] = 0;
+ data[93] = 6; data[94] = 0; data[95] = 2; data[96] = 0;
+ data[97] = 23;
+
+ PerfPkt6* pkt = new PerfPkt6(data, sizeof(data));
+
+ return (pkt);
+ }
+
+ /// \brief Returns truncated SOLICIT packet.
+ ///
+ /// Returns truncated SOLICIT packet which will be used for
+ /// negative tests: e.g. pack options out of packet.
+ ///
+ /// \return pointer to Pkt6 that represents truncated SOLICIT
+ PerfPkt6* captureTruncated() {
+ uint8_t data[17];
+ data[0] = 1;
+ data[1] = 1; data[2] = 2; data[3] = 3; data[4] = 0;
+ data[5] = 1; data[6] = 0; data[7] = 14; data[8] = 0;
+ data[9] = 1; data[10] = 0; data[11] = 1; data[12] = 21;
+ data[13] = 158; data[14] = 60; data[15] = 22; data[16] = 0;
+
+ PerfPkt6* pkt = new PerfPkt6(data, sizeof(data));
+
+ return (pkt);
+ }
+
+
+};
+
+TEST_F(PerfPkt6Test, Constructor) {
+ // Data to be used to create packet.
+ uint8_t data[] = { 0, 1, 2, 3, 4, 5 };
+
+ // Test constructor to be used for incoming messages.
+ // Use default (1) offset value and don't specify transaction id.
+ boost::scoped_ptr<PerfPkt6> pkt1(new PerfPkt6(data, sizeof(data)));
+ EXPECT_EQ(sizeof(data), pkt1->getData().size());
+ EXPECT_EQ(0, memcmp(&pkt1->getData()[0], data, sizeof(data)));
+ EXPECT_EQ(1, pkt1->getTransidOffset());
+
+ // Test constructor to be used for outgoing messages.
+ // Use non-zero offset and specify transaction id.
+ const size_t offset_transid = 10;
+ const uint32_t transid = 0x010203;
+ boost::scoped_ptr<PerfPkt6> pkt2(new PerfPkt6(data, sizeof(data),
+ offset_transid, transid));
+ EXPECT_EQ(sizeof(data), pkt2->getData().size());
+ EXPECT_EQ(0, memcmp(&pkt2->getData()[0], data, sizeof(data)));
+ EXPECT_EQ(0x010203, pkt2->getTransid());
+ EXPECT_EQ(10, pkt2->getTransidOffset());
+}
+
+TEST_F(PerfPkt6Test, RawPackUnpack) {
+ // Create first packet.
+ boost::scoped_ptr<PerfPkt6> pkt1(capture());
+
+ // Create some input buffers to initialize options.
+ uint8_t buf_elapsed_time[] = { 1, 1 };
+ uint8_t buf_duid[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
+
+ // Create options.
+ const size_t offset_elapsed_time = 86;
+ OptionBuffer vec_elapsed_time(buf_elapsed_time,
+ buf_elapsed_time + sizeof(buf_elapsed_time));
+ LocalizedOptionPtr pkt1_elapsed_time(new LocalizedOption(Option::V6,
+ D6O_ELAPSED_TIME,
+ vec_elapsed_time,
+ offset_elapsed_time));
+ const size_t offset_duid = 4;
+ OptionBuffer vec_duid(buf_duid, buf_duid + sizeof(buf_duid));
+ LocalizedOptionPtr pkt1_duid(new LocalizedOption(Option::V6,
+ D6O_CLIENTID,
+ vec_duid,
+ offset_duid));
+
+ // Add option to packet and create on-wire format from added options.
+ // Contents of options will override contents of packet buffer.
+ ASSERT_NO_THROW(pkt1->addOption(pkt1_elapsed_time));
+ ASSERT_NO_THROW(pkt1->addOption(pkt1_duid));
+ ASSERT_TRUE(pkt1->rawPack());
+
+ // Reset so as we can reuse them for another packet.
+ vec_elapsed_time.clear();
+ vec_duid.clear();
+
+ // Get output buffer from packet 1 to create new packet
+ // that will be later validated.
+ util::OutputBuffer pkt1_output = pkt1->getBuffer();
+ ASSERT_EQ(pkt1_output.getLength(), pkt1->getData().size());
+ const uint8_t* pkt1_output_data = static_cast<const uint8_t*>
+ (pkt1_output.getData());
+ boost::scoped_ptr<PerfPkt6> pkt2(new PerfPkt6(pkt1_output_data,
+ pkt1_output.getLength()));
+
+ // Create objects specifying options offset in a packet.
+ // Offsets will inform pkt2 object where to read data from.
+ LocalizedOptionPtr pkt2_elapsed_time(new LocalizedOption(Option::V6,
+ D6O_ELAPSED_TIME,
+ vec_elapsed_time,
+ offset_elapsed_time));
+ LocalizedOptionPtr pkt2_duid(new LocalizedOption(Option::V6,
+ D6O_CLIENTID,
+ vec_duid,
+ offset_duid));
+ // Add options to packet to pass their offsets.
+ pkt2->addOption(pkt2_elapsed_time);
+ pkt2->addOption(pkt2_duid);
+
+ // Unpack: get relevant parts of buffer data into option objects.
+ ASSERT_TRUE(pkt2->rawUnpack());
+
+ // Once option data is stored in options objects we pull it out.
+ pkt2_elapsed_time = boost::dynamic_pointer_cast<LocalizedOption>
+ (pkt2->getOption(D6O_ELAPSED_TIME));
+ pkt2_duid = boost::dynamic_pointer_cast<LocalizedOption>
+ (pkt2->getOption(D6O_CLIENTID));
+
+ // Check if options are present. They have to be there since
+ // we have added them ourselves.
+ ASSERT_TRUE(pkt2_elapsed_time);
+ ASSERT_TRUE(pkt2_duid);
+
+ // Expecting option contents be the same as original.
+ OptionBuffer pkt2_elapsed_time_data = pkt2_elapsed_time->getData();
+ OptionBuffer pkt2_duid_data = pkt2_duid->getData();
+ EXPECT_EQ(0x0101, pkt2_elapsed_time->getUint16());
+ EXPECT_TRUE(std::equal(pkt2_duid_data.begin(),
+ pkt2_duid_data.end(),
+ buf_duid));
+}
+
+TEST_F(PerfPkt6Test, InvalidOptions) {
+ // Create packet.
+ boost::scoped_ptr<PerfPkt6> pkt1(capture());
+ OptionBuffer vec_server_id;
+ vec_server_id.resize(10);
+ // Testing invalid offset of the option (greater than packet size)
+ const size_t offset_serverid[] = { 150, 85 };
+ LocalizedOptionPtr pkt1_serverid(new LocalizedOption(Option::V6,
+ D6O_SERVERID,
+ vec_server_id,
+ offset_serverid[0]));
+ pkt1->addOption(pkt1_serverid);
+ // Pack has to fail due to invalid offset.
+ EXPECT_FALSE(pkt1->rawPack());
+
+ // Create packet.
+ boost::scoped_ptr<PerfPkt6> pkt2(capture());
+ // Testing offset of the option (lower than packet size but
+ // tail of the option out of bounds).
+ LocalizedOptionPtr pkt2_serverid(new LocalizedOption(Option::V6,
+ D6O_SERVERID,
+ vec_server_id,
+ offset_serverid[1]));
+ pkt2->addOption(pkt2_serverid);
+ // Pack must fail due to invalid offset.
+ EXPECT_FALSE(pkt2->rawPack());
+}
+
+
+// Verify that both rawUnpack() and rawPack() fail on a packet that has
+// been truncated in the middle of an option, where the option length
+// read from the buffer points past the end of the available data.
+TEST_F(PerfPkt6Test, TruncatedPacket) {
+ cout << "Testing parsing options from truncated packet."
+ << "This may produce spurious errors" << endl;
+
+ // Create truncated (in the middle of duid options)
+ // NOTE(review): captureTruncated() presumably returns a deliberately
+ // shortened capture -- defined elsewhere in this fixture; confirm.
+ boost::scoped_ptr<PerfPkt6> pkt1(captureTruncated());
+ // 30-byte placeholder for the DUID; real content would be filled
+ // in from the buffer on unpack.
+ OptionBuffer vec_duid;
+ vec_duid.resize(30);
+ // Option header expected at offset 4 within the truncated packet.
+ const size_t offset_duid = 4;
+ LocalizedOptionPtr pkt1_duid(new LocalizedOption(Option::V6,
+ D6O_CLIENTID,
+ vec_duid,
+ offset_duid));
+ pkt1->addOption(pkt1_duid);
+ // Pack/unpack must fail because length of the option read from buffer
+ // will extend over the actual packet length.
+ EXPECT_FALSE(pkt1->rawUnpack());
+ EXPECT_FALSE(pkt1->rawPack());
+}
+
+// Verify that rawPack() writes the transaction id (compared here as
+// 3 bytes) at the configured offset into the output buffer, and that
+// packing fails when the offset is out of bounds.
+TEST_F(PerfPkt6Test, PackTransactionId) {
+ uint8_t data[100];
+ // NOTE(review): memset(&data, ...) works because &data aliases the
+ // array's first byte, but plain memset(data, ...) is conventional.
+ memset(&data, 0, sizeof(data));
+
+ // offset_transid[0] = 50: valid location inside the 100-byte buffer;
+ // offset_transid[1] = 100: at/past the buffer end (failure case).
+ const size_t offset_transid[] = { 50, 100 };
+ const uint32_t transid = 0x010203;
+
+ // Create dummy packet that is simply filled with zeros.
+ boost::scoped_ptr<PerfPkt6> pkt1(new PerfPkt6(data,
+ sizeof(data),
+ offset_transid[0],
+ transid));
+
+ // Reference data are non zero so we can detect them in dummy packet.
+ // 0x01 0x02 0x03 is the expected wire form of transid = 0x010203.
+ uint8_t ref_data[3] = { 1, 2, 3 };
+
+ // This will store given transaction id in the packet data at
+ // offset of 50.
+ ASSERT_TRUE(pkt1->rawPack());
+
+ // Get the output buffer so we can validate it.
+ util::OutputBuffer out_buf = pkt1->getBuffer();
+ ASSERT_EQ(sizeof(data), out_buf.getLength());
+ const uint8_t *out_buf_data = static_cast<const uint8_t*>
+ (out_buf.getData());
+
+ // Validate transaction id.
+ EXPECT_EQ(0, memcmp(out_buf_data + offset_transid[0], ref_data, 3));
+
+
+ // Out of bounds transaction id offset.
+ boost::scoped_ptr<PerfPkt6> pkt2(new PerfPkt6(data,
+ sizeof(data),
+ offset_transid[1],
+ transid));
+ cout << "Testing out of bounds offset. "
+ "This may produce spurious errors ..." << endl;
+ EXPECT_FALSE(pkt2->rawPack());
+}
+
+// Verify that rawUnpack() reads the transaction id from the configured
+// offset in the input buffer, and fails when the offset lies beyond
+// the buffer.
+TEST_F(PerfPkt6Test, UnpackTransactionId) {
+ // Initialize data for dummy packet (zeros only).
+ uint8_t data[100] = { 0 };
+
+ // Generate transaction id = 0x010203 and inject at offset = 50.
+ // Writes data[50] = 1, data[51] = 2, data[52] = 3.
+ for (int i = 50; i < 53; ++i) {
+ data[i] = i - 49;
+ }
+ // Create packet and point out that transaction id is at offset 50.
+ // offset_transid[1] = 300 is far beyond the 100-byte buffer and is
+ // used for the failure case below.
+ const size_t offset_transid[] = { 50, 300 };
+ boost::scoped_ptr<PerfPkt6> pkt1(new PerfPkt6(data,
+ sizeof(data),
+ offset_transid[0]));
+
+ // Get transaction id out of buffer and store in class member.
+ ASSERT_TRUE(pkt1->rawUnpack());
+ // Test value of transaction id.
+ EXPECT_EQ(0x010203, pkt1->getTransid());
+
+ // Out of bounds transaction id offset.
+ boost::scoped_ptr<PerfPkt6> pkt2(new PerfPkt6(data,
+ sizeof(data),
+ offset_transid[1]));
+ cout << "Testing out of bounds offset. "
+ "This may produce spurious errors ..." << endl;
+ EXPECT_FALSE(pkt2->rawUnpack());
+
+}
+
+}
More information about the bind10-changes
mailing list