BIND 10 trac1288, updated. 045c30f0dffebb30ad8862986be435748ed0efb6 [1288] disable adjust_ttl in get_iterator to preserve the original TTLs of the same name/type when they are different (unusual for modern zones but possible). added test case for it, and for that purpose added sqlite3 test data and refactored the test framework a bit.
BIND 10 source code commits
bind10-changes at lists.isc.org
Fri Nov 11 20:53:23 UTC 2011
The branch, trac1288 has been updated
via 045c30f0dffebb30ad8862986be435748ed0efb6 (commit)
via a6fd03e989a1fd5ae9514774bb3b3bb2a6668765 (commit)
via 81986f1f0af388bc75baf4fe26e29771f885f200 (commit)
via 08e1873a3593b4fa06754654d22d99771aa388a6 (commit)
via d0e0bab2c4e3ce4f60c893d3a89ec8c91e2f11e0 (commit)
via 46c4fc8c240445d0d7cb70a0b5ae17eff371c5db (commit)
via 65f4be2b65bf19baad6bbeda742b44dff7cd9b4a (commit)
via a3ba4cca05891f1052aae6bbe28c125799c7fe6f (commit)
via 4dc03f5419813b974b9794aa2cba4f55557fbbb5 (commit)
via dc2ea48db152796f6c0f62641f00646ef32e2b9c (commit)
via b513f0ab652e11892c232b6170f675fbb9990609 (commit)
via bde035f1ebcb1a9c7678692538f9aec18f5232e6 (commit)
via b85213cd68ec24c5deede886d466bf0911b9e762 (commit)
via 056a1342f0d73cf53a37ed672a8a4ad907c4cfa2 (commit)
via 71de39fb8126b7200b2f6dcd9689a000c958fe0e (commit)
via f337180ad87778e3b91111efe93c3e31b1c92a91 (commit)
via 01c6801b65e167ba2cf635143b988bf4bcbbdc68 (commit)
via 31d5a4f66b18cca838ca1182b9f13034066427a7 (commit)
via 0f7a43ef24e2fedfa554200cbfa3d83971dbfd90 (commit)
via 9f854755d1bad72bc4bd94accbc60d211c880cb7 (commit)
via 0a3592efda9bd97cf251163cf9a30f38122cb7c2 (commit)
via 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2 (commit)
via 9862bdf184aceb37cfdbb4fbb455209bdf88a0f4 (commit)
via e6a596fe8f57103c735d8e135f855d46c248844c (commit)
via f8cea54b5bb8f870a01beebbdcde5eb90dd7d8b4 (commit)
via 137a61f2afcd6d16ea20c3a4436046d783a5babf (commit)
via 6b75c128bcdcefd85c18ccb6def59e9acedd4437 (commit)
via 1a5bd80bbe01abbb2a5932bc43fab8e7a287dcf5 (commit)
via c03e6df1521a378fa3cb9eab4a11db93e6e34969 (commit)
via 8cea64b69af8d5ef21497d2f1c9812968ce5d8f7 (commit)
via 1aa233fab1d74dc776899df61181806679d14013 (commit)
via 45bd390859827c02965765b4b146b5351cbbb1c1 (commit)
via 0f6b216a89583edd445942386df5a388b39149d5 (commit)
via ac552055bc8a4d996a0c24eb5f13d01667a3d77a (commit)
via 26aaecc388f8c152b5d63a1f3906ba5a625b0e31 (commit)
via 10c84106e8b34d78fa1916e4bc3db15030fd94f9 (commit)
via 23cfc5b4d9b384172d0eadd2269ed6a6121966a8 (commit)
via 8d7ef6fe3b696ee2cffdc4f10fdf673968933077 (commit)
via 6cd1c3aa7fb998fe9f873045b74185f793177cb5 (commit)
via e6d7624e503084067e6c4659c6bdbd89c038fdd7 (commit)
via 4b56e1807d8ce8b86da6793b67b50ff57ee62b9e (commit)
via 5c16ff47ae8d485da0684ee7dd5547eeef3c6232 (commit)
via 65d8475336b8e884ff261b9a1fe03688e1618cf4 (commit)
via 388e77cae5d9260bcc314465f6711bcdd782a26d (commit)
via 96c94d6baf0a68b641cc9b93966b09b38ebaa15b (commit)
via 1db4e8af5cf9a8600e8005807f0aa5109756c064 (commit)
via 4aa0057db95051e8e554bb5fcbcfbfecf822a5cd (commit)
via 007d31f50876cd58a031dd86b461145e77bea63e (commit)
via 27b7f9d36113514773777eb94bf66a3ef8c49a82 (commit)
via 6716721a7c10737d86a4a29af530d54a458f83ca (commit)
via e8aa8b8b994146dfff6d29435a66c88dcf79eb69 (commit)
via 586c93cef97215330b8bdffed6c35335fb66173d (commit)
via 5d6c71aeb2575883488b2cde87501aa84260b1ab (commit)
via 233d2d783e6be97f208998f9fcf79404eea5c9b3 (commit)
via dee6a4739aee15e8899da2e35d179cb1d8623e76 (commit)
via 50672f2d6073e813fb80250398b6e6a2b93c915d (commit)
via 1a90f118bf69d6239ca290f712bfeb89a9027efd (commit)
via 5d290088a1b996011217cf801e37600d5bcd037e (commit)
via 3d59d6a24e3a84c3ca453721649e6adfab863c0e (commit)
via a95b528af25a2b3bda91f9b88c04a20b0b783208 (commit)
via 58e8ca7d1c5d8f4b69aa174405e4ef280b8012cc (commit)
via aa13f832395794bab3647ed375ac8a6e2d26e55f (commit)
via f2ffe07f7e25c037855685b7693ea4d4eed1cd0c (commit)
via 0ea04c4bb216cc822be49626d4b0269956fd070e (commit)
via b03d29677700c1dd2a527dafe9987defb7556e97 (commit)
via 043ff1e7ec5f2c8e3d6b7e278418fc03eea2b09f (commit)
via 67a11e710e06647dfb65ea6e592fd80851422dad (commit)
via b4b9c3e18f8d76b695d7b84f1b128ccba229d814 (commit)
via bb76c3f643eb85fc8b1ed8087f72368ad1d23aa3 (commit)
via 2764ae7bde7b314773b7258d23fce3813c4407b2 (commit)
via 1d9614bc52634bd512121f34af66290a2cdb2958 (commit)
via 34092bce6cb5755eb6b53979f8f624ca78b592fb (commit)
via 35ca4f5aa94daa5e3a8ddcb02812e7d76685e65e (commit)
via 6d46a3787127f87aa65c9dfb626476f79b4f0194 (commit)
via c692292fb26bf6af6e94b7e160c0c7af27e123ac (commit)
via d6a9dffdd4ee8af94e31ae9462e2ef851b49fca8 (commit)
via bfae9c1e78bcc1e94b4d5eef4d0bb9da1d42f30e (commit)
via 0428f6fcc7b5acc73f70913a17bd6f23c5a6ad3a (commit)
via 9b9a92fc3d9cd1e37166f04284a922f9ab220bbe (commit)
via bd938be1cafae39233d0a8357a4e10b383f7de37 (commit)
via e7d5e8f78ebad76b695e48fc2780babba6ec07d5 (commit)
via 0166b44b81851c687d85e4f3fd87ffb0e92c6d58 (commit)
via 05d4deb643271e0f0b0dcfb22809714086d50788 (commit)
via c5f69488232bd0464cd7e2174be96b30b51b7e83 (commit)
via a3fd03e16b71ae4e9b480e4e48c7ddfa393555ac (commit)
via 64d4ac8b0fee6702093428b855f3d878d7306468 (commit)
via 5038c63b05eaee1bda68346899ac3f6baf5fbe56 (commit)
via 5166d1a65421c3e8515dbcb0d5fcb44c7f400035 (commit)
via 66bb38a4d0cf296f48181d624d22b1074688de38 (commit)
via 7d2826b519f95b2fecd299e15952e897c5a60b2b (commit)
via c3b01cc59ba03c6054af4bae42e08965b3f60eb0 (commit)
via 687b0e5483e088ca07d5f7249b109cc377d04bd2 (commit)
via e41f8459ca5dbc886e838e6e32585ba5c7eb96e6 (commit)
via e856c49ae33b2b79d8eab0b313e4ba25db261c4a (commit)
via 3a6d50835b621e4825ec0d8434ce066bd31020d0 (commit)
via 6d2960ff386a85c9738fc4cfd3975ee1d58eaa04 (commit)
via 3a25578a01620918cd722e430b61c0fe91177e0a (commit)
via 8f876a23792b3feeedb807a66a08cd4f62d60d8a (commit)
via 6cfcb5a3c784f774702d9ca183e13f6b6690b74d (commit)
via 701ffebae5b357a693e764bbef904dc374ebb591 (commit)
via e16e284794d66212aec735ece0ee1fc112f2d2db (commit)
via af0b62cf1161739d3a1244750b60d3e6b75a22e8 (commit)
via b64ab304aa90d938003922c95926ef1b0ea4fec9 (commit)
via 4e0d6d115cd572e58b886bcaffee3f1df7b6bcad (commit)
via 4493013b75994f8689a26951592fb575a23e5b35 (commit)
via 8df7345ad6d658c6a366499b6e491790289168ed (commit)
via f0ad44ee4a8bc33ea2109d91243d95db1833659a (commit)
via 3f070803d6d61ffbbda0f6628bb2d7f0cfdb6ca0 (commit)
via c9160954fd701796f52c329e5ec3ca2ba6f5995c (commit)
via 25b432b279b90ca97dd4a69dc1d4f5428fe2660f (commit)
via dd63399d282dc503e4009bb579ddc4ca15ccde5f (commit)
via af2a4d06dedf27a1c86cd7ada5e85df495a79ff6 (commit)
via 46adf014f18c6b3f9a685b8f0fdd0775a583a7c5 (commit)
via 9b76badecd4b688c95c5f83ecdc168922e48680b (commit)
via 07520bd77da400ca476432f8bedcd934d992ec81 (commit)
via 2ab68057dceb0d471daf4524ba338f8f45e942f2 (commit)
via 11981fea517310f13154bf7695f0278b571ac28a (commit)
via 092dbe3f2af0a0c010a081f64947dbedb11b3468 (commit)
via bfab5a33ceabe3f0d31bd465d13308c8b84adf68 (commit)
via ef51c8418dc44bf2882c898990b30fc76ca9a97b (commit)
via ab642e89554bedf0a66c2358db71ec16ddeb2e7f (commit)
via 91c2cf35e41642a997df020de797324bb4cfedcc (commit)
via c6e8dd84e81f5686d45cc41f514d4f61d075a276 (commit)
via 94282a20ab88b590e8b3b2522da2873ca81c632a (commit)
via 4ddb345fdc31614e191b0a0dea282ddd2ebe279e (commit)
via 18b04945884fbcc1783e52bed845c34395c0acf8 (commit)
via 7d25b201c0bc91987c4d9743d0c21b9486b98fd8 (commit)
via b01c18148a840b0d5719cbcd2653bf1b346e45f9 (commit)
via 41f528a9eacdb430406a0d9047049585cae31db8 (commit)
via c75108b70a9d560034949a75dc52ecfb59fa0b3f (commit)
via 6266a0dd4e0537335e22c2941940636fe220c202 (commit)
via 14f9cfa80194d2d391ea6657ad0205e6223e2d25 (commit)
via 5e3d007b0b08f340e646a2df9073b31cd3c76476 (commit)
via c3a5acc65768a1d87c102159baae0d04f8c14790 (commit)
via 1c4e66cfdfab4fb4608f2b8d18a25e28e7a70adc (commit)
via 7db8a3e327aa6eb8fdc5fed2abb7f52b030fe6f8 (commit)
via fd3c952098c46d84c9a277b1409442813a263876 (commit)
via b108bc9f9231872d4f3e0fa768b8c0e4506a2b95 (commit)
via c5cef09ac250129340f357a9ea2dd798d290be4d (commit)
via 8b349f6730bf85ccfb37d368aa18db4f6c0aaa1b (commit)
via 4b584e952e14a40e81b7e360c75cd787ba988481 (commit)
via 702e2dd653a315141e01147ac4cc2a6c06fab673 (commit)
via 5d38929255f7d8cca95020672a2b72273a07de1d (commit)
via 673ef8efd5d474d66d62d134348730518160cbf9 (commit)
via 44160936a4c52ebaf4be6e1f0fcc02c84c7fb719 (commit)
via db063ad7e102eafe75bda392197e9653be95bea4 (commit)
via e23b6b271c892905c9a14386aee502610502bba4 (commit)
via e7a16b2735b09c0d5b55375e3091fa886940fc40 (commit)
via 8da9b5298d5cbd0df840240e71460d047f4da808 (commit)
via 18e970e16c5044da8b4a7d2c800f0b7baeab9f96 (commit)
via 0b145510ca7b6d4cfe8bc43cd6de2563907dfca3 (commit)
via 72f4baca540cc17e18da4632cb4d32df29f3a9a3 (commit)
via 86123d1dc31432d176eb54fa300eb65e269df0f4 (commit)
via 7e874ac36e4086fc0ff9b50537ffdbaeb685ed09 (commit)
via f0f4387faa4f6246546ee4b79e6289dd370913d1 (commit)
via 13c03c7116df55fa0aad790c2b2a88f3743ba95b (commit)
via 65b9917a960e8b49a947bed1886d1331155b95f5 (commit)
via 5d4e05531e443e355fbf8369a37efc239d1c95c4 (commit)
via c92981134284041b71efc68cff49fead91368e47 (commit)
via 60c6d07decbe759bb57da7dfafc79e71c52a9c6c (commit)
via 5634285ef8bed69dcceab61e84b7aefdf1c1ef5d (commit)
via e0c15795fa09d93fa8c6e3aa0722ca9ed01b61a0 (commit)
via 27f88f2ed0a0a7541f3ea9c6d95db5c805e4b062 (commit)
via 1adb9636b2ba1314140411cd142f9b2f95afede9 (commit)
via 439b8e22a099e641bbe9236bc44beed78634568d (commit)
via 4cf570ad0a107cbf70a6e96e8db30eb2c8b8a2ff (commit)
via aa35477883e1a5b1740092296d463ecfd277dbbb (commit)
via 701074ebbf30930b92f8b06d9cc88428aed8db5f (commit)
via e009438536339927f645f9a272c23e43cd0660fc (commit)
via c3bde52d5e1e2748f9d60aa8740fa044260913d5 (commit)
via 6d8da5a713e33008a9e8bac1ba24367a3ba86a10 (commit)
via d63457baaa31c80bb1ffeefd4f111c7d92685c8c (commit)
via dcd6d7ff4c0671a0995fe4051cea0e525d3f82bc (commit)
via 7f150769d5e3485cd801f0b5ab9b1d3b25aae520 (commit)
via 61fdce086a40930595e70168340ee68080b327bf (commit)
via f17fad51f1533f89600fb3c2e265ee2ad79c3f53 (commit)
via 44113e516b30bb58dd7481b2b87a7f88c0ec51a7 (commit)
via 7d4cc051f1ab3470bb5f7b5f8ea9e622fc7c7c9b (commit)
via bbfee9cc079856d3b437a1bbb69b4157092cbf97 (commit)
via 6bdd521c49d10867b63158837f6fdc6d06d1f256 (commit)
via c88718438ee67b52cfea003b9e3ce1e5fe234bd8 (commit)
via dd7fb442ed97cc469db4275fdc3d4628cd44ea79 (commit)
via 032f9633f4a353c11d0d855984aad0f0392a6ac1 (commit)
via 6215c5929bdd6fbb708fd0a2ee034250aa5cc065 (commit)
via d83a117a090eaf417698eea6697ae750dc45c135 (commit)
via ea7f5ad5d326b7ed2d5f0ac1729c2301555b6417 (commit)
via 68ac89fcb9de65cb1c649aa58b317be3fc793fb7 (commit)
via 7f1dcc956a864b70e395d10ba095c0787db802a7 (commit)
via a3e7bf95ad016c9badd98c16614de4a9c168bad1 (commit)
via debb22346698f1be3bbbac4955fd6bd247aa41f4 (commit)
via c2d03d1688ae502c4e0b1eb23427ebae5307a091 (commit)
via 3439230170effea0daec2a106a616965d4830968 (commit)
via ca54736634e25786f6d54317e97f3e4db71064f0 (commit)
via 911b53ae021dbd04a6c12f69aa106fd2d868d54f (commit)
via 1e465d5417011d24cb9aa9ffaf80a369b6511e2c (commit)
via c82f6195acb5a12e91d61956b8b958ceb0a0f821 (commit)
via b458fc09d6749b7435cd3c95952b9ab22322cb49 (commit)
via d059d370074b13b36db3ab685c307ba668faeda6 (commit)
via d8e223ad5439cdf9916e96178a4320403615b507 (commit)
via b8031ec74703c03eec1be362f0d3e321c4d8ebe5 (commit)
via 2117c1db277b10f3bcc48b51d2ca0f821af79f2f (commit)
via e5d4874ace76b0caff412f2394a15a042492560b (commit)
via 76335a521773c8118b7137d79e5f6397614f1904 (commit)
via 292665a460ed22219490c742d52785b503002029 (commit)
via 31cf6504b544e20f5ac84e3f74afcaff817c3693 (commit)
via 0e6639a8432999f2880473b815d8fbeb335a6808 (commit)
via 196b9474f5eeb11a8d96e52fed500270331dabc6 (commit)
via 296a70859ceb0b168c3818a3869991e8b51c3932 (commit)
via f6f425b5e49110b76e9954dc71d152806503c0bf (commit)
via fa9b8636e68a97293c26f51f4ecf50a2753965e4 (commit)
via e438bc6f5d4da2cc953cb76b9a924077d11fe347 (commit)
via 043963cf999791194e2db9e59fb5920ec30fc20f (commit)
via a730ddd17c2a20dc55247b5a86d05e3d0bb740fd (commit)
via b235b396ae97ba25d59f5981da39f1d1e4c072e6 (commit)
via c46aac2b5c86d037c7c3f34fbeb54d7ac0998817 (commit)
via 7d1e13b7fb6a589336cd83bef4f81fa077785beb (commit)
via 49b9f8004299533dd7e54bde3820984d8b04f37b (commit)
via 8f6ca91d01a5155ace94f0c044e674e58f8e7898 (commit)
via be3038ae1b595d1b9942f9aa72fa3d96aed3b22d (commit)
via e81b86767a740bcb1c4d1a0408ad9a70690df0a6 (commit)
via 5222b98f4e2021eb543f836d5e6876eb28eab716 (commit)
via 0d1e50106720fd7c4ec58e88e381ce7cff071648 (commit)
via 8d139f70ee129787af631531e4ea825293007a58 (commit)
via 26841bf1f0c0f0066e17b53bea2261e759bfbdbe (commit)
via 6b4582111d6f9e8a09e305ec3da009d8d393603b (commit)
via 1b5cb4d4168c3fcc2d22bcfdf5260ffc36d0a42e (commit)
via f500fc46e6467263b38c50010170f83c10d22e8a (commit)
via 114e59f9ed93ba3b6e656785df5d527011f8ce2b (commit)
via eaa56b3d005a20f945cd333664cf34633cfe5a7e (commit)
via 236b6ec7a803f9024141e0dacc3dcf75583fea8d (commit)
via 81bb03bbb092bace3bd8a44a6ca2862154503092 (commit)
via b84d1a0e0f13064b8dd68222c063565ac4deec3f (commit)
via 3a6f9f395c141058fb732735beabe7dae1f84bb5 (commit)
from 8c07f46adfdd748ee33b3b5e9d33a78a64dded10 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 045c30f0dffebb30ad8862986be435748ed0efb6
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Nov 11 12:52:02 2011 -0800
[1288] disable adjust_ttl in get_iterator to preserve the original TTLs
of the same name/type when they are different (unusual for modern zones
but possible). added test case for it, and for that purpose added sqlite3
test data and refactored the test framework a bit.
commit a6fd03e989a1fd5ae9514774bb3b3bb2a6668765
Merge: 8c07f46 81986f1
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Nov 11 12:27:19 2011 -0800
[1288] Merge branch 'master' into trac1288 with fixing conflict.
Note that conflict resolution in bind10_src.py was not trivial: we needed
to re-ensure that the ld path path for xfrout.
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 42 +-
configure.ac | 1 +
src/bin/bind10/TODO | 6 -
src/bin/bind10/bind10_messages.mes | 109 ++-
src/bin/bind10/bind10_src.py.in | 440 ++++-----
src/bin/bind10/bob.spec | 73 ++-
src/bin/bind10/run_bind10.sh.in | 3 +-
src/bin/bind10/tests/bind10_test.py.in | 543 ++++++++----
src/bin/bindctl/bindcmd.py | 119 ++--
src/bin/bindctl/bindctl_main.py.in | 3 +-
src/bin/bindctl/tests/bindctl_test.py | 126 ++--
src/bin/cmdctl/cmdctl.py.in | 93 +-
src/bin/cmdctl/cmdctl_messages.mes | 3 +
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 8 +-
src/bin/xfrin/tests/xfrin_test.py | 80 ++-
src/bin/xfrin/xfrin.py.in | 53 +-
src/bin/xfrout/tests/Makefile.am | 5 +
src/bin/xfrout/tests/testdata/example.com | 6 +
.../tests/testdata/test.sqlite3} | Bin 11264 -> 11264 bytes
src/bin/xfrout/tests/xfrout_test.py.in | 49 +-
src/bin/xfrout/xfrout.py.in | 6 +-
src/lib/datasrc/client.h | 9 +-
src/lib/datasrc/database.cc | 31 +-
src/lib/datasrc/database.h | 113 +++-
src/lib/datasrc/memory_datasrc.cc | 6 +-
src/lib/datasrc/memory_datasrc.h | 3 +-
src/lib/datasrc/sqlite3_accessor.cc | 179 +++-
src/lib/datasrc/sqlite3_accessor.h | 17 +
src/lib/datasrc/tests/Makefile.am | 1 +
src/lib/datasrc/tests/database_unittest.cc | 87 ++-
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 272 ++++++-
src/lib/datasrc/tests/testdata/test.sqlite3 | Bin 43008 -> 44032 bytes
.../{test.sqlite3 => test.sqlite3.nodiffs} | Bin 43008 -> 43008 bytes
src/lib/dhcp/libdhcp.cc | 60 +-
src/lib/dhcp/libdhcp.h | 26 +-
src/lib/dhcp/option.cc | 169 +++-
src/lib/dhcp/option.h | 92 ++-
src/lib/dhcp/option6_ia.cc | 4 +-
src/lib/dhcp/option6_iaaddr.cc | 4 +-
src/lib/dhcp/pkt4.cc | 36 +-
src/lib/dhcp/pkt4.h | 22 +-
src/lib/dhcp/pkt6.cc | 8 +-
src/lib/dhcp/pkt6.h | 2 +-
src/lib/dhcp/tests/Makefile.am | 2 -
src/lib/dhcp/tests/libdhcp_unittest.cc | 123 +++-
src/lib/dhcp/tests/option6_addrlst_unittest.cc | 17 +-
src/lib/dhcp/tests/option6_ia_unittest.cc | 4 +-
src/lib/dhcp/tests/option6_iaaddr_unittest.cc | 2 +
src/lib/dhcp/tests/option_unittest.cc | 168 ++++-
src/lib/dhcp/tests/pkt4_unittest.cc | 136 +++-
src/lib/dhcp/tests/pkt6_unittest.cc | 8 +-
src/lib/dns/python/name_python.cc | 29 +-
src/lib/dns/python/tests/name_python_test.py | 9 +
src/lib/python/Makefile.am | 9 +-
src/lib/python/bind10_config.py.in | 4 +
src/lib/python/isc/bind10/Makefile.am | 2 +-
src/lib/python/isc/bind10/component.py | 597 ++++++++++++
src/lib/python/isc/bind10/sockcreator.py | 16 +-
src/lib/python/isc/bind10/special_component.py | 164 ++++
src/lib/python/isc/bind10/tests/Makefile.am | 2 +-
src/lib/python/isc/bind10/tests/component_test.py | 955 ++++++++++++++++++++
src/lib/python/isc/config/cfgmgr.py | 11 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 14 +-
src/lib/python/isc/datasrc/client_inc.cc | 7 +-
src/lib/python/isc/datasrc/client_python.cc | 22 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 35 +-
src/lib/util/buffer.h | 22 +-
src/lib/util/tests/buffer_unittest.cc | 32 +
tests/lettuce/README | 127 +++
tests/lettuce/README.tutorial | 157 ++++
.../lettuce/configurations/example.org.config.orig | 17 +
tests/lettuce/configurations/example2.org.config | 18 +
tests/lettuce/configurations/no_db_file.config | 10 +
.../lettuce/data/empty_db.sqlite3 | Bin 11264 -> 11264 bytes
.../lettuce/data}/example.org.sqlite3 | Bin 14336 -> 14336 bytes
tests/lettuce/features/example.feature | 142 +++
tests/lettuce/features/terrain/bind10_control.py | 108 +++
tests/lettuce/features/terrain/querying.py | 279 ++++++
tests/lettuce/features/terrain/steps.py | 73 ++
tests/lettuce/features/terrain/terrain.py | 360 ++++++++
.../lettuce/setup_intree_bind10.sh.in | 8 +-
tests/system/bindctl/tests.sh | 5 +-
82 files changed, 5669 insertions(+), 934 deletions(-)
create mode 100644 src/bin/xfrout/tests/testdata/example.com
copy src/bin/{xfrin/tests/testdata/example.com.sqlite3 => xfrout/tests/testdata/test.sqlite3} (89%)
copy src/lib/datasrc/tests/testdata/{test.sqlite3 => test.sqlite3.nodiffs} (100%)
create mode 100644 src/lib/python/isc/bind10/component.py
create mode 100644 src/lib/python/isc/bind10/special_component.py
create mode 100644 src/lib/python/isc/bind10/tests/component_test.py
create mode 100644 tests/lettuce/README
create mode 100644 tests/lettuce/README.tutorial
create mode 100644 tests/lettuce/configurations/example.org.config.orig
create mode 100644 tests/lettuce/configurations/example2.org.config
create mode 100644 tests/lettuce/configurations/no_db_file.config
copy src/lib/datasrc/tests/testdata/rwtest.sqlite3 => tests/lettuce/data/empty_db.sqlite3 (96%)
copy {src/lib/datasrc/tests/testdata => tests/lettuce/data}/example.org.sqlite3 (100%)
create mode 100644 tests/lettuce/features/example.feature
create mode 100644 tests/lettuce/features/terrain/bind10_control.py
create mode 100644 tests/lettuce/features/terrain/querying.py
create mode 100644 tests/lettuce/features/terrain/steps.py
create mode 100644 tests/lettuce/features/terrain/terrain.py
copy src/bin/bind10/run_bind10.sh.in => tests/lettuce/setup_intree_bind10.sh.in (69%)
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 31e191e..5672beb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,43 @@
+316. [func]* vorner
+ The configuration of what parts of the system run is more flexible now.
+ Everything that should run must have an entry in Boss/components.
+ (Trac213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
+
+ 315. [func] tomek
+ libdhcp: Support for DHCPv4 packet manipulation is now implemented.
+ All fixed fields are now supported. Generic support for DHCPv4
+ options is available (both parsing and assembly). There is no code
+ that uses this new functionality yet, so it is not usable directly
+ at this time. This code will be used by upcoming b10-dhcp4 daemon.
+ (Trac #1228, git 31d5a4f66b18cca838ca1182b9f13034066427a7)
+
+314. [bug] jelte
+ b10-xfrin would previously initiate incoming transfers upon
+ receiving NOTIFY messages from any address (if the zone was
+ known to b10-xfrin, and using the configured address). It now
+ only starts a transfer if the source address from the NOTIFY
+ packet matches the configured master address and port. This was
+ really already fixed in release bind10-devel-20111014, but there
+ were some deferred cleanups to add.
+ (Trac #1298, git 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2)
+
+313. [func] jinmei
+ datasrc: Added C++ API for adding zone differences to database
+ based data sources. It's intended to be used for the support for
+ IXFR-in and dynamic update (so they can subsequently be retrieved
+ for IXFR-out). The addRecordDiff method of the DatabaseAccessor
+ defines the interface, and a concrete implementation for SQLite3
+ was provided.
+ (Trac #1329, git 1aa233fab1d74dc776899df61181806679d14013)
+
+312. [func] jelte
+ Added an initial framework for doing system tests using the
+ cucumber-based BDD tool Lettuce. A number of general steps are
+ included, for instance running bind10 with specific
+ configurations, sending queries, and inspecting query answers. A
+ few very basic tests are included as well.
+ (Trac #1290, git 6b75c128bcdcefd85c18ccb6def59e9acedd4437)
+
311. [bug] jelte
Fixed a bug in bindctl where tab-completion for names that
contain a hyphen resulted in unexpected behaviour, such as
@@ -25,7 +65,7 @@
given using the '--with-botan=' flag
(Trac #1194, git dc491833cf75ac1481ba1475795b0f266545013d)
-307. [func] vorner
+307. [func] vorner
When zone transfer in fails with IXFR, it is retried with AXFR
automatically.
(Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
diff --git a/configure.ac b/configure.ac
index 554a899..9723b8d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -991,6 +991,7 @@ AC_OUTPUT([doc/version.ent
src/lib/util/python/mkpywrapper.py
src/lib/util/python/gen_wiredata.py
src/lib/server_common/tests/data_path.h
+ tests/lettuce/setup_intree_bind10.sh
tests/system/conf.sh
tests/system/run.sh
tests/system/glue/setup.sh
diff --git a/src/bin/bind10/TODO b/src/bin/bind10/TODO
index eb0abcd..6f50dbd 100644
--- a/src/bin/bind10/TODO
+++ b/src/bin/bind10/TODO
@@ -1,19 +1,13 @@
- Read msgq configuration from configuration manager (Trac #213)
https://bind10.isc.org/ticket/213
- Provide more administrator options:
- - Get process list
- Get information on a process (returns list of times started & stopped,
plus current information such as PID)
- - Add a component (not necessary for parking lot, but...)
- Stop a component
- Force-stop a component
- Mechanism to wait for child to start before continuing
-- Way to ask a child to die politely
-- Start statistics daemon
-- Statistics interaction (?)
- Use .spec file to define comands
- Rename "c-channel" stuff to msgq for clarity
-- Use logger
- Reply to shutdown message?
- Some sort of group creation so termination signals can be sent to
children of children processes (if any)
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 2769aa9..d850e47 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -20,18 +20,72 @@ The boss process is starting up and will now check if the message bus
daemon is already running. If so, it will not be able to start, as it
needs a dedicated message bus.
-% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-
-% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
-This message shows whether or not the resolver should be
-started according to the configuration.
-
% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
An error was encountered when the boss module specified
statistics data which is invalid for the boss specification file.
+% BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+
+% BIND10_COMPONENT_RESTART component %1 is about to restart
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+
+% BIND10_COMPONENT_START component %1 is starting
+The named component is about to be started by the boss process.
+
+% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
+An exception (mentioned in the message) happened during the startup of the
+named component. The componet is not considered started and further actions
+will be taken about it.
+
+% BIND10_COMPONENT_STOP component %1 is being stopped
+A component is about to be asked to stop willingly by the boss.
+
+% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+
+% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
+A debug message. This indicates that the configurator is building a plan
+how to change configuration from the older one to newer one. This does no
+real work yet, it just does the planning what needs to be done.
+
+% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+
+% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+
+% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+
+% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+
+% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+
+% BIND10_CONFIGURATOR_TASK performing task %1 on %2
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+
% BIND10_INVALID_USER invalid user: %1
The boss process was started with the -u option, to drop root privileges
and continue running as the specified user, but the user is unknown.
@@ -51,27 +105,15 @@ old process was not shut down correctly, and needs to be killed, or
another instance of BIND10, with the same msgq domain socket, is
running, which needs to be stopped.
-% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-
% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
-% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-
-% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
The boss process is starting up, and will now process the initial
@@ -107,6 +149,9 @@ The boss module is sending a SIGKILL signal to the given process.
% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
The boss module is sending a SIGTERM signal to the given process.
+% BIND10_SETUID setting UID to %1
+The boss switches the user it runs as to the given UID.
+
% BIND10_SHUTDOWN stopping the server
The boss process received a command or signal telling it to shut down.
It will send a shutdown command to each process. The processes that do
@@ -125,11 +170,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
The boss requested a socket from the creator, but the answer is unknown. This
looks like a programmer error.
-% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-
% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
There should be more data from the socket creator, but it closed the socket.
It probably crashed.
@@ -208,8 +248,15 @@ During the startup process, a number of messages are exchanged between the
Boss process and the processes it starts. This error is output when a
message received by the Boss process is not recognised.
-% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
-The given module is being started or restarted without root privileges.
+% BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index caa6c65..dc1eca4 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -70,7 +70,8 @@ import isc.util.process
import isc.net.parse
import isc.log
from isc.log_messages.bind10_messages import *
-import isc.bind10.sockcreator
+import isc.bind10.component
+import isc.bind10.special_component
isc.log.init("b10-boss")
logger = isc.log.Logger("boss")
@@ -245,14 +246,17 @@ class BoB:
self.cfg_start_resolver = False
self.cfg_start_dhcp6 = False
self.cfg_start_dhcp4 = False
- self.started_auth_family = False
- self.started_resolver_family = False
self.curproc = None
+ # XXX: Not used now, waits for reintroduction of restarts.
self.dead_processes = {}
self.msgq_socket_file = msgq_socket_file
self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
+ self.component_config = {}
+ # Some time in future, it may happen that a single component has
+ # multple processes. If so happens, name "components" may be
+ # inapropriate. But as the code isn't probably completely ready
+ # for it, we leave it at components for now.
+ self.components = {}
self.runnable = False
self.uid = setuid
self.username = username
@@ -262,66 +266,66 @@ class BoB:
self.cmdctl_port = cmdctl_port
self.brittle = brittle
self.wait_time = wait_time
- self.sockcreator = None
+ self._component_configurator = isc.bind10.component.Configurator(self,
+ isc.bind10.special_component.get_specials())
+ # The priorities here make them start in the correct order. First
+ # the socket creator (which would drop root privileges by then),
+ # then message queue and after that the config manager (which uses
+ # the config manager)
+ self.__core_components = {
+ 'sockcreator': {
+ 'kind': 'core',
+ 'special': 'sockcreator',
+ 'priority': 200
+ },
+ 'msgq': {
+ 'kind': 'core',
+ 'special': 'msgq',
+ 'priority': 199
+ },
+ 'cfgmgr': {
+ 'kind': 'core',
+ 'special': 'cfgmgr',
+ 'priority': 198
+ }
+ }
+ self.__started = False
+ self.exitcode = 0
# If -v was set, enable full debug logging.
if self.verbose:
logger.set_severity("DEBUG", 99)
+ def __propagate_component_config(self, config):
+ comps = dict(config)
+ # Fill in the core components, so they stay alive
+ for comp in self.__core_components:
+ if comp in comps:
+ raise Exception(comp + " is core component managed by " +
+ "bind10 boss, do not set it")
+ comps[comp] = self.__core_components[comp]
+ # Update the configuration
+ self._component_configurator.reconfigure(comps)
+
def config_handler(self, new_config):
# If this is initial update, don't do anything now, leave it to startup
if not self.runnable:
return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- logger.info(BIND10_START_AS_NON_ROOT, name)
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
new_config)
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
+ try:
+ if 'components' in new_config:
+ self.__propagate_component_config(new_config['components'])
+ return isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
def get_processes(self):
- pids = list(self.processes.keys())
+ pids = list(self.components.keys())
pids.sort()
process_list = [ ]
for pid in pids:
- process_list.append([pid, self.processes[pid].name])
+ process_list.append([pid, self.components[pid].name()])
return process_list
def _get_stats_data(self):
@@ -370,23 +374,7 @@ class BoB:
"Unknown command")
return answer
- def start_creator(self):
- self.curproc = 'b10-sockcreator'
- creator_path = os.environ['PATH']
- if ADD_LIBEXEC_PATH:
- creator_path = "@@LIBEXECDIR@@:" + creator_path
- self.sockcreator = isc.bind10.sockcreator.Creator(creator_path)
-
- def stop_creator(self, kill=False):
- if self.sockcreator is None:
- return
- if kill:
- self.sockcreator.kill()
- else:
- self.sockcreator.terminate()
- self.sockcreator = None
-
- def kill_started_processes(self):
+ def kill_started_components(self):
"""
Called as part of the exception handling when a process fails to
start, this runs through the list of started processes, killing
@@ -394,31 +382,25 @@ class BoB:
"""
logger.info(BIND10_KILLING_ALL_PROCESSES)
- self.stop_creator(True)
-
- for pid in self.processes:
- logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
+ for pid in self.components:
+ logger.info(BIND10_KILL_PROCESS, self.components[pid].name())
+ self.components[pid].kill(True)
+ self.components = {}
- def read_bind10_config(self):
+ def _read_bind10_config(self):
"""
Reads the parameters associated with the BoB module itself.
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
+ This means the list of components we should start now.
+
+ This could easily be combined into start_all_processes, but
+ it stays because of historical reasons and because the tests
+ replace the method sometimes.
"""
logger.info(BIND10_READING_BOSS_CONFIGURATION)
config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
- logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+ self.__propagate_component_config(config_data['components'])
def log_starting(self, process, port = None, address = None):
"""
@@ -480,17 +462,16 @@ class BoB:
# raised which is caught by the caller of start_all_processes(); this kills
# processes started up to that point before terminating the program.
- def start_msgq(self, c_channel_env):
+ def start_msgq(self):
"""
Start the message queue and connect to the command channel.
"""
self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
+ msgq_proc = ProcessInfo("b10-msgq", ["b10-msgq"], self.c_channel_env,
True, not self.verbose, uid=self.uid,
username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
+ msgq_proc.spawn()
+ self.log_started(msgq_proc.pid)
# Now connect to the c-channel
cc_connect_start = time.time()
@@ -509,7 +490,9 @@ class BoB:
# on this channel are once relating to process startup.
self.cc_session.group_subscribe("Boss")
- def start_cfgmgr(self, c_channel_env):
+ return msgq_proc
+
+ def start_cfgmgr(self):
"""
Starts the configuration manager process
"""
@@ -520,10 +503,9 @@ class BoB:
if self.config_filename is not None:
args.append("--config-filename=" + self.config_filename)
bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
+ self.c_channel_env, uid=self.uid,
username=self.username)
bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
self.log_started(bind_cfgd.pid)
# Wait for the configuration manager to start up as subsequent initialization
@@ -539,6 +521,8 @@ class BoB:
if not self.process_running(msg, "ConfigManager"):
raise ProcessStartError("Configuration manager process has not started")
+ return bind_cfgd
+
def start_ccsession(self, c_channel_env):
"""
Start the CC Session
@@ -570,10 +554,20 @@ class BoB:
self.log_starting(name, port, address)
newproc = ProcessInfo(name, args, c_channel_env)
newproc.spawn()
- self.processes[newproc.pid] = newproc
self.log_started(newproc.pid)
+ return newproc
+
+ def register_process(self, pid, component):
+ """
+ Put another process into boss to watch over it. When the process
+ dies, the component.failed() is called with the exit code.
+
+ It is expected the info is a isc.bind10.component.BaseComponent
+ subclass (or anything having the same interface).
+ """
+ self.components[pid] = component
- def start_simple(self, name, c_channel_env, port=None, address=None):
+ def start_simple(self, name):
"""
Most of the BIND-10 processes are started with the command:
@@ -590,7 +584,7 @@ class BoB:
args += ['-v']
# ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
+ return self.start_process(name, args, self.c_channel_env)
# The next few methods start up the rest of the BIND-10 processes.
# Although many of these methods are little more than a call to
@@ -598,10 +592,12 @@ class BoB:
# where modifications can be made if the process start-up sequence changes
# for a given process.
- def start_auth(self, c_channel_env):
+ def start_auth(self):
"""
Start the Authoritative server
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_AUTH)
authargs = ['b10-auth']
if self.nocache:
authargs += ['-n']
@@ -611,14 +607,16 @@ class BoB:
authargs += ['-v']
# ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
+ return self.start_process("b10-auth", authargs, self.c_channel_env)
- def start_resolver(self, c_channel_env):
+ def start_resolver(self):
"""
Start the Resolver. At present, all these arguments and switches
are pure speculation. As with the auth daemon, they should be
read from the configuration database.
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_RESOLVER)
self.curproc = "b10-resolver"
# XXX: this must be read from the configuration manager in the future
resargs = ['b10-resolver']
@@ -628,7 +626,7 @@ class BoB:
resargs += ['-v']
# ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
+ return self.start_process("b10-resolver", resargs, self.c_channel_env)
def __ld_path_hack(self, env):
# XXX: a quick-hack workaround. xfrin/out will implicitly use
@@ -644,6 +642,9 @@ class BoB:
# We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
# do this, as the conditions that make this workaround needed are
# the same as for the libexec path addition
+ # TODO: Once #1292 is finished, remove this method and the special
+ # component, use it as normal component.
+ c_channel_env = dict(self.c_channel_env)
if ADD_LIBEXEC_PATH:
cur_path = os.getenv('DYLD_LIBRARY_PATH')
cur_path = '' if cur_path is None else ':' + cur_path
@@ -653,86 +654,56 @@ class BoB:
cur_path = '' if cur_path is None else ':' + cur_path
env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
- def start_xfrout(self, c_channel_env):
- self.__ld_path_hack(c_channel_env)
- self.start_simple("b10-xfrout", c_channel_env)
+ def start_cmdctl(self):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
+ return self.start_process("b10-cmdctl", args, self.c_channel_env,
+ self.cmdctl_port)
- def start_xfrin(self, c_channel_env):
+ def start_xfrin(self):
self.__ld_path_hack(c_channel_env)
- self.start_simple("b10-xfrin", c_channel_env)
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
+ # Set up the command arguments.
+ args = ['b10-xfrin']
+ if self.verbose:
+ args += ['-v']
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
+ return self.start_process("b10-xfrin", args, c_channel_env)
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
+ def start_xfrout(self, c_channel_env):
+ self.__ld_path_hack(c_channel_env)
- def start_dhcp6(self, c_channel_env):
- self.start_simple("b10-dhcp6", c_channel_env)
+ # Set up the command arguments.
+ args = ['b10-xfrout']
+ if self.verbose:
+ args += ['-v']
- def start_cmdctl(self, c_channel_env):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+ return self.start_process("b10-xfrout", args, c_channel_env)
- def start_all_processes(self):
+ def start_all_components(self):
"""
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
+ Starts up all the components. Any exception generated during the
+ starting of the components is handled by the caller.
"""
- # The socket creator first, as it is the only thing that needs root
- self.start_creator()
- # TODO: Once everything uses the socket creator, we can drop root
- # privileges right now
+ # Start the real core (sockcreator, msgq, cfgmgr)
+ self._component_configurator.startup(self.__core_components)
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
+ # Connect to the msgq. This is not a process, so it's not handled
+ # inside the configurator.
+ self.start_ccsession(self.c_channel_env)
# Extract the parameters associated with Bob. This can only be
# done after the CC Session is started. Note that the logging
# configuration may override the "-v" switch set on the command line.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
+ self._read_bind10_config()
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- if self.cfg_start_dhcp6:
- self.start_dhcp6(c_channel_env)
+ # TODO: Return the dropping of privileges
def startup(self):
"""
@@ -756,99 +727,81 @@ class BoB:
# this is the case we want, where the msgq is not running
pass
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
+ # Start all components. If any one fails to start, kill all started
+ # components and exit with an error indication.
try:
self.c_channel_env = c_channel_env
- self.start_all_processes()
+ self.start_all_components()
except Exception as e:
- self.kill_started_processes()
+ self.kill_started_components()
return "Unable to start " + self.curproc + ": " + str(e)
# Started successfully
self.runnable = True
+ self.__started = True
return None
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
- # Terminate the creator last
- self.stop_creator()
-
def stop_process(self, process, recipient):
"""
Stop the given process, friendly-like. The process is the name it has
(in logs, etc), the recipient is the address on msgq.
"""
logger.info(BIND10_STOP_PROCESS, process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
recipient)
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
+ def component_shutdown(self, exitcode=0):
+ """
+ Stop the Boss instance from a components' request. The exitcode
+ indicates the desired exit code.
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
+ If we did not start yet, it raises an exception, which is meant
+ to propagate through the component and configurator to the startup
+ routine and abort the startup imediatelly. If it is started up already,
+ we just mark it so we terminate soon.
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
+ It does set the exit code in both cases.
+ """
+ self.exitcode = exitcode
+ if not self.__started:
+ raise Exception("Component failed during startup");
+ else:
+ self.runnable = False
def shutdown(self):
"""Stop the BoB instance."""
logger.info(BIND10_SHUTDOWN)
# first try using the BIND 10 request to stop
try:
- self.stop_all_processes()
+ self._component_configurator.shutdown()
except:
pass
# XXX: some delay probably useful... how much is uncertain
# I have changed the delay from 0.5 to 1, but sometime it's
# still not enough.
- time.sleep(1)
+ time.sleep(1)
self.reap_children()
# next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGTERM, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
try:
- proc_info.process.terminate()
+ component.kill()
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
pass
# finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
+ while self.components:
# XXX: some delay probably useful... how much is uncertain
time.sleep(0.1)
self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGKILL, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, component.name(),
+ component.pid())
try:
- proc_info.process.kill()
+ component.kill(True)
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
@@ -870,40 +823,16 @@ class BoB:
# XXX: should be impossible to get any other error here
raise
if pid == 0: break
- if self.sockcreator is not None and self.sockcreator.pid() == pid:
- # This is the socket creator, started and terminated
- # differently. This can't be restarted.
- if self.runnable:
- logger.fatal(BIND10_SOCKCREATOR_CRASHED)
- self.sockcreator = None
- self.runnable = False
- elif pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
- proc_info.name, proc_info.pid)
- else:
- logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
- proc_info.name, proc_info.pid,
- exit_status)
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
+ if pid in self.components:
+ # One of the components we know about. Get information on it.
+ component = self.components.pop(pid)
+ logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
+ exit_status)
+ if component.running() and self.runnable:
+ # Tell it it failed. But only if it matters (we are
+ # not shutting down and the component considers itself
+ # to be running.
+ component.failed(exit_status);
else:
logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
@@ -917,7 +846,16 @@ class BoB:
The values returned can be safely passed into select() as the
timeout value.
+
"""
+ # TODO: This is an artefact of previous way of handling processes. The
+ # restart queue is currently empty at all times, so this returns None
+ # every time it is called (thought is a relict that is obviously wrong,
+ # it is called and it doesn't hurt).
+ #
+ # It is preserved for archeological reasons for the time when we return
+ # the delayed restarts, most of it might be useful then (or, if it is
+ # found useless, removed).
next_restart = None
# if we're shutting down, then don't restart
if not self.runnable:
@@ -926,10 +864,6 @@ class BoB:
still_dead = {}
now = time.time()
for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
restart_time = proc_info.restart_schedule.get_restart_time(now)
if restart_time > now:
if (next_restart is None) or (next_restart > restart_time):
@@ -939,7 +873,7 @@ class BoB:
logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
try:
proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
+ self.components[proc_info.pid] = proc_info
logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
except:
still_dead[proc_info.pid] = proc_info
@@ -1131,6 +1065,10 @@ def main():
while boss_of_bind.runnable:
# clean up any processes that exited
boss_of_bind.reap_children()
+ # XXX: As we don't put anything into the processes to be restarted,
+ # this is really a complicated NOP. But we will try to reintroduce
+ # delayed restarts, so it stays here for now, until we find out if
+ # it's useful.
next_restart = boss_of_bind.restart_processes()
if next_restart is None:
wait_time = None
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index b4cfac6..2fffe56 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -4,16 +4,71 @@
"module_description": "Master process",
"config_data": [
{
- "item_name": "start_auth",
- "item_type": "boolean",
+ "item_name": "components",
+ "item_type": "named_set",
"item_optional": false,
- "item_default": true
- },
- {
- "item_name": "start_resolver",
- "item_type": "boolean",
- "item_optional": false,
- "item_default": false
+ "item_default": {
+ "b10-auth": { "special": "auth", "kind": "needed", "priority": 10 },
+ "setuid": {
+ "special": "setuid",
+ "priority": 5,
+ "kind": "dispensable"
+ },
+ "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "special": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-stats-httpd": {
+ "address": "StatsHttpd",
+ "kind": "dispensable"
+ },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ },
+ "named_set_item_spec": {
+ "item_name": "component",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": { },
+ "map_item_spec": [
+ {
+ "item_name": "special",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "process",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "kind",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": "dispensable"
+ },
+ {
+ "item_name": "address",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "params",
+ "item_optional": true,
+ "item_type": "list",
+ "list_item_spec": {
+ "item_name": "param",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": ""
+ }
+ },
+ {
+ "item_name": "priority",
+ "item_optional": true,
+ "item_type": "integer"
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 50e6e29..9e4abc0 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -45,6 +45,5 @@ export B10_FROM_BUILD
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
-cd ${BIND10_PATH}
-exec ${PYTHON_EXEC} -O bind10 "$@"
+exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 1bd6ab4..0aa6778 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -104,7 +104,7 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, None)
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
@@ -122,7 +122,7 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
@@ -218,147 +218,185 @@ class MockBob(BoB):
self.stats = False
self.stats_httpd = False
self.cmdctl = False
+ self.dhcp6 = False
+ self.dhcp4 = False
self.c_channel_env = {}
- self.processes = { }
+ self.components = { }
self.creator = False
+ class MockSockCreator(isc.bind10.component.Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ isc.bind10.component.Component.__init__(self, process, boss,
+ kind, 'SockCreator')
+ self._start_func = boss.start_creator
+
+ specials = isc.bind10.special_component.get_specials()
+ specials['sockcreator'] = MockSockCreator
+ self._component_configurator = \
+ isc.bind10.component.Configurator(self, specials)
+
def start_creator(self):
self.creator = True
+ procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
+ procinfo.pid = 1
+ return procinfo
- def stop_creator(self, kill=False):
- self.creator = False
-
- def read_bind10_config(self):
+ def _read_bind10_config(self):
# Configuration options are set directly
pass
- def start_msgq(self, c_channel_env):
+ def start_msgq(self):
self.msgq = True
- self.processes[2] = ProcessInfo('b10-msgq', ['/bin/false'])
- self.processes[2].pid = 2
-
- def start_cfgmgr(self, c_channel_env):
- self.cfgmgr = True
- self.processes[3] = ProcessInfo('b10-cfgmgr', ['/bin/false'])
- self.processes[3].pid = 3
+ procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
+ procinfo.pid = 2
+ return procinfo
def start_ccsession(self, c_channel_env):
+ # this is not a process, don't have to do anything with procinfo
self.ccsession = True
- self.processes[4] = ProcessInfo('b10-ccsession', ['/bin/false'])
- self.processes[4].pid = 4
- def start_auth(self, c_channel_env):
+ def start_cfgmgr(self):
+ self.cfgmgr = True
+ procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
+ procinfo.pid = 3
+ return procinfo
+
+ def start_auth(self):
self.auth = True
- self.processes[5] = ProcessInfo('b10-auth', ['/bin/false'])
- self.processes[5].pid = 5
+ procinfo = ProcessInfo('b10-auth', ['/bin/false'])
+ procinfo.pid = 5
+ return procinfo
- def start_resolver(self, c_channel_env):
+ def start_resolver(self):
self.resolver = True
- self.processes[6] = ProcessInfo('b10-resolver', ['/bin/false'])
- self.processes[6].pid = 6
-
- def start_xfrout(self, c_channel_env):
+ procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
+ procinfo.pid = 6
+ return procinfo
+
+ def start_simple(self, name):
+ procmap = { 'b10-xfrout': self.start_xfrout,
+ 'b10-zonemgr': self.start_zonemgr,
+ 'b10-stats': self.start_stats,
+ 'b10-stats-httpd': self.start_stats_httpd,
+ 'b10-cmdctl': self.start_cmdctl,
+ 'b10-dhcp6': self.start_dhcp6,
+ 'b10-dhcp4': self.start_dhcp4 }
+ return procmap[name]()
+
+ def start_xfrout(self):
self.xfrout = True
- self.processes[7] = ProcessInfo('b10-xfrout', ['/bin/false'])
- self.processes[7].pid = 7
+ procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
+ procinfo.pid = 7
+ return procinfo
- def start_xfrin(self, c_channel_env):
+ def start_xfrin(self):
self.xfrin = True
- self.processes[8] = ProcessInfo('b10-xfrin', ['/bin/false'])
- self.processes[8].pid = 8
+ procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
+ procinfo.pid = 8
+ return procinfo
- def start_zonemgr(self, c_channel_env):
+ def start_zonemgr(self):
self.zonemgr = True
- self.processes[9] = ProcessInfo('b10-zonemgr', ['/bin/false'])
- self.processes[9].pid = 9
+ procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
+ procinfo.pid = 9
+ return procinfo
- def start_stats(self, c_channel_env):
+ def start_stats(self):
self.stats = True
- self.processes[10] = ProcessInfo('b10-stats', ['/bin/false'])
- self.processes[10].pid = 10
+ procinfo = ProcessInfo('b10-stats', ['/bin/false'])
+ procinfo.pid = 10
+ return procinfo
- def start_stats_httpd(self, c_channel_env):
+ def start_stats_httpd(self):
self.stats_httpd = True
- self.processes[11] = ProcessInfo('b10-stats-httpd', ['/bin/false'])
- self.processes[11].pid = 11
+ procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
+ procinfo.pid = 11
+ return procinfo
- def start_cmdctl(self, c_channel_env):
+ def start_cmdctl(self):
self.cmdctl = True
- self.processes[12] = ProcessInfo('b10-cmdctl', ['/bin/false'])
- self.processes[12].pid = 12
+ procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
+ procinfo.pid = 12
+ return procinfo
- def start_dhcp6(self, c_channel_env):
+ def start_dhcp6(self):
self.dhcp6 = True
- self.processes[13] = ProcessInfo('b10-dhcp6', ['/bin/false'])
- self.processes[13]
+ procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ procinfo.pid = 13
+ return procinfo
- def start_dhcp4(self, c_channel_env):
+ def start_dhcp4(self):
self.dhcp4 = True
- self.processes[14] = ProcessInfo('b10-dhcp4', ['/bin/false'])
- self.processes[14]
-
- # We don't really use all of these stop_ methods. But it might turn out
- # someone would add some stop_ method to BoB and we want that one overriden
- # in case he forgets to update the tests.
+ procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ procinfo.pid = 14
+ return procinfo
+
+ def stop_process(self, process, recipient):
+ procmap = { 'b10-auth': self.stop_auth,
+ 'b10-resolver': self.stop_resolver,
+ 'b10-xfrout': self.stop_xfrout,
+ 'b10-xfrin': self.stop_xfrin,
+ 'b10-zonemgr': self.stop_zonemgr,
+ 'b10-stats': self.stop_stats,
+ 'b10-stats-httpd': self.stop_stats_httpd,
+ 'b10-cmdctl': self.stop_cmdctl }
+ procmap[process]()
+
+ # Some functions to pretend we stop processes, use by stop_process
def stop_msgq(self):
if self.msgq:
- del self.processes[2]
+ del self.components[2]
self.msgq = False
def stop_cfgmgr(self):
if self.cfgmgr:
- del self.processes[3]
+ del self.components[3]
self.cfgmgr = False
- def stop_ccsession(self):
- if self.ccssession:
- del self.processes[4]
- self.ccsession = False
-
def stop_auth(self):
if self.auth:
- del self.processes[5]
+ del self.components[5]
self.auth = False
def stop_resolver(self):
if self.resolver:
- del self.processes[6]
+ del self.components[6]
self.resolver = False
def stop_xfrout(self):
if self.xfrout:
- del self.processes[7]
+ del self.components[7]
self.xfrout = False
def stop_xfrin(self):
if self.xfrin:
- del self.processes[8]
+ del self.components[8]
self.xfrin = False
def stop_zonemgr(self):
if self.zonemgr:
- del self.processes[9]
+ del self.components[9]
self.zonemgr = False
def stop_stats(self):
if self.stats:
- del self.processes[10]
+ del self.components[10]
self.stats = False
def stop_stats_httpd(self):
if self.stats_httpd:
- del self.processes[11]
+ del self.components[11]
self.stats_httpd = False
def stop_cmdctl(self):
if self.cmdctl:
- del self.processes[12]
+ del self.components[12]
self.cmdctl = False
class TestStartStopProcessesBob(unittest.TestCase):
"""
- Check that the start_all_processes method starts the right combination
- of processes and that the right processes are started and stopped
+ Check that the start_all_components method starts the right combination
+ of components and that the right components are started and stopped
according to changes in configuration.
"""
def check_environment_unchanged(self):
@@ -392,7 +430,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_none(self, bob):
"""
Check that the situation is according to configuration where no servers
- should be started. Some processes still need to be running.
+ should be started. Some components still need to be running.
"""
self.check_started(bob, True, False, False)
self.check_environment_unchanged()
@@ -407,14 +445,14 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_auth(self, bob):
"""
- Check the set of processes needed to run auth only is started.
+ Check the set of components needed to run auth only is started.
"""
self.check_started(bob, True, True, False)
self.check_environment_unchanged()
def check_started_resolver(self, bob):
"""
- Check the set of processes needed to run resolver only is started.
+ Check the set of components needed to run resolver only is started.
"""
self.check_started(bob, True, False, True)
self.check_environment_unchanged()
@@ -423,80 +461,65 @@ class TestStartStopProcessesBob(unittest.TestCase):
"""
Check if proper combinations of DHCPv4 and DHCpv6 can be started
"""
- v4found = 0
- v6found = 0
-
- for pid in bob.processes:
- if (bob.processes[pid].name == "b10-dhcp4"):
- v4found += 1
- if (bob.processes[pid].name == "b10-dhcp6"):
- v6found += 1
-
- # there should be exactly one DHCPv4 daemon (if v4==True)
- # there should be exactly one DHCPv6 daemon (if v6==True)
- self.assertEqual(v4==True, v4found==1)
- self.assertEqual(v6==True, v6found==1)
+ self.assertEqual(v4, bob.dhcp4)
+ self.assertEqual(v6, bob.dhcp6)
self.check_environment_unchanged()
- # Checks the processes started when starting neither auth nor resolver
- # is specified.
- def test_start_none(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
- self.check_started_none(bob)
-
- # Checks the processes started when starting only the auth process
- def test_start_auth(self):
- # Create BoB and ensure correct initialization
+ def construct_config(self, start_auth, start_resolver):
+ # The things that are common, not turned on an off
+ config = {}
+ config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+ config['b10-stats-httpd'] = { 'kind': 'dispensable',
+ 'address': 'StatsHttpd' }
+ config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+ if start_auth:
+ config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+ config['b10-xfrout'] = { 'kind': 'dispensable',
+ 'address': 'Xfrout' }
+ config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
+ config['b10-zonemgr'] = { 'kind': 'dispensable',
+ 'address': 'Zonemgr' }
+ if start_resolver:
+ config['b10-resolver'] = { 'kind': 'needed',
+ 'special': 'resolver' }
+ return {'components': config}
+
+ def config_start_init(self, start_auth, start_resolver):
+ """
+ Test the configuration is loaded at the startup.
+ """
bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ config = self.construct_config(start_auth, start_resolver)
+ class CC:
+ def get_full_config(self):
+ return config
+ # Provide the fake CC with data
+ bob.ccs = CC()
+ # And make sure it's not overwritten
+ def start_ccsession():
+ bob.ccsession = True
+ bob.start_ccsession = lambda _: start_ccsession()
+ # We need to return the original _read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ bob.start_all_components()
+ self.check_started(bob, True, start_auth, start_resolver)
+ self.check_environment_unchanged()
- self.check_started_auth(bob)
+ def test_start_none(self):
+ self.config_start_init(False, False)
- # Checks the processes started when starting only the resolver process
def test_start_resolver(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = True
+ self.config_start_init(False, True)
- bob.start_all_processes()
-
- self.check_started_resolver(bob)
+ def test_start_auth(self):
+ self.config_start_init(True, False)
- # Checks the processes started when starting both auth and resolver process
def test_start_both(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_both(bob)
+ self.config_start_init(True, True)
def test_config_start(self):
"""
- Test that the configuration starts and stops processes according
+ Test that the configuration starts and stops components according
to configuration changes.
"""
@@ -504,17 +527,13 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (nothing much should be started, as in
- # test_start_none)
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Enable both at once
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Not touched by empty change
@@ -522,11 +541,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_both(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Turn them both off again
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Not touched by empty change
@@ -534,47 +553,45 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_none(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop auth separately
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop resolver separately
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Alternate
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False, 'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_auth': True, 'start_resolver': False})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
def test_config_start_once(self):
"""
- Tests that a process is started only once.
+ Tests that a component is started only once.
"""
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (both)
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
+ bob.start_all_components()
- bob.start_all_processes()
bob.runnable = True
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
bob.start_auth = lambda: self.fail("Started auth again")
@@ -584,12 +601,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.start_resolver = lambda: self.fail("Started resolver again")
# Send again we want to start them. Should not do it, as they are.
- bob.config_handler({'start_auth': True})
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
def test_config_not_started_early(self):
"""
- Test that processes are not started by the config handler before
+ Test that components are not started by the config handler before
startup.
"""
bob = MockBob()
@@ -603,27 +619,29 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.config_handler({'start_auth': True, 'start_resolver': True})
- # Checks that DHCP (v4 and v6) processes are started when expected
+ # Checks that DHCP (v4 and v6) components are started when expected
def test_start_dhcp(self):
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # don't care about DNS stuff
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- # v4 and v6 disabled
- bob.cfg_start_dhcp6 = False
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.config_handler(self.construct_config(False, False))
self.check_started_dhcp(bob, False, False)
+ def test_start_dhcp_v6only(self):
+ # Create BoB and ensure correct initialization
+ bob = MockBob()
+ self.check_preconditions(bob)
# v6 only enabled
- bob.cfg_start_dhcp6 = True
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.runnable = True
+ bob._BoB_started = True
+ config = self.construct_config(False, False)
+ config['components']['b10-dhcp6'] = { 'kind': 'needed',
+ 'address': 'Dhcp6' }
+ bob.config_handler(config)
self.check_started_dhcp(bob, False, True)
# uncomment when dhcpv4 becomes implemented
@@ -637,6 +655,12 @@ class TestStartStopProcessesBob(unittest.TestCase):
#bob.cfg_start_dhcp4 = True
#self.check_started_dhcp(bob, True, True)
+class MockComponent:
+ def __init__(self, name, pid):
+ self.name = lambda: name
+ self.pid = lambda: pid
+
+
class TestBossCmd(unittest.TestCase):
def test_ping(self):
"""
@@ -646,7 +670,7 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("ping", None)
self.assertEqual(answer, {'result': [0, 'pong']})
- def test_show_processes(self):
+ def test_show_processes_empty(self):
"""
Confirm getting a list of processes works.
"""
@@ -654,23 +678,16 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("show_processes", None)
self.assertEqual(answer, {'result': [0, []]})
- def test_show_processes_started(self):
+ def test_show_processes(self):
"""
Confirm getting a list of processes works.
"""
bob = MockBob()
- bob.start_all_processes()
+ bob.register_process(1, MockComponent('first', 1))
+ bob.register_process(2, MockComponent('second', 2))
answer = bob.command_handler("show_processes", None)
- processes = [[2, 'b10-msgq'],
- [3, 'b10-cfgmgr'],
- [4, 'b10-ccsession'],
- [5, 'b10-auth'],
- [7, 'b10-xfrout'],
- [8, 'b10-xfrin'],
- [9, 'b10-zonemgr'],
- [10, 'b10-stats'],
- [11, 'b10-stats-httpd'],
- [12, 'b10-cmdctl']]
+ processes = [[1, 'first'],
+ [2, 'second']]
self.assertEqual(answer, {'result': [0, processes]})
class TestParseArgs(unittest.TestCase):
@@ -780,10 +797,12 @@ class TestPIDFile(unittest.TestCase):
self.assertRaises(IOError, dump_pid,
'nonexistent_dir' + os.sep + 'bind10.pid')
+# TODO: Do we want brittle mode? Probably yes. So we need to re-enable to after that.
+ at unittest.skip("Brittle mode temporarily broken")
class TestBrittle(unittest.TestCase):
def test_brittle_disabled(self):
bob = MockBob()
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
bob.reap_children()
@@ -796,7 +815,7 @@ class TestBrittle(unittest.TestCase):
def test_brittle_enabled(self):
bob = MockBob()
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
bob.brittle = True
@@ -809,6 +828,158 @@ class TestBrittle(unittest.TestCase):
sys.stdout = old_stdout
self.assertFalse(bob.runnable)
+class TestBossComponents(unittest.TestCase):
+ """
+ Test the boss propagates component configuration properly to the
+ component configurator and acts sane.
+ """
+ def setUp(self):
+ self.__param = None
+ self.__called = False
+ self.__compconfig = {
+ 'comp': {
+ 'kind': 'needed',
+ 'process': 'cat'
+ }
+ }
+
+ def __unary_hook(self, param):
+ """
+ A hook function that stores the parameter for later examination.
+ """
+ self.__param = param
+
+ def __nullary_hook(self):
+ """
+ A hook function that notes down it was called.
+ """
+ self.__called = True
+
+ def __check_core(self, config):
+ """
+ A function checking that the config contains parts for the valid
+ core component configuration.
+ """
+ self.assertIsNotNone(config)
+ for component in ['sockcreator', 'msgq', 'cfgmgr']:
+ self.assertTrue(component in config)
+ self.assertEqual(component, config[component]['special'])
+ self.assertEqual('core', config[component]['kind'])
+
+ def __check_extended(self, config):
+ """
+ This checks that the config contains the core and one more component.
+ """
+ self.__check_core(config)
+ self.assertTrue('comp' in config)
+ self.assertEqual('cat', config['comp']['process'])
+ self.assertEqual('needed', config['comp']['kind'])
+ self.assertEqual(4, len(config))
+
+ def test_correct_run(self):
+ """
+ Test the situation when we run in usual scenario, nothing fails,
+ we just start, reconfigure and then stop peacefully.
+ """
+ bob = MockBob()
+ # Start it
+ orig = bob._component_configurator.startup
+ bob._component_configurator.startup = self.__unary_hook
+ bob.start_all_components()
+ bob._component_configurator.startup = orig
+ self.__check_core(self.__param)
+ self.assertEqual(3, len(self.__param))
+
+ # Reconfigure it
+ self.__param = None
+ orig = bob._component_configurator.reconfigure
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # Otherwise it does not work
+ bob.runnable = True
+ bob.config_handler({'components': self.__compconfig})
+ self.__check_extended(self.__param)
+ currconfig = self.__param
+ # If we reconfigure it, but it does not contain the components part,
+ # nothing is called
+ bob.config_handler({})
+ self.assertEqual(self.__param, currconfig)
+ self.__param = None
+ bob._component_configurator.reconfigure = orig
+ # Check a configuration that messes up the core components is rejected.
+ compconf = dict(self.__compconfig)
+ compconf['msgq'] = { 'process': 'echo' }
+ result = bob.config_handler({'components': compconf})
+ # Check it rejected it
+ self.assertEqual(1, result['result'][0])
+
+ # We can't call shutdown, that one relies on the stuff in main
+ # We check somewhere else that the shutdown is actually called
+ # from there (the test_kills).
+
+ def test_kills(self):
+ """
+ Test that the boss kills components which don't want to stop.
+ """
+ bob = MockBob()
+ killed = []
+ class ImmortalComponent:
+ """
+ An immortal component. It does not stop when it is told so
+ (anyway it is not told so). It does not die if it is killed
+ the first time. It dies only when killed forcefully.
+ """
+ def kill(self, forcefull=False):
+ killed.append(forcefull)
+ if forcefull:
+ bob.components = {}
+ def pid(self):
+ return 1
+ def name(self):
+ return "Immortal"
+ bob.components = {}
+ bob.register_process(1, ImmortalComponent())
+
+ # While at it, we check the configurator shutdown is actually called
+ orig = bob._component_configurator.shutdown
+ bob._component_configurator.shutdown = self.__nullary_hook
+ self.__called = False
+
+ bob.shutdown()
+
+ self.assertEqual([False, True], killed)
+ self.assertTrue(self.__called)
+
+ bob._component_configurator.shutdown = orig
+
+ def test_component_shutdown(self):
+ """
+ Test the component_shutdown sets all variables accordingly.
+ """
+ bob = MockBob()
+ self.assertRaises(Exception, bob.component_shutdown, 1)
+ self.assertEqual(1, bob.exitcode)
+ bob._BoB__started = True
+ bob.component_shutdown(2)
+ self.assertEqual(2, bob.exitcode)
+ self.assertFalse(bob.runnable)
+
+ def test_init_config(self):
+ """
+ Test initial configuration is loaded.
+ """
+ bob = MockBob()
+ # Start it
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # We need to return the original read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ # And provide a session to read the data from
+ class CC:
+ pass
+ bob.ccs = CC()
+ bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
+ bob.start_all_components()
+ self.__check_extended(self.__param)
+
if __name__ == '__main__':
# store os.environ for test_unchanged_environment
original_os_environ = copy.deepcopy(os.environ)
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index c464e64..b67bc4b 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -71,21 +71,21 @@ Type \"<module_name> <command_name> help\" for help on the specific command.
\nAvailable module names: """
class ValidatedHTTPSConnection(http.client.HTTPSConnection):
- '''Overrides HTTPSConnection to support certification
+ '''Overrides HTTPSConnection to support certification
validation. '''
def __init__(self, host, ca_certs):
http.client.HTTPSConnection.__init__(self, host)
self.ca_certs = ca_certs
def connect(self):
- ''' Overrides the connect() so that we do
+ ''' Overrides the connect() so that we do
certificate validation. '''
sock = socket.create_connection((self.host, self.port),
self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
-
+
req_cert = ssl.CERT_NONE
if self.ca_certs:
req_cert = ssl.CERT_REQUIRED
@@ -95,7 +95,7 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
ca_certs=self.ca_certs)
class BindCmdInterpreter(Cmd):
- """simple bindctl example."""
+ """simple bindctl example."""
def __init__(self, server_port='localhost:8080', pem_file=None,
csv_file_dir=None):
@@ -128,29 +128,33 @@ class BindCmdInterpreter(Cmd):
socket.gethostname())).encode())
digest = session_id.hexdigest()
return digest
-
+
def run(self):
'''Parse commands from user and send them to cmdctl. '''
try:
if not self.login_to_cmdctl():
- return
+ return 1
self.cmdloop()
print('\nExit from bindctl')
+ return 0
except FailToLogin as err:
# error already printed when this was raised, ignoring
- pass
+ return 1
except KeyboardInterrupt:
print('\nExit from bindctl')
+ return 0
except socket.error as err:
print('Failed to send request, the connection is closed')
+ return 1
except http.client.CannotSendRequest:
print('Can not send request, the connection is busy')
+ return 1
def _get_saved_user_info(self, dir, file_name):
- ''' Read all the available username and password pairs saved in
+ ''' Read all the available username and password pairs saved in
file(path is "dir + file_name"), Return value is one list of elements
- ['name', 'password'], If get information failed, empty list will be
+ ['name', 'password'], If get information failed, empty list will be
returned.'''
if (not dir) or (not os.path.exists(dir)):
return []
@@ -176,7 +180,7 @@ class BindCmdInterpreter(Cmd):
if not os.path.exists(dir):
os.mkdir(dir, 0o700)
- csvfilepath = dir + file_name
+ csvfilepath = dir + file_name
csvfile = open(csvfilepath, 'w')
os.chmod(csvfilepath, 0o600)
writer = csv.writer(csvfile)
@@ -190,7 +194,7 @@ class BindCmdInterpreter(Cmd):
return True
def login_to_cmdctl(self):
- '''Login to cmdctl with the username and password inputted
+ '''Login to cmdctl with the username and password inputted
from user. After the login is sucessful, the username and
password will be saved in 'default_user.csv', when run the next
time, username and password saved in 'default_user.csv' will be
@@ -256,14 +260,14 @@ class BindCmdInterpreter(Cmd):
if self.login_to_cmdctl():
# successful, so try send again
status, reply_msg = self._send_message(url, body)
-
+
if reply_msg:
return json.loads(reply_msg.decode())
else:
return {}
-
- def send_POST(self, url, post_param = None):
+
+ def send_POST(self, url, post_param = None):
'''Send POST request to cmdctl, session id is send with the name
'cookie' in header.
Format: /module_name/command_name
@@ -322,12 +326,12 @@ class BindCmdInterpreter(Cmd):
def _validate_cmd(self, cmd):
'''validate the parameters and merge some parameters together,
merge algorithm is based on the command line syntax, later, if
- a better command line syntax come out, this function should be
- updated first.
+ a better command line syntax come out, this function should be
+ updated first.
'''
if not cmd.module in self.modules:
raise CmdUnknownModuleSyntaxError(cmd.module)
-
+
module_info = self.modules[cmd.module]
if not module_info.has_command_with_name(cmd.command):
raise CmdUnknownCmdSyntaxError(cmd.module, cmd.command)
@@ -335,17 +339,17 @@ class BindCmdInterpreter(Cmd):
command_info = module_info.get_command_with_name(cmd.command)
manda_params = command_info.get_mandatory_param_names()
all_params = command_info.get_param_names()
-
+
# If help is entered, don't do further parameter validation.
for val in cmd.params.keys():
if val == "help":
return
-
- params = cmd.params.copy()
- if not params and manda_params:
- raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
+
+ params = cmd.params.copy()
+ if not params and manda_params:
+ raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
elif params and not all_params:
- raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
+ raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
list(params.keys())[0])
elif params:
param_name = None
@@ -376,7 +380,7 @@ class BindCmdInterpreter(Cmd):
param_name = command_info.get_param_name_by_position(name, param_count)
cmd.params[param_name] = cmd.params[name]
del cmd.params[name]
-
+
elif not name in all_params:
raise CmdUnknownParamSyntaxError(cmd.module, cmd.command, name)
@@ -385,7 +389,7 @@ class BindCmdInterpreter(Cmd):
if not name in params and not param_nr in params:
raise CmdMissParamSyntaxError(cmd.module, cmd.command, name)
param_nr += 1
-
+
# Convert parameter value according parameter spec file.
# Ignore check for commands belongs to module 'config'
if cmd.module != CONFIG_MODULE_NAME:
@@ -394,9 +398,9 @@ class BindCmdInterpreter(Cmd):
try:
cmd.params[param_name] = isc.config.config_data.convert_type(param_spec, cmd.params[param_name])
except isc.cc.data.DataTypeError as e:
- raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
+ raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
% (param_name, param_spec['item_type']) + str(e))
-
+
def _handle_cmd(self, cmd):
'''Handle a command entered by the user'''
if cmd.command == "help" or ("help" in cmd.params.keys()):
@@ -418,7 +422,7 @@ class BindCmdInterpreter(Cmd):
def add_module_info(self, module_info):
'''Add the information about one module'''
self.modules[module_info.name] = module_info
-
+
def get_module_names(self):
'''Return the names of all known modules'''
return list(self.modules.keys())
@@ -450,15 +454,15 @@ class BindCmdInterpreter(Cmd):
subsequent_indent=" " +
" " * CONST_BINDCTL_HELP_INDENT_WIDTH,
width=70))
-
+
def onecmd(self, line):
if line == 'EOF' or line.lower() == "quit":
self.conn.close()
return True
-
+
if line == 'h':
line = 'help'
-
+
Cmd.onecmd(self, line)
def remove_prefix(self, list, prefix):
@@ -486,7 +490,7 @@ class BindCmdInterpreter(Cmd):
cmd = BindCmdParse(cur_line)
if not cmd.params and text:
hints = self._get_command_startswith(cmd.module, text)
- else:
+ else:
hints = self._get_param_startswith(cmd.module, cmd.command,
text)
if cmd.module == CONFIG_MODULE_NAME:
@@ -502,8 +506,8 @@ class BindCmdInterpreter(Cmd):
except CmdMissCommandNameFormatError as e:
if not text.strip(): # command name is empty
- hints = self.modules[e.module].get_command_names()
- else:
+ hints = self.modules[e.module].get_command_names()
+ else:
hints = self._get_module_startswith(text)
except CmdCommandNameFormatError as e:
@@ -523,36 +527,37 @@ class BindCmdInterpreter(Cmd):
else:
return None
- def _get_module_startswith(self, text):
+
+ def _get_module_startswith(self, text):
return [module
- for module in self.modules
+ for module in self.modules
if module.startswith(text)]
def _get_command_startswith(self, module, text):
- if module in self.modules:
+ if module in self.modules:
return [command
- for command in self.modules[module].get_command_names()
+ for command in self.modules[module].get_command_names()
if command.startswith(text)]
-
- return []
-
- def _get_param_startswith(self, module, command, text):
+ return []
+
+
+ def _get_param_startswith(self, module, command, text):
if module in self.modules:
- module_info = self.modules[module]
- if command in module_info.get_command_names():
+ module_info = self.modules[module]
+ if command in module_info.get_command_names():
cmd_info = module_info.get_command_with_name(command)
- params = cmd_info.get_param_names()
+ params = cmd_info.get_param_names()
hint = []
- if text:
+ if text:
hint = [val for val in params if val.startswith(text)]
else:
hint = list(params)
-
+
if len(hint) == 1 and hint[0] != "help":
- hint[0] = hint[0] + " ="
-
+ hint[0] = hint[0] + " ="
+
return hint
return []
@@ -569,24 +574,24 @@ class BindCmdInterpreter(Cmd):
self._print_correct_usage(err)
except isc.cc.data.DataTypeError as err:
print("Error! ", err)
-
- def _print_correct_usage(self, ept):
+
+ def _print_correct_usage(self, ept):
if isinstance(ept, CmdUnknownModuleSyntaxError):
self.do_help(None)
-
+
elif isinstance(ept, CmdUnknownCmdSyntaxError):
self.modules[ept.module].module_help()
-
+
elif isinstance(ept, CmdMissParamSyntaxError) or \
isinstance(ept, CmdUnknownParamSyntaxError):
self.modules[ept.module].command_help(ept.command)
-
-
+
+
def _append_space_to_hint(self):
"""Append one space at the end of complete hint."""
self.hint = [(val + " ") for val in self.hint]
-
-
+
+
def _handle_help(self, cmd):
if cmd.command == "help":
self.modules[cmd.module].module_help()
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index ee4191d..58c03eb 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -146,4 +146,5 @@ if __name__ == '__main__':
tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
csv_file_dir=options.csv_file_dir)
prepare_config_commands(tool)
- tool.run()
+ result = tool.run()
+ sys.exit(result)
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 0635b32..cef35dc 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -31,14 +31,14 @@ from bindctl_main import set_bindctl_options
from bindctl import cmdparse
from bindctl import bindcmd
from bindctl.moduleinfo import *
-from bindctl.exception import *
+from bindctl.exception import *
try:
from collections import OrderedDict
except ImportError:
from mycollections import OrderedDict
class TestCmdLex(unittest.TestCase):
-
+
def my_assert_raise(self, exception_type, cmd_line):
self.assertRaises(exception_type, cmdparse.BindCmdParse, cmd_line)
@@ -48,13 +48,13 @@ class TestCmdLex(unittest.TestCase):
assert cmd.module == "zone"
assert cmd.command == "add"
self.assertEqual(len(cmd.params), 0)
-
-
+
+
def testCommandWithParameters(self):
lines = {"zone add zone_name = cnnic.cn, file = cnnic.cn.file master=1.1.1.1",
"zone add zone_name = \"cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1 ",
"zone add zone_name = 'cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1, " }
-
+
for cmd_line in lines:
cmd = cmdparse.BindCmdParse(cmd_line)
assert cmd.module == "zone"
@@ -75,7 +75,7 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = 1\"\'34**&2 ,value= 44\"\'\"')
self.assertEqual(cmd.params['name'], '1\"\'34**&2')
self.assertEqual(cmd.params['value'], '44\"\'\"')
-
+
cmd = cmdparse.BindCmdParse('zone cmd name = 1\'34**&2value=44\"\'\" value = \"==============\'')
self.assertEqual(cmd.params['name'], '1\'34**&2value=44\"\'\"')
self.assertEqual(cmd.params['value'], '==============')
@@ -83,34 +83,34 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = \"1234, 567890 \" value ==&*/')
self.assertEqual(cmd.params['name'], '1234, 567890 ')
self.assertEqual(cmd.params['value'], '=&*/')
-
+
def testCommandWithListParam(self):
cmd = cmdparse.BindCmdParse("zone set zone_name='cnnic.cn', master='1.1.1.1, 2.2.2.2'")
- assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
-
+ assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
+
def testCommandWithHelpParam(self):
cmd = cmdparse.BindCmdParse("zone add help")
assert cmd.params["help"] == "help"
-
+
cmd = cmdparse.BindCmdParse("zone add help *&)&)*&&$#$^%")
assert cmd.params["help"] == "help"
self.assertEqual(len(cmd.params), 1)
-
+
def testCmdModuleNameFormatError(self):
self.my_assert_raise(CmdModuleNameFormatError, "zone=good")
- self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
- self.my_assert_raise(CmdModuleNameFormatError, "")
+ self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
+ self.my_assert_raise(CmdModuleNameFormatError, "")
self.my_assert_raise(CmdModuleNameFormatError, "=zone")
- self.my_assert_raise(CmdModuleNameFormatError, "zone,")
-
-
+ self.my_assert_raise(CmdModuleNameFormatError, "zone,")
+
+
def testCmdMissCommandNameFormatError(self):
self.my_assert_raise(CmdMissCommandNameFormatError, "zone")
self.my_assert_raise(CmdMissCommandNameFormatError, "zone ")
self.my_assert_raise(CmdMissCommandNameFormatError, "help ")
-
-
+
+
def testCmdCommandNameFormatError(self):
self.my_assert_raise(CmdCommandNameFormatError, "zone =d")
self.my_assert_raise(CmdCommandNameFormatError, "zone z=d")
@@ -119,11 +119,11 @@ class TestCmdLex(unittest.TestCase):
self.my_assert_raise(CmdCommandNameFormatError, "zone zdd/ \"")
class TestCmdSyntax(unittest.TestCase):
-
+
def _create_bindcmd(self):
"""Create one bindcmd"""
-
- tool = bindcmd.BindCmdInterpreter()
+
+ tool = bindcmd.BindCmdInterpreter()
string_spec = { 'item_type' : 'string',
'item_optional' : False,
'item_default' : ''}
@@ -135,40 +135,40 @@ class TestCmdSyntax(unittest.TestCase):
load_cmd = CommandInfo(name = "load")
load_cmd.add_param(zone_file_param)
load_cmd.add_param(zone_name)
-
- param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
- param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
- param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
+
+ param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
+ param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
+ param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
set_cmd = CommandInfo(name = "set")
set_cmd.add_param(param_master)
set_cmd.add_param(param_allow_update)
set_cmd.add_param(zone_name)
-
- reload_all_cmd = CommandInfo(name = "reload_all")
-
- zone_module = ModuleInfo(name = "zone")
+
+ reload_all_cmd = CommandInfo(name = "reload_all")
+
+ zone_module = ModuleInfo(name = "zone")
zone_module.add_command(load_cmd)
zone_module.add_command(set_cmd)
zone_module.add_command(reload_all_cmd)
-
+
tool.add_module_info(zone_module)
return tool
-
-
+
+
def setUp(self):
self.bindcmd = self._create_bindcmd()
-
-
+
+
def no_assert_raise(self, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.bindcmd._validate_cmd(cmd)
-
-
+ self.bindcmd._validate_cmd(cmd)
+
+
def my_assert_raise(self, exception_type, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
-
-
+ self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
+
+
def testValidateSuccess(self):
self.no_assert_raise("zone load zone_file='cn' zone_name='cn'")
self.no_assert_raise("zone load zone_file='cn', zone_name='cn', ")
@@ -178,27 +178,27 @@ class TestCmdSyntax(unittest.TestCase):
self.no_assert_raise("zone set allow_update='1.1.1.1' zone_name='cn'")
self.no_assert_raise("zone set zone_name='cn'")
self.my_assert_raise(isc.cc.data.DataTypeError, "zone set zone_name ='cn', port='cn'")
- self.no_assert_raise("zone reload_all")
-
-
+ self.no_assert_raise("zone reload_all")
+
+
def testCmdUnknownModuleSyntaxError(self):
self.my_assert_raise(CmdUnknownModuleSyntaxError, "zoned d")
self.my_assert_raise(CmdUnknownModuleSyntaxError, "dd dd ")
-
-
+
+
def testCmdUnknownCmdSyntaxError(self):
self.my_assert_raise(CmdUnknownCmdSyntaxError, "zone dd")
-
+
def testCmdMissParamSyntaxError(self):
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_file='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_name='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set allow_update='1.1.1.1'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set ")
-
+
def testCmdUnknownParamSyntaxError(self):
self.my_assert_raise(CmdUnknownParamSyntaxError, "zone load zone_d='cn'")
- self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
-
+ self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
+
class TestModuleInfo(unittest.TestCase):
def test_get_param_name_by_position(self):
@@ -212,36 +212,36 @@ class TestModuleInfo(unittest.TestCase):
self.assertEqual('sex', cmd.get_param_name_by_position(2, 3))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
-
+
self.assertRaises(KeyError, cmd.get_param_name_by_position, 4, 4)
-
+
class TestNameSequence(unittest.TestCase):
"""
Test if the module/command/parameters is saved in the order creation
"""
-
+
def _create_bindcmd(self):
- """Create one bindcmd"""
-
+ """Create one bindcmd"""
+
self._cmd = CommandInfo(name = "load")
self.module = ModuleInfo(name = "zone")
- self.tool = bindcmd.BindCmdInterpreter()
+ self.tool = bindcmd.BindCmdInterpreter()
for random_str in self.random_names:
self._cmd.add_param(ParamInfo(name = random_str))
self.module.add_command(CommandInfo(name = random_str))
- self.tool.add_module_info(ModuleInfo(name = random_str))
-
+ self.tool.add_module_info(ModuleInfo(name = random_str))
+
def setUp(self):
self.random_names = ['1erdfeDDWsd', '3fe', '2009erd', 'Fe231', 'tere142', 'rei8WD']
self._create_bindcmd()
-
- def testSequence(self):
+
+ def testSequence(self):
param_names = self._cmd.get_param_names()
cmd_names = self.module.get_command_names()
module_names = self.tool.get_module_names()
-
+
i = 0
while i < len(self.random_names):
assert self.random_names[i] == param_names[i+1]
@@ -342,7 +342,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for socket.err
socket_err_output = io.StringIO()
sys.stdout = socket_err_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Failed to send request, the connection is closed\n",
socket_err_output.getvalue())
socket_err_output.close()
@@ -350,7 +350,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for http.client.CannotSendRequest
cannot_send_output = io.StringIO()
sys.stdout = cannot_send_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Can not send request, the connection is busy\n",
cannot_send_output.getvalue())
cannot_send_output.close()
@@ -472,4 +472,4 @@ class TestCommandLineOptions(unittest.TestCase):
if __name__== "__main__":
unittest.main()
-
+
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index a791aa3..ff221db 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -17,12 +17,12 @@
''' cmdctl module is the configuration entry point for all commands from bindctl
or some other web tools client of bind10. cmdctl is pure https server which provi-
-des RESTful API. When command client connecting with cmdctl, it should first login
-with legal username and password.
- When cmdctl starting up, it will collect command specification and
+des RESTful API. When command client connecting with cmdctl, it should first login
+with legal username and password.
+ When cmdctl starting up, it will collect command specification and
configuration specification/data of other available modules from configmanager, then
wait for receiving request from client, parse the request and resend the request to
-the proper module. When getting the request result from the module, send back the
+the proper module. When getting the request result from the module, send back the
resut to client.
'''
@@ -81,16 +81,16 @@ SPECFILE_LOCATION = SPECFILE_PATH + os.sep + "cmdctl.spec"
class CmdctlException(Exception):
pass
-
+
class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
'''https connection request handler.
Currently only GET and POST are supported. '''
def do_GET(self):
- '''The client should send its session id in header with
+ '''The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
- rcode, reply = http.client.OK, []
+ rcode, reply = http.client.OK, []
if self._is_session_valid():
if self._is_user_logged_in():
rcode, reply = self._handle_get_request()
@@ -106,16 +106,16 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _handle_get_request(self):
'''Currently only support the following three url GET request '''
id, module = self._parse_request_path()
- return self.server.get_reply_data_for_GET(id, module)
+ return self.server.get_reply_data_for_GET(id, module)
def _is_session_valid(self):
- return self.session_id
+ return self.session_id
def _is_user_logged_in(self):
login_time = self.server.user_sessions.get(self.session_id)
if not login_time:
return False
-
+
idle_time = time.time() - login_time
if idle_time > self.server.idle_timeout:
return False
@@ -125,7 +125,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _parse_request_path(self):
'''Parse the url, the legal url should like /ldh or /ldh/ldh '''
- groups = URL_PATTERN.match(self.path)
+ groups = URL_PATTERN.match(self.path)
if not groups:
return (None, None)
else:
@@ -133,8 +133,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
'''Process POST request. '''
- '''Process user login and send command to proper module
- The client should send its session id in header with
+ '''Process user login and send command to proper module
+ The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
@@ -148,7 +148,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
rcode, reply = http.client.UNAUTHORIZED, ["please login"]
else:
rcode, reply = http.client.BAD_REQUEST, ["session isn't valid"]
-
+
self.send_response(rcode)
self.end_headers()
self.wfile.write(json.dumps(reply).encode())
@@ -169,12 +169,12 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
length = self.headers.get('Content-Length')
if not length:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
try:
user_info = json.loads((self.rfile.read(int(length))).decode())
except:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
user_name = user_info.get('username')
if not user_name:
@@ -193,7 +193,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
return False, ["username or password error"]
return True, None
-
+
def _handle_post_request(self):
'''Handle all the post request from client. '''
@@ -215,7 +215,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
if rcode != 0:
ret = http.client.BAD_REQUEST
return ret, reply
-
+
def log_request(self, code='-', size='-'):
'''Rewrite the log request function, log nothing.'''
pass
@@ -239,11 +239,11 @@ class CommandControl():
def _setup_session(self):
'''Setup the session for receving the commands
- sent from other modules. There are two sessions
- for cmdctl, one(self.module_cc) is used for receiving
- commands sent from other modules, another one (self._cc)
- is used to send the command from Bindctl or other tools
- to proper modules.'''
+ sent from other modules. There are two sessions
+ for cmdctl, one(self.module_cc) is used for receiving
+ commands sent from other modules, another one (self._cc)
+ is used to send the command from Bindctl or other tools
+ to proper modules.'''
self._cc = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
self.config_handler,
@@ -251,7 +251,7 @@ class CommandControl():
self._module_name = self._module_cc.get_module_spec().get_module_name()
self._cmdctl_config_data = self._module_cc.get_full_config()
self._module_cc.start()
-
+
def _accounts_file_check(self, filepath):
''' Check whether the accounts file is valid, each row
should be a list with 3 items.'''
@@ -288,7 +288,7 @@ class CommandControl():
errstr = self._accounts_file_check(new_config[key])
else:
errstr = 'unknown config item: ' + key
-
+
if errstr != None:
logger.error(CMDCTL_BAD_CONFIG_DATA, errstr);
return ccsession.create_answer(1, errstr)
@@ -314,7 +314,7 @@ class CommandControl():
self.modules_spec[args[0]] = args[1]
elif command == ccsession.COMMAND_SHUTDOWN:
- #When cmdctl get 'shutdown' command from boss,
+ #When cmdctl get 'shutdown' command from boss,
#shutdown the outer httpserver.
self._httpserver.shutdown()
self._serving = False
@@ -384,12 +384,12 @@ class CommandControl():
specs = self.get_modules_spec()
if module_name not in specs.keys():
return 1, {'error' : 'unknown module'}
-
+
spec_obj = isc.config.module_spec.ModuleSpec(specs[module_name], False)
errors = []
if not spec_obj.validate_command(command_name, params, errors):
return 1, {'error': errors[0]}
-
+
return self.send_command(module_name, command_name, params)
def send_command(self, module_name, command_name, params = None):
@@ -400,7 +400,7 @@ class CommandControl():
command_name, module_name)
if module_name == self._module_name:
- # Process the command sent to cmdctl directly.
+ # Process the command sent to cmdctl directly.
answer = self.command_handler(command_name, params)
else:
msg = ccsession.create_command(command_name, params)
@@ -429,7 +429,7 @@ class CommandControl():
logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
return 1, {'error': errstr}
-
+
def get_cmdctl_config_data(self):
''' If running in source code tree, use keyfile, certificate
and user accounts file in source code. '''
@@ -453,13 +453,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
'''Make the server address can be reused.'''
allow_reuse_address = True
- def __init__(self, server_address, RequestHandlerClass,
+ def __init__(self, server_address, RequestHandlerClass,
CommandControlClass,
idle_timeout = 1200, verbose = False):
'''idle_timeout: the max idle time for login'''
socketserver_mixin.NoPollMixIn.__init__(self)
try:
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_STARTED,
+ server_address[0], server_address[1])
except socket.error as err:
raise CmdctlException("Error creating server, because: %s \n" % str(err))
@@ -472,9 +474,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self._accounts_file = None
def _create_user_info(self, accounts_file):
- '''Read all user's name and its' salt, hashed password
+ '''Read all user's name and its' salt, hashed password
from accounts file.'''
- if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
+ if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
return
with self._lock:
@@ -495,10 +497,10 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self._accounts_file = accounts_file
if len(self._user_infos) == 0:
logger.error(CMDCTL_NO_USER_ENTRIES_READ)
-
+
def get_user_info(self, username):
'''Get user's salt and hashed string. If the user
- doesn't exist, return None, or else, the list
+ doesn't exist, return None, or else, the list
[salt, hashed password] will be returned.'''
with self._lock:
info = self._user_infos.get(username)
@@ -507,9 +509,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def save_user_session_id(self, session_id):
''' Record user's id and login time. '''
self.user_sessions[session_id] = time.time()
-
+
def _check_key_and_cert(self, key, cert):
- # TODO, check the content of key/certificate file
+ # TODO, check the content of key/certificate file
if not os.path.exists(key):
raise CmdctlException("key file '%s' doesn't exist " % key)
@@ -524,7 +526,7 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
certfile = cert,
keyfile = key,
ssl_version = ssl.PROTOCOL_SSLv23)
- return ssl_sock
+ return ssl_sock
except (ssl.SSLError, CmdctlException) as err :
logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
self.close_request(sock)
@@ -541,18 +543,18 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def get_reply_data_for_GET(self, id, module):
'''Currently only support the following three url GET request '''
- rcode, reply = http.client.NO_CONTENT, []
+ rcode, reply = http.client.NO_CONTENT, []
if not module:
if id == CONFIG_DATA_URL:
rcode, reply = http.client.OK, self.cmdctl.get_config_data()
elif id == MODULE_SPEC_URL:
rcode, reply = http.client.OK, self.cmdctl.get_modules_spec()
-
- return rcode, reply
+
+ return rcode, reply
def send_command_to_module(self, module_name, command_name, params):
return self.cmdctl.send_command_with_check(module_name, command_name, params)
-
+
httpd = None
def signal_handler(signal, frame):
@@ -566,10 +568,9 @@ def set_signal_handler():
def run(addr = 'localhost', port = 8080, idle_timeout = 1200, verbose = False):
''' Start cmdctl as one https server. '''
- if verbose:
- sys.stdout.write("[b10-cmdctl] starting on %s port:%d\n" %(addr, port))
- httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
+ httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
CommandControl, idle_timeout, verbose)
+
httpd.serve_forever()
def check_port(option, opt_str, value, parser):
@@ -607,6 +608,8 @@ if __name__ == '__main__':
(options, args) = parser.parse_args()
result = 1 # in case of failure
try:
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
run(options.addr, options.port, options.idle_timeout, options.verbose)
result = 0
except isc.cc.SessionError as err:
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index e007296..a3371b9 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -64,6 +64,9 @@ be set up. The specific error is given in the log message. Possible
causes may be that the ssl request itself was bad, or the local key or
certificate file could not be read.
+% CMDCTL_STARTED cmdctl is listening for connections on %1:%2
+The cmdctl daemon has started and is now listening for connections.
+
% CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cmdctl daemon. The
daemon will now shut down.
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index f9a1d9d..72e48e4 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -130,13 +130,15 @@ TEST_F(Dhcpv6SrvTest, Solicit_basic) {
ASSERT_TRUE( tmp );
EXPECT_EQ(clientid->getType(), tmp->getType() );
ASSERT_EQ(clientid->len(), tmp->len() );
- EXPECT_FALSE(memcmp(clientid->getData(), tmp->getData(), tmp->len() ) );
+
+ EXPECT_TRUE( clientid->getData() == tmp->getData() );
+
// check that server included its server-id
tmp = reply->getOption(D6O_SERVERID);
EXPECT_EQ(tmp->getType(), srv->getServerID()->getType() );
ASSERT_EQ(tmp->len(), srv->getServerID()->len() );
- EXPECT_FALSE( memcmp(tmp->getData(), srv->getServerID()->getData(),
- tmp->len()) );
+
+ EXPECT_TRUE(tmp->getData() == srv->getServerID()->getData());
// more checks to be implemented
delete srv;
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index c262c22..1e4d942 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -2022,6 +2022,19 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 1)
+ # also try a different port in the actual command
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV6_ADDRESS,
+ 'master_port': str(int(TEST_MASTER_PORT) + 1)
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ # the command should now fail
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+
def test_command_handler_notify_known_zone(self):
# try it with a known zone
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
@@ -2037,21 +2050,6 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 0)
- # Note: The rest of the tests won't pass due to the change in #1298
- # We should probably simply remove the test cases, but for now we
- # just comment them out. (Note also that the comment about 'not
- # from the config' is now wrong, because we used the matching address.)
- #
- # and see if we used the address from the command, and not from
- # the config
- # This is actually NOT the address given in the command, which
- # would at this point not make sense, see the TODO in
- # xfrin.py.in Xfrin.command_handler())
-# self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
-# self.xfr.xfrin_started_master_addr)
-# self.assertEqual(int(TEST_MASTER_PORT),
-# self.xfr.xfrin_started_master_port)
-
def test_command_handler_unknown(self):
self.assertEqual(self.xfr.command_handler("xxx", None)['result'][0], 1)
@@ -2413,6 +2411,58 @@ class TestXfrinProcess(unittest.TestCase):
"""
self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
[RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+class TestFormatting(unittest.TestCase):
+ # If the formatting functions are moved to a more general library
+ # (ticket #1379), these tests should be moved with them.
+ def test_format_zone_str(self):
+ self.assertEqual("example.com/IN",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("IN")))
+ self.assertEqual("example.com/CH",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("CH")))
+ self.assertEqual("example.org/IN",
+ format_zone_str(isc.dns.Name("example.org"),
+ isc.dns.RRClass("IN")))
+
+ def test_format_addrinfo(self):
+ # This test may need to be updated if the input type is changed,
+ # right now it is a nested tuple:
+ # (family, sockettype, (address, port))
+ # of which sockettype is ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.2:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.2", 53))))
+ self.assertEqual("192.0.2.1:54",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 54))))
+ self.assertEqual("[2001:db8::1]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 53))))
+ self.assertEqual("[2001:db8::2]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::2", 53))))
+ self.assertEqual("[2001:db8::1]:54",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 54))))
+ self.assertEqual("/some/file",
+ format_addrinfo((socket.AF_UNIX, socket.SOCK_STREAM,
+ "/some/file")))
+ # second element of passed tuple should be ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, None,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, "Just some string",
+ ("192.0.2.1", 53))))
+ self.assertRaises(TypeError, format_addrinfo, 1)
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf"))
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf", ()))
if __name__== "__main__":
try:
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 702643e..911b3b3 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -122,6 +122,36 @@ def _check_zone_class(zone_class_str):
except InvalidRRClass as irce:
raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text(True) + '/' + str(zone_class)
+
+def format_addrinfo(addrinfo):
+ """Helper function to format the addrinfo as a string of the form
+ <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+ sockets, and unknown address families, it returns a basic string
+ conversion of the third element of the passed tuple.
+ Parameters:
+ addrinfo: a 3-tuple consisting of address family, socket type, and,
+ depending on the family, either a 2-tuple with the address
+ and port, or a filename
+ """
+ try:
+ if addrinfo[0] == socket.AF_INET:
+ return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+ elif addrinfo[0] == socket.AF_INET6:
+ return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+ else:
+ return str(addrinfo[2])
+ except IndexError:
+ raise TypeError("addrinfo argument to format_addrinfo() does not "
+ "appear to be consisting of (family, socktype, (addr, port))")
+
def get_soa_serial(soa_rdata):
'''Extract the serial field of an SOA RDATA and returns it as an intger.
@@ -498,8 +528,8 @@ class XfrinConnection(asyncore.dispatcher):
return self.__state
def zone_str(self):
- '''A convenient function for logging to include zone name and class'''
- return self._zone_name.to_text() + '/' + str(self._rrclass)
+ '''A convenience function for logging to include zone name and class'''
+ return format_zone_str(self._zone_name, self._rrclass)
def connect_to_master(self):
'''Connect to master in TCP.'''
@@ -1094,20 +1124,22 @@ class Xfrin:
# a security hole. Once we add the ability to have multiple master addresses,
# we should check if it matches one of them, and then use it.)
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
+ zone_str = format_zone_str(zone_name, rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
notify_addr = self._parse_master_and_port(args, zone_name,
rrclass)
if zone_info is None:
# TODO what to do? no info known about zone. defaults?
- errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
- logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
+ errmsg = "Got notification to retransfer unknown zone " + zone_str
+ logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
answer = create_answer(1, errmsg)
else:
request_type = RRType.AXFR()
if zone_info.use_ixfr:
request_type = RRType.IXFR()
master_addr = zone_info.get_master_addr_info()
- if notify_addr == master_addr:
+ if notify_addr[0] == master_addr[0] and\
+ notify_addr[2] == master_addr[2]:
ret = self.xfrin_start(zone_name,
rrclass,
self._get_db_file(),
@@ -1116,11 +1148,12 @@ class Xfrin:
True)
answer = create_answer(ret[0], ret[1])
else:
- errmsg = "Got notification for " + zone_name.to_text()\
- + "from unknown address: " + notify_addr[2][0];
- logger.error(XFRIN_NOTIFY_UNKNOWN_MASTER,
- zone_name.to_text(), notify_addr[2][0],
- master_addr[2][0])
+ notify_addr_str = format_addrinfo(notify_addr)
+ master_addr_str = format_addrinfo(master_addr)
+ errmsg = "Got notification for " + zone_str\
+ + "from unknown address: " + notify_addr_str;
+ logger.info(XFRIN_NOTIFY_UNKNOWN_MASTER, zone_str,
+ notify_addr_str, master_addr_str)
answer = create_answer(1, errmsg)
elif command == 'retransfer' or command == 'refresh':
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index ace8fc9..b06921b 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -2,6 +2,10 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/test.sqlite3
+# This one is actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
@@ -24,5 +28,6 @@ endif
B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
new file mode 100644
index 0000000..25c5e6a
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -0,0 +1,6 @@
+;; This is the source of a zone stored in test.sqlite3. It's provided
+;; for reference purposes only.
+example.com. 3600 IN SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com. 3600 IN NS a.dns.example.com.
+a.dns.example.com. 3600 IN A 192.0.2.1
+a.dns.example.com. 7200 IN A 192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..af491f5
Binary files /dev/null and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 7202742..6d64c91 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -27,6 +27,7 @@ import xfrout
import isc.log
import isc.acl.dns
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
# our fake socket, where we can read and insert messages
@@ -55,10 +56,10 @@ class MySocket():
self.sendqueue = self.sendqueue[size:]
return result
- def read_msg(self):
+ def read_msg(self, parse_options=Message.PARSE_DEFAULT):
sent_data = self.readsent()
get_msg = Message(Message.PARSE)
- get_msg.from_wire(bytes(sent_data[2:]))
+ get_msg.from_wire(bytes(sent_data[2:]), parse_options)
return get_msg
def clear_send(self):
@@ -68,7 +69,7 @@ class MockDataSrcClient:
def __init__(self, type, config):
pass
- def get_iterator(self, zone_name):
+ def get_iterator(self, zone_name, adjust_ttl=False):
if zone_name == Name('notauth.example.com'):
raise isc.datasrc.Error('no such zone')
self._zone_name = zone_name
@@ -135,7 +136,13 @@ class Dbserver:
def decrease_transfers_counter(self):
self.transfer_counter -= 1
-class TestXfroutSession(unittest.TestCase):
+class TestXfroutSessionBase(unittest.TestCase):
+ '''Base classs for tests related to xfrout sessions
+
+ This class defines common setup/teadown and utility methods. Actual
+ tests are delegated to subclasses.
+
+ '''
def getmsg(self):
msg = Message(Message.PARSE)
msg.from_wire(self.mdata)
@@ -188,12 +195,17 @@ class TestXfroutSession(unittest.TestCase):
'master.Example.com. ' +
'admin.exAmple.com. ' +
'1234 3600 1800 2419200 7200'))
+ # some test replaces a module-wide function. We should ensure the
+ # original is used elsewhere.
+ self.orig_get_rrset_len = xfrout.get_rrset_len
def tearDown(self):
+ xfrout.get_rrset_len = self.orig_get_rrset_len
# transfer_counter must be always be reset no matter happens within
# the XfroutSession object. We check the condition here.
self.assertEqual(0, self.xfrsess._server.transfer_counter)
+class TestXfroutSession(TestXfroutSessionBase):
def test_quota_error(self):
'''Emulating the server being too busy.
@@ -702,6 +714,35 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
+
+class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
+ '''Tests for XFR-out sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to actual operational environments. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ super().setUp()
+ self.xfrsess._request_data = self.mdata
+ self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
+ 'test.sqlite3'
+
+ def test_axfr_normal_session(self):
+ XfroutSession._handle(self.xfrsess)
+ response = self.sock.read_msg(Message.PRESERVE_ORDER);
+ self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ # This zone contains two A RRs for the same name with different TTLs.
+ # These TTLs should be preseved in the AXFR stream.
+ actual_ttls = []
+ for rr in response.get_section(Message.SECTION_ANSWER):
+ if rr.get_type() == RRType.A() and \
+ not rr.get_ttl() in actual_ttls:
+ actual_ttls.append(rr.get_ttl().get_value())
+ self.assertEqual([3600, 7200], sorted(actual_ttls))
+
class MyUnixSockServer(UnixSockServer):
def __init__(self):
self._shutdown_event = threading.Event()
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 6cd02f7..f918cca 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -316,7 +316,11 @@ class XfroutSession():
self._server.get_db_file() + '"}'
self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
try:
- self._iterator = self._datasrc_client.get_iterator(zone_name)
+ # Note that we disable 'adjust_ttl'. In xfr-out we need to
+ # preserve as many things as possible (even if it's half broken)
+ # stored in the zone.
+ self._iterator = self._datasrc_client.get_iterator(zone_name,
+ False)
except isc.datasrc.Error:
# If the current name server does not have authority for the
# zone, xfrout can't serve for it, return rcode NOTAUTH.
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 40b7a3f..2c3f709 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -215,11 +215,18 @@ public:
///
/// \param name The name of zone apex to be traversed. It doesn't do
/// nearest match as findZone.
+ /// \param adjust_ttl If true, the iterator will treat RRs with the same
+ /// name and type but different TTL values to be of the
+ /// same RRset, and will adjust the TTL to the lowest
+ /// value found. If false, it will consider the RR to
+ /// belong to a different RRset.
/// \return Pointer to the iterator.
- virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const {
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+ bool adjust_ttl = true) const {
// This is here to both document the parameter in doxygen (therefore it
// needs a name) and avoid unused parameter warning.
static_cast<void>(name);
+ static_cast<void>(adjust_ttl);
isc_throw(isc::NotImplemented,
"Data source doesn't support iteration");
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 3b079c6..f06cdc0 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -706,10 +706,12 @@ class DatabaseIterator : public ZoneIterator {
public:
DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
const Name& zone_name,
- const RRClass& rrclass) :
+ const RRClass& rrclass,
+ bool adjust_ttl) :
accessor_(accessor),
class_(rrclass),
- ready_(true)
+ ready_(true),
+ adjust_ttl_(adjust_ttl)
{
// Get the zone
const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
@@ -767,13 +769,17 @@ public:
const RRType rtype(rtype_str);
RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
- if (ttl_ != ttl) {
- if (ttl < ttl_) {
- ttl_ = ttl;
- rrset->setTTL(RRTTL(ttl));
+ if (adjust_ttl_) {
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
}
- LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
- arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ } else if (ttl_ != ttl) {
+ break;
}
rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
getData();
@@ -806,15 +812,20 @@ private:
bool ready_, data_ready_;
// Data of the next row
string name_, rtype_, rdata_, ttl_;
+ // Whether to modify differing TTL values, or treat a different TTL as
+ // a different RRset
+ bool adjust_ttl_;
};
}
ZoneIteratorPtr
-DatabaseClient::getIterator(const isc::dns::Name& name) const {
+DatabaseClient::getIterator(const isc::dns::Name& name,
+ bool adjust_ttl) const
+{
ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
accessor_->clone(), name,
- rrclass_));
+ rrclass_, adjust_ttl));
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
arg(name);
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index d80a4ab..b3fda6d 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -85,7 +85,7 @@ public:
* Definitions of the fields to be passed to addRecordToZone().
*
* Each derived implementation of addRecordToZone() should expect
- * the "columns" vector to be filled with the values as described in this
+ * the "columns" array to be filled with the values as described in this
* enumeration, in this order.
*/
enum AddRecordColumns {
@@ -103,7 +103,7 @@ public:
* Definitions of the fields to be passed to deleteRecordInZone().
*
* Each derived implementation of deleteRecordInZone() should expect
- * the "params" vector to be filled with the values as described in this
+ * the "params" array to be filled with the values as described in this
* enumeration, in this order.
*/
enum DeleteRecordParams {
@@ -114,6 +114,31 @@ public:
};
/**
+ * Operation mode when adding a record diff.
+ *
+ * This is used as the "operation" parameter value of addRecordDiff().
+ */
+ enum DiffOperation {
+ DIFF_ADD = 0, ///< This diff is for adding an RR
+ DIFF_DELETE = 1 ///< This diff is for deleting an RR
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordDiff().
+ *
+ * Each derived implementation of addRecordDiff() should expect
+ * the "params" array to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DiffRecordParams {
+ DIFF_NAME = 0, ///< The owner name of the record (a domain name)
+ DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
+ DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+ DIFF_PARAM_COUNT = 4 ///< Number of parameters
+ };
+
+ /**
* \brief Destructor
*
* It is empty, but needs a virtual one, since we will use the derived
@@ -453,6 +478,82 @@ public:
/// to the method or internal database error.
virtual void rollback() = 0;
+ /// Install a single RR diff in difference sequences for zone update.
+ ///
+ /// This method inserts parameters of an update operation for a single RR
+ /// (either adding or deleting one) in the underlying database.
+ /// (These parameters would normally be a separate database table, but
+ /// actual realization can differ in specific implementations).
+ /// The information given via this method generally corresponds to either
+ /// a single call to \c addRecordToZone() or \c deleteRecordInZone(),
+ /// and this method is expected to be called immediately after (or before)
+ /// a call to either of those methods.
+ ///
+ /// Note, however, that this method passes more detailed information
+ /// than those update methods: it passes "serial", even if the diff
+ /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
+ /// while in \c deleteRecordInZone() it's omitted. This is because
+ /// the stored diffs are expected to be retrieved in the form that
+ /// \c getRecordDiffs() is expected to meet. This means if the caller
+ /// wants to use this method with other update operations, it must
+ /// ensure the additional information is ready when this method is called.
+ ///
+ /// \note \c getRecordDiffs() is not yet implemented.
+ ///
+ /// The caller of this method must ensure that the added diffs via
+ /// this method in a single transaction form an IXFR-style difference
+ /// sequences: Each difference sequence is a sequence of RRs:
+ /// an older version of SOA (to be deleted), zero or more other deleted
+ /// RRs, the post-transaction SOA (to be added), and zero or more other
+ /// added RRs. So, for example, the first call to this method in a
+ /// transaction must always be deleting an SOA. Also, the \c serial
+ /// parameter must be equal to the value of the serial field of the
+ /// SOA that was last added or deleted (if the call is to add or delete
+ /// an SOA RR, \c serial must be identical to the serial of that SOA).
+ /// The underlying derived class implementation may or may not check
+ /// this condition, but if the caller doesn't meet the condition
+ /// a subsequent call to \c getRecordDiffs() will not work as expected.
+ ///
+ /// Any call to this method must be in a transaction, and, for now,
+ /// it must be a transaction triggered by \c startUpdateZone() (that is,
+ /// it cannot be a transaction started by \c startTransaction()).
+ /// All calls to this method are considered to be part of an atomic
+ /// transaction: Until \c commit() is performed, the added diffs are
+ /// not visible outside the transaction; if \c rollback() is performed,
+ /// all added diffs are canceled; and the added sequences are not
+ /// affected by any concurrent attempt of adding diffs (conflict resolution
+ /// is up to the database implementation).
+ ///
+ /// Also for now, all diffs are assumed to be for the zone that is
+ /// being updated in the context of \c startUpdateZone(). So the
+ /// \c zone_id parameter must be identical to the zone ID returned by
+ /// \c startUpdateZone().
+ ///
+ /// In a future version we may loosen this condition so that diffs can be
+ /// added in a generic transaction and may not even have to belong to
+ /// a single zone. For this possible extension \c zone_id parameter is
+ /// included even if it's redundant under the current restriction.
+ ///
+ /// The support for adding (or retrieving) diffs is optional; if it's
+ /// not supported in a specific data source, this method for the
+ /// corresponding derived class will throw an \c NotImplemented exception.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// zone ID doesn't match the zone being updated, or other internal
+ /// database error.
+ /// \exception NotImplemented Adding diffs is not supported in the
+ /// data source.
+ /// \exception Other The concrete derived method may throw other
+ /// data source specific exceptions.
+ ///
+ /// \param zone_id The zone for the diff to be added.
+ /// \param serial The SOA serial to which the diff belongs.
+ /// \param operation Either \c DIFF_ADD or \c DIFF_DELETE.
+ /// \param params An array of strings that defines a record for the diff.
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT]) = 0;
+
/// Clone the accessor with the same configuration.
///
/// Each derived class implementation of this method will create a new
@@ -762,9 +863,15 @@ public:
* \exception Anything else the underlying DatabaseConnection might
* want to throw.
* \param name The origin of the zone to iterate.
+ * \param adjust_ttl If true, the iterator will treat RRs with the same
+ * name and type but different TTL values to be of the
+ * same RRset, and will adjust the TTL to the lowest
+ * value found. If false, it will consider the RR to
+ * belong to a different RRset.
* \return Shared pointer to the iterator (it will never be NULL)
*/
- virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+ bool adjust_ttl = true) const;
/// This implementation internally clones the accessor from the one
/// used in the client and starts a separate transaction using the cloned
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 8da43d0..6c0f589 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -789,7 +789,11 @@ public:
} // End of anonymous namespace
ZoneIteratorPtr
-InMemoryClient::getIterator(const Name& name) const {
+InMemoryClient::getIterator(const Name& name, bool) const {
+ // note: adjust_ttl argument is ignored, as the RRsets are already
+ // individually stored, and hence cannot have different TTLs anymore at
+ // this point
+
ZoneTable::FindResult result(impl_->zone_table.findZone(name));
if (result.code != result::SUCCESS) {
isc_throw(DataSourceError, "No such zone: " + name.toText());
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 610deff..1b6c120 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -272,7 +272,8 @@ public:
virtual FindResult findZone(const isc::dns::Name& name) const;
/// \brief Implementation of the getIterator method
- virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+ bool adjust_ttl = true) const;
/// In-memory data source is read-only, so this derived method will
/// result in a NotImplemented exception.
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index cf1593a..01b9f41 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -52,7 +52,9 @@ enum StatementID {
DEL_RECORD = 8,
ITERATE = 9,
FIND_PREVIOUS = 10,
- NUM_STATEMENTS = 11
+ ADD_RECORD_DIFF = 11,
+ GET_RECORD_DIFF = 12, // This is temporary for testing "add diff"
+ NUM_STATEMENTS = 13
};
const char* const text_statements[NUM_STATEMENTS] = {
@@ -81,7 +83,12 @@ const char* const text_statements[NUM_STATEMENTS] = {
*/
"SELECT name FROM records " // FIND_PREVIOUS
"WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1"
+ "rname < $2 ORDER BY rname DESC LIMIT 1",
+ "INSERT INTO diffs " // ADD_RECORD_DIFF
+ "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
+ , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+ "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
};
struct SQLite3Parameters {
@@ -94,12 +101,45 @@ struct SQLite3Parameters {
}
}
+ // This method returns the specified ID of SQLITE3 statement. If it's
+ // not yet prepared it internally creates a new one. This way we can
+ // avoid preparing unnecessary statements and minimize the overhead.
+ sqlite3_stmt*
+ getStatement(int id) {
+ assert(id < NUM_STATEMENTS);
+ if (statements_[id] == NULL) {
+ assert(db_ != NULL);
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db_, text_statements[id], -1, &prepared,
+ NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: "
+ << text_statements[id] <<
+ ": " << sqlite3_errmsg(db_));
+ }
+ statements_[id] = prepared;
+ }
+ return (statements_[id]);
+ }
+
+ void
+ finalizeStatements() {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ if (statements_[i] != NULL) {
+ sqlite3_finalize(statements_[i]);
+ statements_[i] = NULL;
+ }
+ }
+ }
+
sqlite3* db_;
int version_;
- sqlite3_stmt* statements_[NUM_STATEMENTS];
bool in_transaction; // whether or not a transaction has been started
bool updating_zone; // whether or not updating the zone
int updated_zone_id; // valid only when in_transaction is true
+private:
+ // statements_ are private and must be accessed via getStatement() outside
+ // of this structure.
+ sqlite3_stmt* statements_[NUM_STATEMENTS];
};
// This is a helper class to encapsulate the code logic of executing
@@ -116,18 +156,19 @@ public:
// DataSourceError exception.
StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
const char* desc) :
- dbparameters_(dbparameters), stmt_id_(stmt_id), desc_(desc)
+ dbparameters_(dbparameters), stmt_(dbparameters.getStatement(stmt_id)),
+ desc_(desc)
{
- sqlite3_clear_bindings(dbparameters_.statements_[stmt_id_]);
+ sqlite3_clear_bindings(stmt_);
}
~StatementProcessor() {
- sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ sqlite3_reset(stmt_);
}
void exec() {
- if (sqlite3_step(dbparameters_.statements_[stmt_id_]) != SQLITE_DONE) {
- sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ if (sqlite3_step(stmt_) != SQLITE_DONE) {
+ sqlite3_reset(stmt_);
isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
sqlite3_errmsg(dbparameters_.db_));
}
@@ -135,7 +176,7 @@ public:
private:
SQLite3Parameters& dbparameters_;
- const StatementID stmt_id_;
+ sqlite3_stmt* stmt_;
const char* const desc_;
};
@@ -170,10 +211,6 @@ namespace {
class Initializer {
public:
~Initializer() {
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- sqlite3_finalize(params_.statements_[i]);
- }
-
if (params_.db_ != NULL) {
sqlite3_close(params_.db_);
}
@@ -206,6 +243,11 @@ const char* const SCHEMA_LIST[] = {
"ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
"rdata STRING NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
+ "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdata STRING NOT NULL)",
NULL
};
@@ -214,7 +256,7 @@ prepare(sqlite3* const db, const char* const statement) {
sqlite3_stmt* prepared = NULL;
if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
- statement);
+ statement << ": " << sqlite3_errmsg(db));
}
return (prepared);
}
@@ -304,10 +346,6 @@ checkAndSetupSchema(Initializer* initializer) {
schema_version = create_database(db);
}
initializer->params_.version_ = schema_version;
-
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- initializer->params_.statements_[i] = prepare(db, text_statements[i]);
- }
}
}
@@ -345,12 +383,7 @@ SQLite3Accessor::close(void) {
"SQLite data source is being closed before open");
}
- // XXX: sqlite3_finalize() could fail. What should we do in that case?
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- sqlite3_finalize(dbparameters_->statements_[i]);
- dbparameters_->statements_[i] = NULL;
- }
-
+ dbparameters_->finalizeStatements();
sqlite3_close(dbparameters_->db_);
dbparameters_->db_ = NULL;
}
@@ -358,7 +391,7 @@ SQLite3Accessor::close(void) {
std::pair<bool, int>
SQLite3Accessor::getZone(const std::string& name) const {
int rc;
- sqlite3_stmt* const stmt = dbparameters_->statements_[ZONE];
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ZONE);
// Take the statement (simple SELECT id FROM zones WHERE...)
// and prepare it (bind the parameters to it)
@@ -439,7 +472,6 @@ public:
accessor_(accessor),
statement_(NULL),
name_(name)
-
{
// We create the statement now and then just keep getting data from it
statement_ = prepare(accessor->dbparameters_->db_,
@@ -522,7 +554,7 @@ private:
const IteratorType iterator_type_;
boost::shared_ptr<const SQLite3Accessor> accessor_;
- sqlite3_stmt *statement_;
+ sqlite3_stmt* statement_;
const std::string name_;
};
@@ -563,10 +595,9 @@ SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
"delete zone records");
- sqlite3_clear_bindings(
- dbparameters_->statements_[DEL_ZONE_RECORDS]);
- if (sqlite3_bind_int(dbparameters_->statements_[DEL_ZONE_RECORDS],
- 1, zone_info.second) != SQLITE_OK) {
+ sqlite3_stmt* stmt = dbparameters_->getStatement(DEL_ZONE_RECORDS);
+ sqlite3_clear_bindings(stmt);
+ if (sqlite3_bind_int(stmt, 1, zone_info.second) != SQLITE_OK) {
isc_throw(DataSourceError,
"failed to bind SQLite3 parameter: " <<
sqlite3_errmsg(dbparameters_->db_));
@@ -635,7 +666,7 @@ void
doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
COLUMNS_TYPE update_params, const char* exec_desc)
{
- sqlite3_stmt* const stmt = dbparams.statements_[stmt_id];
+ sqlite3_stmt* const stmt = dbparams.getStatement(stmt_id);
StatementProcessor executer(dbparams, stmt_id, exec_desc);
int param_id = 0;
@@ -681,34 +712,98 @@ SQLite3Accessor::deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
*dbparameters_, DEL_RECORD, params, "delete record from zone");
}
+void
+SQLite3Accessor::addRecordDiff(int zone_id, uint32_t serial,
+ DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT])
+{
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "adding record diff without update "
+ "transaction on " << getDBName());
+ }
+ if (zone_id != dbparameters_->updated_zone_id) {
+ isc_throw(DataSourceError, "bad zone ID for adding record diff on "
+ << getDBName() << ": " << zone_id << ", must be "
+ << dbparameters_->updated_zone_id);
+ }
+
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ADD_RECORD_DIFF);
+ StatementProcessor executer(*dbparameters_, ADD_RECORD_DIFF,
+ "add record diff");
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int64(stmt, ++param_id, serial)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int(stmt, ++param_id, operation)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ for (int i = 0; i < DIFF_PARAM_COUNT; ++i) {
+ if (sqlite3_bind_text(stmt, ++param_id, params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ }
+ executer.exec();
+}
+
+vector<vector<string> >
+SQLite3Accessor::getRecordDiff(int zone_id) {
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(GET_RECORD_DIFF);
+ sqlite3_bind_int(stmt, 1, zone_id);
+
+ vector<vector<string> > result;
+ while (sqlite3_step(stmt) == SQLITE_ROW) {
+ vector<string> row_result;
+ for (int i = 0; i < 6; ++i) {
+ row_result.push_back(convertToPlainChar(sqlite3_column_text(stmt,
+ i),
+ dbparameters_->db_));
+ }
+ result.push_back(row_result);
+ }
+ sqlite3_reset(stmt);
+
+ return (result);
+}
+
std::string
SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
const
{
- sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
- sqlite3_clear_bindings(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(FIND_PREVIOUS);
+ sqlite3_reset(stmt);
+ sqlite3_clear_bindings(stmt);
- if (sqlite3_bind_int(dbparameters_->statements_[FIND_PREVIOUS], 1,
- zone_id) != SQLITE_OK) {
+ if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
" to SQL statement (find previous): " <<
sqlite3_errmsg(dbparameters_->db_));
}
- if (sqlite3_bind_text(dbparameters_->statements_[FIND_PREVIOUS], 2,
- rname.c_str(), -1, SQLITE_STATIC) != SQLITE_OK) {
+ if (sqlite3_bind_text(stmt, 2, rname.c_str(), -1, SQLITE_STATIC) !=
+ SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind name " << rname <<
" to SQL statement (find previous): " <<
sqlite3_errmsg(dbparameters_->db_));
}
std::string result;
- const int rc = sqlite3_step(dbparameters_->statements_[FIND_PREVIOUS]);
+ const int rc = sqlite3_step(stmt);
if (rc == SQLITE_ROW) {
// We found it
- result = convertToPlainChar(sqlite3_column_text(dbparameters_->
- statements_[FIND_PREVIOUS], 0), dbparameters_->db_);
+ result = convertToPlainChar(sqlite3_column_text(stmt, 0),
+ dbparameters_->db_);
}
- sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_reset(stmt);
if (rc == SQLITE_DONE) {
// No NSEC records here, this DB doesn't support DNSSEC or
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 3d1c85d..6b5369c 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -157,6 +157,23 @@ public:
virtual void deleteRecordInZone(
const std::string (¶ms)[DEL_PARAM_COUNT]);
+ /// This derived version of the method prepares an SQLite3 statement
+ /// for adding the diff first time it's called, and if it fails throws
+ // an \c SQLite3Error exception.
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT]);
+
+ // A short term method for tests until we implement more complete
+ // API to retrieve diffs (#1330). It returns all records of the diffs
+ // table whose zone_id column is identical to the given value.
+ // Since this is a short term workaround, it ignores some corner cases
+ // (such as an SQLite3 execution failure) and is not very efficient,
+ // in favor of brevity. Once #1330 is completed, this method must be
+ // removed, and the tests using this method must be rewritten using the
+ // official API.
+ std::vector<std::vector<std::string> > getRecordDiff(int zone_id);
+
/// The SQLite3 implementation of this method returns a string starting
/// with a fixed prefix of "sqlite3_" followed by the DB file name
/// removing any path name. For example, for the DB file
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 3d2ba6d..e5cca0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -76,4 +76,5 @@ EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
EXTRA_DIST += testdata/rwtest.sqlite3
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 9775321..1a471bf 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,7 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <boost/foreach.hpp>
+#include <boost/shared_ptr.hpp>
#include <gtest/gtest.h>
@@ -33,7 +33,10 @@
using namespace isc::datasrc;
using namespace std;
-using namespace boost;
+// don't import the entire boost namespace. It will unexpectedly hide uint32_t
+// for some systems.
+using boost::shared_ptr;
+using boost::dynamic_pointer_cast;
using namespace isc::dns;
namespace {
@@ -233,6 +236,8 @@ public:
virtual void rollback() {}
virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+ virtual void addRecordDiff(int, uint32_t, DiffOperation,
+ const std::string (&)[DIFF_PARAM_COUNT]) {}
virtual const std::string& getDBName() const {
return (database_name_);
@@ -435,10 +440,22 @@ private:
data[DatabaseAccessor::TTL_COLUMN] = "300";
data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
return (true);
+ case 6:
+ data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 7:
+ data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "600";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
default:
ADD_FAILURE() <<
"Request past the end of iterator context";
- case 6:
+ case 8:
return (false);
}
}
@@ -1055,6 +1072,16 @@ TYPED_TEST(DatabaseClientTest, iterator) {
this->expected_rdatas_.push_back("2001:db8::2");
checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::AAAA(),
RRTTL(300), this->expected_rdatas_);
+
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ checkRRset(rrset, Name("ttldiff.example.org"), this->qclass_, RRType::A(),
+ RRTTL(300), this->expected_rdatas_);
+
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
}
// This has inconsistent TTL in the set (the rest, like nonsense in
@@ -1195,6 +1222,60 @@ doFindTest(ZoneFinder& finder,
}
}
+// When asking for an RRset where RRs somehow have different TTLs, it should
+// convert to the lowest one.
+TEST_F(MockDatabaseClientTest, ttldiff) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+ // Walk through the full iterator, we should see 1 rrset with name
+ // ttldiff1.example.org., and two rdatas. Same for ttldiff2
+ Name name("ttldiff.example.org.");
+ bool found = false;
+ //bool found2 = false;
+ ConstRRsetPtr rrset = it->getNextRRset();
+ while(rrset != ConstRRsetPtr()) {
+ if (rrset->getName() == name) {
+ ASSERT_FALSE(found);
+ ASSERT_EQ(2, rrset->getRdataCount());
+ ASSERT_EQ(RRTTL(300), rrset->getTTL());
+ found = true;
+ }
+ rrset = it->getNextRRset();
+ }
+ ASSERT_TRUE(found);
+}
+
+// Unless we ask for individual RRs in our iterator request. In that case
+// every RR should go into its own 'rrset'
+TEST_F(MockDatabaseClientTest, ttldiff_no_adjust_ttl) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), false));
+
+ // Walk through the full iterator, we should see 1 rrset with name
+ // ttldiff1.example.org., and two rdatas. Same for ttldiff2
+ Name name("ttldiff.example.org.");
+ int found1 = false;
+ int found2 = false;
+ ConstRRsetPtr rrset = it->getNextRRset();
+ while(rrset != ConstRRsetPtr()) {
+ if (rrset->getName() == name) {
+ ASSERT_EQ(1, rrset->getRdataCount());
+ // We should find 1 'rrset' with TTL 300 and one with TTL 600
+ if (rrset->getTTL() == RRTTL(300)) {
+ ASSERT_FALSE(found1);
+ found1 = true;
+ } else if (rrset->getTTL() == RRTTL(600)) {
+ ASSERT_FALSE(found2);
+ found2 = true;
+ } else {
+ FAIL() << "Found unexpected TTL: " <<
+ rrset->getTTL().toText();
+ }
+ }
+ rrset = it->getNextRRset();
+ }
+ ASSERT_TRUE(found1);
+ ASSERT_TRUE(found2);
+}
+
TYPED_TEST(DatabaseClientTest, find) {
shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 5d66737..90b2ac1 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -22,6 +22,7 @@
#include <dns/rrclass.h>
#include <gtest/gtest.h>
+#include <boost/lexical_cast.hpp>
#include <boost/scoped_ptr.hpp>
#include <fstream>
#include <sqlite3.h>
@@ -29,6 +30,7 @@
using namespace std;
using namespace isc::datasrc;
using boost::shared_ptr;
+using boost::lexical_cast;
using isc::data::ConstElementPtr;
using isc::data::Element;
using isc::dns::RRClass;
@@ -214,8 +216,7 @@ TEST(SQLite3Open, getDBNameExampleROOT) {
EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
}
-// Simple function to cound the number of records for
-// any name
+// Simple function to match records
void
checkRecordRow(const std::string columns[],
const std::string& field0,
@@ -518,6 +519,7 @@ protected:
std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+ std::string diff_params[DatabaseAccessor::DIFF_PARAM_COUNT];
vector<const char* const*> expected_stored; // placeholder for checkRecords
vector<const char* const*> empty_stored; // indicate no corresponding data
@@ -844,4 +846,270 @@ TEST_F(SQLite3Update, concurrentTransactions) {
accessor->commit();
another_accessor->commit();
}
+
+//
+// Commonly used data for diff related tests. The last two entries are
+// a textual representation of "version" and a textual representation of
+// diff operation (either DIFF_ADD_TEXT or DIFF_DELETE_TEXT). We use this
+// format for the convenience of generating test data and checking the results.
+//
+const char* const DIFF_ADD_TEXT = "0";
+const char* const DIFF_DELETE_TEXT = "1";
+const char* const diff_begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_del_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.1", "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_end_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ "1300", DIFF_ADD_TEXT
+};
+const char* const diff_add_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.10", "1234", DIFF_ADD_TEXT
+};
+
+// The following two are helper functions to convert textual test data
+// to integral zone ID and diff operation.
+int
+getVersion(const char* const diff_data[]) {
+ return (lexical_cast<int>(diff_data[DatabaseAccessor::DIFF_PARAM_COUNT]));
+}
+
+DatabaseAccessor::DiffOperation
+getOperation(const char* const diff_data[]) {
+ return (static_cast<DatabaseAccessor::DiffOperation>(
+ lexical_cast<int>(
+ diff_data[DatabaseAccessor::DIFF_PARAM_COUNT + 1])));
+}
+
+// Common checker function that compares expected and actual sequence of
+// diffs.
+void
+checkDiffs(const vector<const char* const*>& expected,
+ const vector<vector<string> >& actual)
+{
+ EXPECT_EQ(expected.size(), actual.size());
+ const size_t n_diffs = std::min(expected.size(), actual.size());
+ for (size_t i = 0; i < n_diffs; ++i) {
+ for (int j = 0; j < actual[i].size(); ++j) {
+ EXPECT_EQ(expected[i][j], actual[i][j]);
+ }
+ }
+}
+
+TEST_F(SQLite3Update, addRecordDiff) {
+ // A simple case of adding diffs: just changing the SOA, and confirm
+ // the diffs are stored as expected.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Until the diffs are committed, they are not visible to other accessors.
+ EXPECT_TRUE(another_accessor->getRecordDiff(zone_id).empty());
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ // Now it should be visible to others, too.
+ checkDiffs(expected_stored, another_accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addRecordOfLargeSerial) {
+ // This is essentially the same as the previous test, but using a
+ // very large "version" (SOA serial), which is actually the possible
+ // largest value to confirm the internal code doesn't have an overflow bug
+ // or other failure due to the larger value.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ const char* const begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 4294967295 3600 1800 2419200 7200",
+ "4294967295", DIFF_DELETE_TEXT
+ };
+
+ copy(begin_data, begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ // For "serial" parameter, we intentionally hardcode the value rather
+ // than converting it from the data.
+ accessor->addRecordDiff(zone_id, 0xffffffff, getOperation(diff_begin_data),
+ diff_params);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithoutUpdate) {
+ // Right now we require startUpdateZone() prior to performing
+ // addRecordDiff.
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+
+ // For now, we don't allow adding diffs in a general transaction either.
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffWithBadZoneID) {
+ // For now, we require zone ID passed to addRecordDiff be equal to
+ // that for the zone being updated.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id + 1,
+ getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffRollback) {
+ // Rollback tentatively added diffs. This is no different from the
+ // update case, but we test it explicitly just in case.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+ accessor->rollback();
+
+ EXPECT_TRUE(accessor->getRecordDiff(zone_id).empty());
+}
+
+TEST_F(SQLite3Update, addDiffInBadOrder) {
+ // At this level, the API is naive, and doesn't care if the diff sequence
+ // is a valid IXFR order.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Add diff of 'end', then 'begin'
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_begin_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithUpdate) {
+ // A more realistic example: add corresponding diffs while updating zone.
+ // Implementation wise, there should be no reason this could fail if
+ // the basic tests so far pass. But we check it in case we miss something.
+
+ const char* const old_a_record[] = {
+ "dns01.example.com.", "A", "192.0.2.1"
+ };
+ const char* const new_a_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "192.0.2.10"
+ };
+ const char* const old_soa_record[] = {
+ "example.com.", "SOA",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ };
+ const char* const new_soa_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ };
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Delete SOA (and add that diff)
+ copy(old_soa_record, old_soa_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ // Delete A
+ copy(old_a_record, old_a_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_del_a_data, diff_del_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_del_a_data),
+ getOperation(diff_del_a_data), diff_params);
+
+ // Add SOA
+ copy(new_soa_record, new_soa_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Add A
+ copy(new_a_record, new_a_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_add_a_data, diff_add_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_add_a_data),
+ getOperation(diff_add_a_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_del_a_data);
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_add_a_data);
+
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithNoTable) {
+ // An attempt of adding diffs to an old version of database that doesn't
+ // have a diffs table. This will fail in preparing the statement.
+ initAccessor(SQLITE_DBFILE_EXAMPLE + ".nodiffs", "IN");
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ SQLite3Error);
+}
} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3 b/src/lib/datasrc/tests/testdata/test.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3 and b/src/lib/datasrc/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
index 8e6314e..b95a427 100644
--- a/src/lib/dhcp/libdhcp.cc
+++ b/src/lib/dhcp/libdhcp.cc
@@ -14,16 +14,17 @@
#include <boost/shared_array.hpp>
#include <boost/shared_ptr.hpp>
-#include "dhcp/libdhcp.h"
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
#include "config.h"
-#include "dhcp6.h"
-
-#include "option.h"
-#include "option6_ia.h"
-#include "option6_iaaddr.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
using namespace std;
using namespace isc::dhcp;
+using namespace isc::util;
// static array with factories for options
std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
@@ -32,7 +33,7 @@ unsigned int
LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
unsigned int buf_len,
unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options) {
+ isc::dhcp::Option::OptionCollection& options) {
if (offset + parse_len > buf_len) {
isc_throw(OutOfRange, "Option parse failed. Tried to parse "
<< parse_len << " bytes at offset " << offset
@@ -83,13 +84,41 @@ LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
return (offset);
}
+void
+LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options) {
+ size_t offset = 0;
+
+ // 2 - header of DHCPv4 option
+ while (offset + 2 <= buf.size()) {
+ uint8_t opt_type = buf[offset++];
+ uint8_t opt_len = buf[offset++];
+ if (offset + opt_len > buf.size() ) {
+ isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+ << offset + opt_len << " bytes from " << buf.size()
+ << "-byte long buffer.");
+ }
+
+ boost::shared_ptr<Option> opt;
+ switch(opt_type) {
+ default:
+ opt = boost::shared_ptr<Option>(new Option(Option::V4, opt_type,
+ buf.begin()+offset,
+ buf.begin()+offset+opt_len));
+ }
+
+ options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+ offset += opt_len;
+ }
+}
+
unsigned int
LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
unsigned int data_len,
unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options) {
+ const isc::dhcp::Option::OptionCollection& options) {
try {
- for (isc::dhcp::Option::Option6Collection::const_iterator it = options.begin();
+ for (Option::OptionCollection::const_iterator it = options.begin();
it != options.end();
++it) {
unsigned short opt_len = (*it).second->len();
@@ -97,7 +126,7 @@ LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
isc_throw(OutOfRange, "Failed to build option " <<
(*it).first << ": out of buffer");
}
- offset = (*it).second->pack(data, data_len, offset);
+ offset = it->second->pack(data, data_len, offset);
}
}
catch (const Exception& e) {
@@ -107,6 +136,17 @@ LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
return (offset);
}
+void
+LibDHCP::packOptions(isc::util::OutputBuffer& buf,
+ const Option::OptionCollection& options) {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end();
+ ++it) {
+ it->second->pack4(buf);
+ }
+}
+
+
bool
LibDHCP::OptionFactoryRegister(Option::Universe u,
unsigned short opt_type,
diff --git a/src/lib/dhcp/libdhcp.h b/src/lib/dhcp/libdhcp.h
index c2ac949..468e6bb 100644
--- a/src/lib/dhcp/libdhcp.h
+++ b/src/lib/dhcp/libdhcp.h
@@ -16,7 +16,8 @@
#define LIBDHCP_H_
#include <iostream>
-#include "dhcp/pkt6.h"
+#include <util/buffer.h>
+#include <dhcp/pkt6.h>
namespace isc {
namespace dhcp {
@@ -39,8 +40,27 @@ public:
static unsigned int
packOptions6(boost::shared_array<uint8_t> buf, unsigned int buf_len,
unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options);
+ const isc::dhcp::Option::OptionCollection& options);
+
+ /// @brief Stores options in a buffer.
+ ///
+ /// Stores all options defined in options containers in a on-wire
+ /// format in output buffer specified by buf.
+ ///
+ /// May throw different exceptions if option assembly fails. There
+ /// may be different reasons (option too large, option malformed,
+ /// too many options etc.)
+ ///
+ /// @param buf
+ /// @param options
+ static void
+ packOptions(isc::util::OutputBuffer& buf,
+ const isc::dhcp::Option::OptionCollection& options);
+
+ static void
+ unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options);
///
/// Parses provided buffer and creates Option objects.
///
@@ -57,7 +77,7 @@ public:
static unsigned int
unpackOptions6(const boost::shared_array<uint8_t> buf, unsigned int buf_len,
unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options_);
+ isc::dhcp::Option::OptionCollection& options_);
///
/// Registers factory method that produces options of specific option types.
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index dd45c34..daef288 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -29,50 +29,117 @@ using namespace isc::dhcp;
using namespace isc::util;
Option::Option(Universe u, unsigned short type)
- :universe_(u), type_(type), data_len_(0) {
-
+ :universe_(u), type_(type) {
+ if ((u == V4) && (type > 255)) {
+ isc_throw(BadValue, "Can't create V4 option of type "
+ << type << ", V4 options are in range 0..255");
+ }
}
Option::Option(Universe u, unsigned short type,
const boost::shared_array<uint8_t>& buf,
unsigned int offset, unsigned int len)
- :universe_(u), type_(type), data_(buf),
- data_len_(len), offset_(offset)
- {
+ :universe_(u), type_(type),
+ offset_(offset)
+{
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + len);
+
+ check();
+}
+
+Option::Option(Universe u, unsigned short type, std::vector<uint8_t>& data)
+ :universe_(u), type_(type), data_(data) {
+ check();
+}
- // sanity checks
- // TODO: universe must be in V4 and V6
+Option::Option(Universe u, uint16_t type, vector<uint8_t>::const_iterator first,
+ vector<uint8_t>::const_iterator last)
+ :universe_(u), type_(type), data_(std::vector<uint8_t>(first,last)) {
+ check();
+}
+
+void
+Option::check() {
+ if ( (universe_ != V4) && (universe_ != V6) ) {
+ isc_throw(BadValue, "Invalid universe type specified."
+ << "Only V4 and V6 are allowed.");
+ }
+
+ if (universe_ == V4) {
+
+ if (type_ > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option type " << type_ << " is too big."
+ << "For DHCPv4 allowed type range is 0..255");
+ } else if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+ }
+
+ // no need to check anything for DHCPv6. It allows full range (0-64k) of
+ // both types and data size.
}
unsigned int
Option::pack(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
+ if (universe_ != V6) {
+ isc_throw(BadValue, "Failed to pack " << type_ << " option. Do not "
+ << "use this method for options other than DHCPv6.");
+ }
+ return pack6(buf, buf_len, offset);
+}
+
+void
+Option::pack4(isc::util::OutputBuffer& buf) {
switch (universe_) {
- case V4:
- return pack4(buf, buf_len, offset);
+ case V4: {
+ if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big."
+ << "At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
+
+ buf.writeData(&data_[0], data_.size());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
+ }
case V6:
- return pack6(buf, buf_len, offset);
+ /// TODO: Do we need a sanity check for option size here?
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - getHeaderLen());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
default:
- isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+ isc_throw(OutOfRange, "Invalid universe type" << universe_);
}
}
-
unsigned int
Option::pack4(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
- if ( offset+len() > buf_len ) {
+ if (offset + len() > buf_len) {
isc_throw(OutOfRange, "Failed to pack v4 option=" <<
- type_ << ",len=" << data_len_ << ": too small buffer.");
+ type_ << ",len=" << len() << ": too small buffer.");
}
uint8_t *ptr = &buf[offset];
ptr[0] = type_;
- ptr[1] = data_len_;
+ ptr[1] = len() - getHeaderLen();
ptr += 2;
- memcpy(ptr, &data_[0], data_len_);
+ memcpy(ptr, &data_[0], data_.size());
return offset + len();
}
@@ -81,22 +148,22 @@ unsigned int
Option::pack6(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
- if ( offset+len() > buf_len ) {
+ if (offset+len() > buf_len) {
isc_throw(OutOfRange, "Failed to pack v6 option=" <<
type_ << ",len=" << len() << ": too small buffer.");
}
- uint8_t * ptr = &buf[offset];
+ uint8_t* ptr = &buf[offset];
ptr = writeUint16(type_, ptr);
ptr = writeUint16(len() - getHeaderLen(), ptr);
- if (data_len_)
- memcpy(ptr, &data_[offset_], data_len_);
+ if (! data_.empty())
+ memcpy(ptr, &data_[0], data_.size());
// end of fixed part of this option
- offset += OPTION6_HDR_LEN + data_len_;
+ offset += OPTION6_HDR_LEN + data_.size();
return LibDHCP::packOptions6(buf, buf_len, offset, options_);
}
@@ -140,22 +207,27 @@ Option::unpack6(const boost::shared_array<uint8_t>& buf,
<< "): too small buffer.");
}
- data_ = buf;
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + parse_len);
+
offset_ = offset;
- data_len_ = buf_len;
- return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
- options_);
+ return (offset+parse_len);
+
+ //return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
+ // options_);
}
+/// Returns length of the complete option (data length + DHCPv4/DHCPv6
+/// option header)
unsigned short
Option::len() {
// length of the whole option is header and data stored in this option...
- int length = getHeaderLen() + data_len_;
+ int length = getHeaderLen() + data_.size();
// ... and sum of lengths of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -177,16 +249,9 @@ Option::valid() {
return (true);
}
-void
-isc::dhcp::Option::addOption(boost::shared_ptr<isc::dhcp::Option> opt) {
- options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(),
- opt));
-
-}
-
boost::shared_ptr<isc::dhcp::Option>
Option::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x =
+ isc::dhcp::Option::OptionCollection::const_iterator x =
options_.find(opt_type);
if ( x != options_.end() ) {
return (*x).second;
@@ -196,7 +261,7 @@ Option::getOption(unsigned short opt_type) {
bool
Option::delOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(opt_type);
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(opt_type);
if ( x != options_.end() ) {
options_.erase(x);
return true; // delete successful
@@ -208,22 +273,22 @@ Option::delOption(unsigned short opt_type) {
std::string Option::toText(int indent /* =0 */ ) {
std::stringstream tmp;
- for (int i=0; i<indent; i++)
+ for (int i = 0; i < indent; i++)
tmp << " ";
- tmp << "type=" << type_ << ", len=" << data_len_ << ": ";
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ": ";
- for (unsigned int i=0; i<data_len_; i++) {
+ for (unsigned int i = 0; i < data_.size(); i++) {
if (i) {
tmp << ":";
}
tmp << setfill('0') << setw(2) << hex
- << static_cast<unsigned short>(data_[offset_+i]);
+ << static_cast<unsigned short>(data_[i]);
}
// print suboptions
- for (Option6Collection::const_iterator opt=options_.begin();
- opt!=options_.end();
+ for (OptionCollection::const_iterator opt = options_.begin();
+ opt != options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
}
@@ -235,13 +300,9 @@ Option::getType() {
return type_;
}
-uint8_t*
+const std::vector<uint8_t>&
Option::getData() {
- if (data_len_) {
- return (&data_[offset_]);
- } else {
- return (NULL);
- }
+ return (data_);
}
unsigned short
@@ -255,6 +316,18 @@ Option::getHeaderLen() {
return 0; // should not happen
}
+void
+Option::addOption(boost::shared_ptr<Option> opt) {
+ if (universe_ == V4) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
Option::~Option() {
}
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 5be1be3..3822cf0 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -17,8 +17,10 @@
#include <string>
#include <map>
+#include <vector>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
+#include <util/buffer.h>
namespace isc {
namespace dhcp {
@@ -34,13 +36,9 @@ public:
/// defines option universe DHCPv4 or DHCPv6
enum Universe { V4, V6 };
- /// a collection of DHCPv4 options
- typedef std::map<unsigned int, boost::shared_ptr<Option> >
- Option4Collection;
-
/// a collection of DHCPv6 options
typedef std::multimap<unsigned int, boost::shared_ptr<Option> >
- Option6Collection;
+ OptionCollection;
/// @brief a factory function prototype
///
@@ -80,11 +78,55 @@ public:
const boost::shared_array<uint8_t>& buf, unsigned int offset,
unsigned int len);
- /// @brief writes option in wire-format to buf
+ /// @brief Constructor, used for received options.
+ ///
+ /// This constructor takes vector<uint8_t>& which is used in cases
+ /// when content of the option will be copied and stored within
+ /// option object. V4 Options follow that approach already.
+ /// TODO Migrate V6 options to that approach.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param data content of the option
+ Option(Universe u, unsigned short type, std::vector<uint8_t>& data);
+
+ /// @brief Constructor, used for received options.
+ ///
+ /// This contructor is similar to the previous one, but it does not take
+ /// the whole vector<uint8_t>, but rather subset of it.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+ /// vector<int8_t> myData;
+ /// Example usage: new Option(V4, 123, myData.begin()+1, myData.end()-1)
+ /// This will create DHCPv4 option of type 123 that contains data from
+ /// trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option(Universe u, uint16_t type,
+ std::vector<uint8_t>::const_iterator first,
+ std::vector<uint8_t>::const_iterator last);
+
+ /// @brief returns option universe (V4 or V6)
+ ///
+ /// @return universe type
+ Universe
+ getUniverse() { return universe_; };
+
+ /// @brief Writes option in wire-format to a buffer.
///
/// Writes option in wire-format to buffer, returns pointer to first unused
/// byte after stored option (that is useful for writing options one after
- /// another)
+ /// another). Used in DHCPv6 options.
+ ///
+ /// TODO: Migrate DHCPv6 code to pack(OutputBuffer& buf) version
///
/// @param buf pointer to a buffer
/// @param buf_len length of the buffer
@@ -93,10 +135,21 @@ public:
/// @return offset to first unused byte after stored option
///
virtual unsigned int
- pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
unsigned int offset);
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
+ ///
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
+ ///
+ /// @param buf output buffer (option will be stored there)
+ virtual void
+ pack4(isc::util::OutputBuffer& buf);
+
+
/// @brief Parses buffer.
///
/// Parses received buffer, returns offset to the first unused byte after
@@ -150,7 +203,7 @@ public:
/// Returns pointer to actual data.
///
/// @return pointer to actual data (or NULL if there is no data)
- virtual uint8_t*
+ virtual const std::vector<uint8_t>&
getData();
/// Adds a sub-option.
@@ -242,26 +295,31 @@ protected:
unsigned int offset,
unsigned int parse_len);
+ /// @brief A private method used for option correctness.
+ ///
+ /// It is used in constructors. In there are any problems detected
+ /// (like specifying type > 255 for DHCPv4 option), it will throw
+ /// BadValue or OutOfRange exceptions.
+ void check();
+
/// option universe (V4 or V6)
Universe universe_;
/// option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
unsigned short type_;
- /// shared pointer to a buffer (usually a part of packet)
- boost::shared_array<uint8_t> data_;
-
- /// length of data only. Use len() if you want to
- /// know proper length with option header overhead
- unsigned int data_len_;
+ /// contains content of this data
+ std::vector<uint8_t> data_;
+ /// TODO: Remove this field. vector<uint8_t> should be used
+ /// instead.
/// data is a shared_pointer that points out to the
/// whole packet. offset_ specifies where data for
/// this option begins.
unsigned int offset_;
/// collection for storing suboptions
- Option6Collection options_;
+ OptionCollection options_;
/// TODO: probably 2 different containers have to be used for v4 (unique
/// options) and v6 (options with the same type can repeat)
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index ee314db..46daee1 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -113,7 +113,7 @@ std::string Option6IA::toText(int indent /* = 0*/) {
tmp << " iaid=" << iaid_ << ", t1=" << t1_ << ", t2=" << t2_
<< " " << options_.size() << " sub-options:" << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -127,7 +127,7 @@ unsigned short Option6IA::len() {
OPTION6_IA_LEN /* option content (12) */;
// length of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index d5b57dd..4177714 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -108,7 +108,7 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
<< ", preferred-lft=" << preferred_ << ", valid-lft="
<< valid_ << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -123,7 +123,7 @@ unsigned short Option6IAAddr::len() {
// length of all suboptions
// TODO implement:
// protected: unsigned short Option::lenHelper(int header_size);
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index 0758539..ba07a10 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -88,7 +88,13 @@ size_t
Pkt4::len() {
size_t length = DHCPV4_PKT_HDR_LEN; // DHCPv4 header
- /// TODO: Include options here (ticket #1228)
+ // ... and sum of lengths of all options
+ for (Option::OptionCollection::const_iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
return (length);
}
@@ -109,7 +115,7 @@ Pkt4::pack() {
bufferOut_.writeData(sname_, MAX_SNAME_LEN);
bufferOut_.writeData(file_, MAX_FILE_LEN);
- /// TODO: Options should follow here (ticket #1228)
+ LibDHCP::packOptions(bufferOut_, options_);
return (true);
}
@@ -136,7 +142,11 @@ Pkt4::unpack() {
bufferIn_.readData(sname_, MAX_SNAME_LEN);
bufferIn_.readData(file_, MAX_FILE_LEN);
- /// TODO: Parse options here (ticket #1228)
+ size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+ vector<uint8_t> optsBuffer;
+ // fist use of readVector
+ bufferIn_.readVector(optsBuffer, opts_len);
+ LibDHCP::unpackOptions4(optsBuffer, options_);
return (true);
}
@@ -220,6 +230,26 @@ Pkt4::DHCPTypeToBootpType(uint8_t dhcpType) {
}
}
+void
+Pkt4::addOption(boost::shared_ptr<Option> opt) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt4::getOption(uint8_t type) {
+ Option::OptionCollection::const_iterator x = options_.find(type);
+ if (x!=options_.end()) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+
} // end of namespace isc::dhcp
} // end of namespace isc
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 75440de..8517091 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -264,7 +264,7 @@ public:
uint8_t
getHlen() const { return (hlen_); };
- /// @brief Returns chaddr field
+ /// @brief Returns chaddr field.
///
/// Note: This is 16 bytes long field. It doesn't have to be
/// null-terminated. Do no use strlen() or similar on it.
@@ -274,7 +274,7 @@ public:
getChaddr() const { return (chaddr_); };
- /// Returns reference to output buffer
+ /// @brief Returns reference to output buffer.
///
/// Returned buffer will contain reasonable data only for
/// output (TX) packet and after pack() was called. This buffer
@@ -287,6 +287,22 @@ public:
const isc::util::OutputBuffer&
getBuffer() const { return (bufferOut_); };
+ /// @brief Add an option.
+ ///
+ /// Throws BadValue if option with that type is already present.
+ ///
+ /// @param opt option to be added
+ void
+ addOption(boost::shared_ptr<Option> opt);
+
+ /// @brief Returns an option of specified type.
+ ///
+ /// @return returns option of requested type (or NULL)
+ /// if no such option is present
+
+ boost::shared_ptr<Option>
+ getOption(uint8_t opt_type);
+
protected:
/// converts DHCP message type to BOOTP op type
@@ -383,7 +399,7 @@ protected:
uint8_t msg_type_;
/// collection of options present in this message
- isc::dhcp::Option::Option4Collection options_;
+ isc::dhcp::Option::OptionCollection options_;
}; // Pkt4 class
} // isc::dhcp namespace
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
index 70be2bb..84c5729 100644
--- a/src/lib/dhcp/pkt6.cc
+++ b/src/lib/dhcp/pkt6.cc
@@ -63,7 +63,7 @@ unsigned short
Pkt6::len() {
unsigned int length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -197,7 +197,7 @@ Pkt6::toText() {
<< "]:" << remote_port_ << endl;
tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
<< dec << endl;
- for (isc::dhcp::Option::Option6Collection::iterator opt=options_.begin();
+ for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
opt != options_.end();
++opt) {
tmp << opt->second->toText() << std::endl;
@@ -207,7 +207,7 @@ Pkt6::toText() {
boost::shared_ptr<isc::dhcp::Option>
Pkt6::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x = options_.find(opt_type);
+ isc::dhcp::Option::OptionCollection::const_iterator x = options_.find(opt_type);
if (x!=options_.end()) {
return (*x).second;
}
@@ -221,7 +221,7 @@ Pkt6::addOption(boost::shared_ptr<Option> opt) {
bool
Pkt6::delOption(unsigned short type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(type);
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(type);
if (x!=options_.end()) {
options_.erase(x);
return (true); // delete successful
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
index d089444..019eeb2 100644
--- a/src/lib/dhcp/pkt6.h
+++ b/src/lib/dhcp/pkt6.h
@@ -180,7 +180,7 @@ public:
/// TODO Need to implement getOptions() as well
/// collection of options present in this message
- isc::dhcp::Option::Option6Collection options_;
+ isc::dhcp::Option::OptionCollection options_;
protected:
/// Builds on wire packet for TCP transmission.
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 41cabba..01799da 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -1,8 +1,6 @@
SUBDIRS = .
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
diff --git a/src/lib/dhcp/tests/libdhcp_unittest.cc b/src/lib/dhcp/tests/libdhcp_unittest.cc
index d9d7c47..11b618c 100644
--- a/src/lib/dhcp/tests/libdhcp_unittest.cc
+++ b/src/lib/dhcp/tests/libdhcp_unittest.cc
@@ -15,16 +15,16 @@
#include <config.h>
#include <iostream>
#include <sstream>
-
#include <arpa/inet.h>
#include <gtest/gtest.h>
-
-#include "dhcp/libdhcp.h"
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
#include "config.h"
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class LibDhcpTest : public ::testing::Test {
@@ -41,9 +41,9 @@ static const uint8_t packed[] = {
1, 1, 0, 1, 114 // opt5 (5 bytes)
};
-TEST_F(LibDhcpTest, packOptions6) {
+TEST(LibDhcpTest, packOptions6) {
boost::shared_array<uint8_t> buf(new uint8_t[512]);
- isc::dhcp::Option::Option6Collection opts; // list of options
+ isc::dhcp::Option::OptionCollection opts; // list of options
// generate content for options
for (int i = 0; i < 64; i++) {
@@ -70,13 +70,13 @@ TEST_F(LibDhcpTest, packOptions6) {
EXPECT_EQ(0, memcmp(&buf[100], packed, 35) );
}
-TEST_F(LibDhcpTest, unpackOptions6) {
+TEST(LibDhcpTest, unpackOptions6) {
// just couple of random options
// Option is used as a simple option implementation
// More advanced uses are validated in tests dedicated for
// specific derived classes.
- isc::dhcp::Option::Option6Collection options; // list of options
+ isc::dhcp::Option::OptionCollection options; // list of options
// we can't use packed directly, as shared_array would try to
// free it eventually
@@ -91,35 +91,35 @@ TEST_F(LibDhcpTest, unpackOptions6) {
EXPECT_EQ(35, offset); // parsed first 35 bytes (offset 0..34)
EXPECT_EQ(options.size(), 5); // there should be 5 options
- isc::dhcp::Option::Option6Collection::const_iterator x = options.find(12);
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
ASSERT_FALSE(x == options.end()); // option 1 should exist
EXPECT_EQ(12, x->second->getType()); // this should be option 12
ASSERT_EQ(9, x->second->len()); // it should be of length 9
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+4, 5)); // data len=5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+4, 5)); // data len=5
x = options.find(13);
ASSERT_FALSE(x == options.end()); // option 13 should exist
EXPECT_EQ(13, x->second->getType()); // this should be option 13
ASSERT_EQ(7, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+13, 3)); // data len=3
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+13, 3)); // data len=3
x = options.find(14);
ASSERT_FALSE(x == options.end()); // option 3 should exist
EXPECT_EQ(14, x->second->getType()); // this should be option 14
ASSERT_EQ(6, x->second->len()); // it should be of length 6
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+20, 2)); // data len=2
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+20, 2)); // data len=2
x = options.find(256);
ASSERT_FALSE(x == options.end()); // option 256 should exist
EXPECT_EQ(256, x->second->getType()); // this should be option 256
ASSERT_EQ(8, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+26, 4)); // data len=4
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+26, 4)); // data len=4
x = options.find(257);
ASSERT_FALSE(x == options.end()); // option 257 should exist
EXPECT_EQ(257, x->second->getType()); // this should be option 257
ASSERT_EQ(5, x->second->len()); // it should be of length 5
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+34, 1)); // data len=1
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+34, 1)); // data len=1
x = options.find(0);
EXPECT_TRUE(x == options.end()); // option 0 not found
@@ -134,4 +134,101 @@ TEST_F(LibDhcpTest, unpackOptions6) {
EXPECT_TRUE(x == options.end()); // option 32000 not found
}
+
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 254, 3, 30, 31, 32,
+ 128, 3, 40, 41, 42
+};
+
+TEST(LibDhcpTest, packOptions4) {
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].resize(3);
+ payload[i][0] = i*10;
+ payload[i][1] = i*10+1;
+ payload[i][2] = i*10+2;
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+ boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[3]));
+ boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[4]));
+
+ isc::dhcp::Option::OptionCollection opts; // list of options
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+ vector<uint8_t> expVect(v4Opts, v4Opts + sizeof(v4Opts));
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW (
+ LibDHCP::packOptions(buf, opts);
+ );
+ ASSERT_EQ(buf.getLength(), sizeof(v4Opts));
+ EXPECT_EQ(0, memcmp(v4Opts, buf.getData(), sizeof(v4Opts)));
+
+}
+
+TEST(LibDhcpTest, unpackOptions4) {
+
+ vector<uint8_t> packed(v4Opts, v4Opts + sizeof(v4Opts));
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ ASSERT_NO_THROW(
+ LibDHCP::unpackOptions4(packed, options);
+ );
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+ ASSERT_FALSE(x == options.end()); // option 1 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = options.find(13);
+ ASSERT_FALSE(x == options.end()); // option 1 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = options.find(14);
+ ASSERT_FALSE(x == options.end()); // option 3 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = options.find(254);
+ ASSERT_FALSE(x == options.end()); // option 3 should exist
+ EXPECT_EQ(254, x->second->getType()); // this should be option 254
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = options.find(128);
+ ASSERT_FALSE(x == options.end()); // option 3 should exist
+ EXPECT_EQ(128, x->second->getType()); // this should be option 254
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+22, 3)); // data len=3
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1);
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+}
+
}
diff --git a/src/lib/dhcp/tests/option6_addrlst_unittest.cc b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
index 2a2fc1a..60b618b 100644
--- a/src/lib/dhcp/tests/option6_addrlst_unittest.cc
+++ b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
@@ -15,14 +15,12 @@
#include <config.h>
#include <iostream>
#include <sstream>
-
#include <arpa/inet.h>
#include <gtest/gtest.h>
-
-#include "io_address.h"
-#include "dhcp/dhcp6.h"
-#include "dhcp/option.h"
-#include "dhcp/option6_addrlst.h"
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_addrlst.h>
using namespace std;
using namespace isc;
@@ -38,10 +36,10 @@ public:
TEST_F(Option6AddrLstTest, basic) {
- // limiting tests to just a 2001:db8::/32 as is *wrong*.
+ // Limiting tests to just a 2001:db8::/32 as is *wrong*.
// Good tests check corner cases as well.
// ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff checks
- // for integer overflow
+ // for integer overflow.
// ff02::face:b00c checks if multicast addresses
// can be represented properly.
@@ -111,6 +109,8 @@ TEST_F(Option6AddrLstTest, basic) {
opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf, 128, 0, 16);
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+
EXPECT_EQ(D6O_NAME_SERVERS, opt1->getType());
EXPECT_EQ(20, opt1->len());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
@@ -178,6 +178,7 @@ TEST_F(Option6AddrLstTest, constructors) {
EXPECT_NO_THROW(
opt1 = new Option6AddrLst(1234, IOAddress("::1"));
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
EXPECT_EQ(1234, opt1->getType());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
diff --git a/src/lib/dhcp/tests/option6_ia_unittest.cc b/src/lib/dhcp/tests/option6_ia_unittest.cc
index 91aaba4..3fd52f5 100644
--- a/src/lib/dhcp/tests/option6_ia_unittest.cc
+++ b/src/lib/dhcp/tests/option6_ia_unittest.cc
@@ -67,6 +67,7 @@ TEST_F(Option6IATest, basic) {
0,
12);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(D6O_IA_NA, opt->getType());
EXPECT_EQ(0xa1a2a3a4, opt->getIAID());
EXPECT_EQ(0x81020304, opt->getT1());
@@ -121,6 +122,7 @@ TEST_F(Option6IATest, simple) {
ia->setT1(2345);
ia->setT2(3456);
+ EXPECT_EQ(Option::V6, ia->getUniverse());
EXPECT_EQ(D6O_IA_NA, ia->getType());
EXPECT_EQ(1234, ia->getIAID());
EXPECT_EQ(2345, ia->getT1());
@@ -251,7 +253,7 @@ TEST_F(Option6IATest, suboptions_unpack) {
EXPECT_EQ(0xcafe, subopt->getType());
EXPECT_EQ(4, subopt->len());
// there should be no data at all
- EXPECT_EQ(static_cast<void*>(NULL), subopt->getData());
+ EXPECT_EQ(0, subopt->getData().size());
subopt = ia->getOption(1); // get option 1
ASSERT_FALSE(subopt); // should be NULL
diff --git a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
index d1f7628..81c3eb3 100644
--- a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
+++ b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
@@ -75,6 +75,8 @@ TEST_F(Option6IAAddrTest, basic) {
EXPECT_EQ(78, offset);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+
// 4 bytes header + 4 bytes content
EXPECT_EQ("2001:db8:1::dead:beef", opt->getAddress().toText());
EXPECT_EQ(1000, opt->getPreferred());
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index 49426ae..db3ee3b 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -19,6 +19,8 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
#include <boost/shared_ptr.hpp>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
#include "dhcp/dhcp6.h"
#include "dhcp/option.h"
@@ -26,6 +28,7 @@
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class OptionTest : public ::testing::Test {
@@ -35,26 +38,162 @@ public:
};
// v4 is not really implemented yet. A simple test will do for now
-TEST_F(OptionTest, basic4) {
+TEST_F(OptionTest, v4_basic) {
- Option* opt = new Option(Option::V4, 17);
+ Option* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option(Option::V4, 17);
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
EXPECT_EQ(17, opt->getType());
- EXPECT_EQ(static_cast<uint8_t*>(NULL), opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(2, opt->len()); // just v4 header
EXPECT_NO_THROW(
delete opt;
);
+ opt = 0;
+
+ // V4 options have type 0...255
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 256),
+ BadValue
+ );
+ if (opt) {
+ delete opt;
+ opt = 0;
+ }
+}
+
+const uint8_t dummyPayload[] =
+{ 1, 2, 3, 4};
+
+TEST_F(OptionTest, v4_data1) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ Option* opt = 0;
+
+ // create DHCPv4 option of type 123
+ // that contains 4 bytes of data
+ ASSERT_NO_THROW(
+ opt= new Option(Option::V4,
+ 123, // type
+ data);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), data.size());
+ EXPECT_TRUE(optData == data);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it will be implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// this is almost the same test as v4_data1, but it uses
+// different constructor
+TEST_F(OptionTest, v4_data2) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ vector<uint8_t> expData = data;
+
+ // Add fake data in front and end. Main purpose of this test is to check
+ // that only subset of the whole vector can be used for creating option.
+ data.insert(data.begin(), 56);
+ data.push_back(67);
+
+ // Data contains extra garbage at beginning and at the end. It should be
+ // ignored, as we pass interators to proper data. Only subset (limited by
+ // iterators) of the vector should be used.
+ // expData contains expected content (just valid data, without garbage).
+
+ Option* opt = 0;
+
+ // Create DHCPv4 option of type 123 that contains
+ // 4 bytes (sizeof(dummyPayload).
+ ASSERT_NO_THROW(
+ opt= new Option(Option::V4,
+ 123, // type
+ data.begin() + 1,
+ data.end() - 1);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), expData.size());
+ EXPECT_TRUE(optData == expData);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it will be implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(OptionTest, v4_toText) {
+
+ vector<uint8_t> buf(3);
+ buf[0] = 0;
+ buf[1] = 0xf;
+ buf[2] = 0xff;
+
+ Option opt(Option::V4, 253, buf);
+
+ EXPECT_EQ("type=253, len=3: 00:0f:ff", opt.toText());
}
// tests simple constructor
-TEST_F(OptionTest, basic6) {
+TEST_F(OptionTest, v6_basic) {
Option* opt = new Option(Option::V6, 1);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(1, opt->getType());
- EXPECT_EQ(static_cast<uint8_t*>(NULL), opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(4, opt->len()); // just v6 header
EXPECT_NO_THROW(
@@ -64,7 +203,7 @@ TEST_F(OptionTest, basic6) {
// tests contructor used in pkt reception
// option contains actual data
-TEST_F(OptionTest, data1) {
+TEST_F(OptionTest, v6_data1) {
boost::shared_array<uint8_t> buf(new uint8_t[32]);
for (int i = 0; i < 32; i++)
buf[i] = 100+i;
@@ -73,9 +212,10 @@ TEST_F(OptionTest, data1) {
3, // offset
7); // 7 bytes of data
EXPECT_EQ(333, opt->getType());
- ASSERT_EQ(&buf[3], opt->getData());
+
ASSERT_EQ(11, opt->len());
- EXPECT_EQ(0, memcmp(&buf[3], opt->getData(), 7) );
+ ASSERT_EQ(7, opt->getData().size());
+ EXPECT_EQ(0, memcmp(&buf[3], &opt->getData()[0], 7) );
int offset = opt->pack(buf, 32, 20);
EXPECT_EQ(31, offset);
@@ -96,7 +236,7 @@ TEST_F(OptionTest, data1) {
// another text that tests the same thing, just
// with different input parameters
-TEST_F(OptionTest, data2) {
+TEST_F(OptionTest, v6_data2) {
boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
for (int i = 0; i < 128; i++)
@@ -144,7 +284,7 @@ TEST_F(OptionTest, data2) {
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions1) {
+TEST_F(OptionTest, v6_suboptions1) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -184,13 +324,13 @@ TEST_F(OptionTest, suboptions1) {
);
}
-// check that an option can contain 2 suboptions:
+// check that an option can contain nested suboptions:
// opt1
// +----opt2
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions2) {
+TEST_F(OptionTest, v6_suboptions2) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -226,7 +366,7 @@ TEST_F(OptionTest, suboptions2) {
);
}
-TEST_F(OptionTest, addgetdel) {
+TEST_F(OptionTest, v6_addgetdel) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -266,7 +406,7 @@ TEST_F(OptionTest, addgetdel) {
}
-TEST_F(OptionTest, toText) {
+TEST_F(OptionTest, v6_toText) {
boost::shared_array<uint8_t> buf(new uint8_t[3]);
buf[0] = 0;
buf[1] = 0xf;
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index 60b4a7d..c89743f 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -20,16 +20,17 @@
#include <boost/static_assert.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
-
-#include "io_address.h"
-#include "dhcp/pkt4.h"
-#include "dhcp/dhcp4.h"
-#include "exceptions/exceptions.h"
+#include <util/buffer.h>
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
using namespace isc::asiolink;
using namespace isc::dhcp;
+using namespace isc::util;
using namespace boost;
namespace {
@@ -282,6 +283,9 @@ TEST(Pkt4Test, hwAddr) {
vector<uint8_t> mac;
uint8_t expectedChaddr[Pkt4::MAX_CHADDR_LEN];
+ // We resize vector to specified length. It is more natural for fixed-length
+ // field, than clear it (shrink size to 0) and push_back each element
+ // (growing length back to MAX_CHADDR_LEN).
mac.resize(Pkt4::MAX_CHADDR_LEN);
Pkt4* pkt = 0;
@@ -433,4 +437,126 @@ TEST(Pkt4Test, file) {
}
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 128, 3, 30, 31, 32,
+ 254, 3, 40, 41, 42
+};
+
+TEST(Pkt4Test, options) {
+ Pkt4* pkt = new Pkt4(DHCPOFFER, 0);
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].push_back(i*10);
+ payload[i].push_back(i*10+1);
+ payload[i].push_back(i*10+2);
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+ boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[3]));
+ boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[4]));
+
+ pkt->addOption(opt1);
+ pkt->addOption(opt2);
+ pkt->addOption(opt3);
+ pkt->addOption(opt4);
+ pkt->addOption(opt5);
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+ EXPECT_FALSE(pkt->getOption(127)); // no such option
+
+ // options are unique in DHCPv4. It should not be possible
+ // to add more than one option of the same type.
+ EXPECT_THROW(
+ pkt->addOption(opt1),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ const OutputBuffer& buf = pkt->getBuffer();
+ // check that all options are stored, they should take sizeof(v4Opts)
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+ buf.getLength());
+
+ // that that this extra data actually contain our options
+ const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
+ ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
+ EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+
+ EXPECT_NO_THROW(
+ delete pkt;
+ );
+}
+
+TEST(Pkt4Test, unpackOptions) {
+
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+ for (int i=0; i < sizeof(v4Opts); i++) {
+ expectedFormat.push_back(v4Opts[i]);
+ }
+
+ // now expectedFormat contains fixed format and 5 options
+
+ shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ expectedFormat.size()));
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+
+ shared_ptr<Option> x = pkt->getOption(12);
+ ASSERT_TRUE(x); // option 1 should exist
+ EXPECT_EQ(12, x->getType()); // this should be option 12
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = pkt->getOption(13);
+ ASSERT_TRUE(x); // option 13 should exist
+ EXPECT_EQ(13, x->getType()); // this should be option 13
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = pkt->getOption(14);
+ ASSERT_TRUE(x); // option 14 should exist
+ EXPECT_EQ(14, x->getType()); // this should be option 14
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = pkt->getOption(128);
+ ASSERT_TRUE(x); // option 3 should exist
+ EXPECT_EQ(128, x->getType()); // this should be option 254
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = pkt->getOption(254);
+ ASSERT_TRUE(x); // option 3 should exist
+ EXPECT_EQ(254, x->getType()); // this should be option 254
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
+}
+
} // end of anonymous namespace
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
index 0f110ba..968b24c 100644
--- a/src/lib/dhcp/tests/pkt6_unittest.cc
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -18,10 +18,10 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
-#include "io_address.h"
-#include "dhcp/option.h"
-#include "dhcp/pkt6.h"
-#include "dhcp/dhcp6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/dhcp6.h>
using namespace std;
using namespace isc;
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index 4043445..ce556df 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -25,6 +25,8 @@
#include "messagerenderer_python.h"
#include "name_python.h"
+#include <iostream>
+
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
@@ -97,7 +99,7 @@ int Name_init(s_Name* self, PyObject* args);
void Name_destroy(s_Name* self);
PyObject* Name_toWire(s_Name* self, PyObject* args);
-PyObject* Name_toText(s_Name* self);
+PyObject* Name_toText(s_Name* self, PyObject* args);
PyObject* Name_str(PyObject* self);
PyObject* Name_getLabelCount(s_Name* self);
PyObject* Name_at(s_Name* self, PyObject* args);
@@ -120,8 +122,9 @@ PyMethodDef Name_methods[] = {
"Returns the length" },
{ "get_labelcount", reinterpret_cast<PyCFunction>(Name_getLabelCount), METH_NOARGS,
"Returns the number of labels" },
- { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_NOARGS,
- "Returns the string representation" },
+ { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_VARARGS,
+ "Returns the string representation. The optional argument must be either"
+ "True of False. If True, the final dot will be omitted." },
{ "to_wire", reinterpret_cast<PyCFunction>(Name_toWire), METH_VARARGS,
"Converts the Name object to wire format.\n"
"The argument can be either a MessageRenderer or an object that "
@@ -278,8 +281,24 @@ Name_getLabelCount(s_Name* self) {
}
PyObject*
-Name_toText(s_Name* self) {
- return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+Name_toText(s_Name* self, PyObject* args) {
+ PyObject* omit_final_dot_obj = NULL;
+ if (PyArg_ParseTuple(args, "|O", &omit_final_dot_obj)) {
+ bool omit_final_dot = false;
+ if (omit_final_dot_obj != NULL) {
+ if (PyBool_Check(omit_final_dot_obj) != 0) {
+ omit_final_dot = (omit_final_dot_obj == Py_True);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Optional argument 1 of to_text() should be True of False");
+ return (NULL);
+ }
+ }
+ return (Py_BuildValue("s",
+ self->cppobj->toText(omit_final_dot).c_str()));
+ } else {
+ return (NULL);
+ }
}
PyObject*
diff --git a/src/lib/dns/python/tests/name_python_test.py b/src/lib/dns/python/tests/name_python_test.py
index b8e625a..5263412 100644
--- a/src/lib/dns/python/tests/name_python_test.py
+++ b/src/lib/dns/python/tests/name_python_test.py
@@ -121,6 +121,15 @@ class NameTest(unittest.TestCase):
self.assertEqual(".", str(self.name2))
self.assertEqual("something.completely.different.", self.name3.to_text())
+ self.assertEqual("example.com.", self.name1.to_text(False))
+ self.assertEqual("example.com", self.name1.to_text(True))
+
+ # make sure it does not behave unexpectedly on wrong arguments
+ self.assertRaises(TypeError, self.name1.to_text, True, 1)
+ self.assertRaises(TypeError, self.name1.to_text, 1)
+ self.assertRaises(TypeError, self.name1.to_text, [])
+ self.assertRaises(TypeError, self.name1.to_text, "foo")
+
def test_to_wire(self):
b1 = bytearray()
self.name1.to_wire(b1)
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 5924294..893bb8c 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -1,15 +1,8 @@
SUBDIRS = isc
-python_PYTHON = bind10_config.py
+nodist_python_PYTHON = bind10_config.py
pythondir = $(pyexecdir)
-# Explicitly define DIST_COMMON so ${python_PYTHON} is not included
-# as we don't want the generated file included in distributed tarfile.
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
-
-# When setting DIST_COMMON, then need to add the .in file too.
-EXTRA_DIST = bind10_config.py.in
-
CLEANFILES = bind10_config.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 69b17ed..e54b1a8 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -23,6 +23,10 @@ def reload():
global DATA_PATH
global PLUGIN_PATHS
global PREFIX
+ global LIBEXECDIR
+ LIBEXECDIR = ("@libexecdir@/@PACKAGE@"). \
+ replace("${exec_prefix}", "@exec_prefix@"). \
+ replace("${prefix}", "@prefix@")
BIND10_MSGQ_SOCKET_FILE = os.path.join("@localstatedir@",
"@PACKAGE_NAME@",
"msgq_socket").replace("${prefix}",
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index 43a7605..c0f1e32 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,4 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py sockcreator.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py
pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
new file mode 100644
index 0000000..603653b
--- /dev/null
+++ b/src/lib/python/isc/bind10/component.py
@@ -0,0 +1,597 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Module for managing components (abstraction of process). It allows starting
+them in given order, handling when they crash (what happens depends on kind
+of component) and shutting down. It also handles the configuration of this.
+
+Dependencies between them are not yet handled. It might turn out they are
+needed, in that case they will be added sometime in future.
+
+This framework allows for a single process to be started multiple times (by
+specifying multiple components with the same configuration). However, the rest
+of the system might not handle such situation well, so until it is made so,
+it would be better to start each process at most once.
+"""
+
+import isc.log
+from isc.log_messages.bind10_messages import *
+import time
+
+logger = isc.log.Logger("boss")
+DBG_TRACE_DATA = 20
+DBG_TRACE_DETAILED = 80
+
+START_CMD = 'start'
+STOP_CMD = 'stop'
+
+STARTED_OK_TIME = 10
+
+STATE_DEAD = 'dead'
+STATE_STOPPED = 'stopped'
+STATE_RUNNING = 'running'
+
+class BaseComponent:
+ """
+ This represents a single component. This one is an abstract base class.
+ There are some methods which should be left untouched, but there are
+ others which define the interface only and should be overriden in
+ concrete implementations.
+
+ The component is in one of the three states:
+ - Stopped - it is either not started yet or it was explicitly stopped.
+ The component is created in this state (it must be asked to start
+ explicitly).
+ - Running - after start() was called, it started successfully and is
+ now running.
+ - Dead - it failed and can not be resurrected.
+
+ Init
+ | stop()
+ | +-----------------------+
+ | | |
+ v | start() success |
+ Stopped --------+--------> Running <----------+
+ | | |
+ |failure | failed() |
+ | | |
+ v | |
+ +<-----------+ |
+ | |
+ | kind == dispensable or kind|== needed and failed late
+ +-----------------------------+
+ |
+ | kind == core or kind == needed and it failed too soon
+ v
+ Dead
+
+ Note that there are still situations which are not handled properly here.
+ We don't recognize a component that is starting up, but not ready yet, one
+ that is already shutting down, impossible to stop, etc. We need to add more
+ states in future to handle it properly.
+ """
+ def __init__(self, boss, kind):
+ """
+ Creates the component in not running mode.
+
+ The parameters are:
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. It may be one of:
+ * 'core' means the system can't run without it and it can't be
+ safely restarted. If it does not start, the system is brought
+ down. If it crashes, the system is turned off as well (with
+ non-zero exit status).
+ * 'needed' means the system is able to restart the component,
+ but it is vital part of the service (like auth server). If
+ it fails to start or crashes in less than 10s after the first
+ startup, the system is brought down. If it crashes later on,
+ it is restarted.
+ * 'dispensable' means the component should be running, but if it
+ doesn't start or crashes for some reason, the system simply tries
+ to restart it and keeps running.
+
+ Note that the __init__ method of child class should have these
+ parameters:
+
+ __init__(self, process, boss, kind, address=None, params=None)
+
+ The extra parameters are:
+ - `process` - which program should be started.
+ - `address` - the address on message buss, used to talk to the
+ component.
+ - `params` - parameters to the program.
+
+ The methods you should not override are:
+ - start
+ - stop
+ - failed
+ - running
+
+ You should override:
+ - _start_internal
+ - _stop_internal
+ - _failed_internal (if you like, the empty default might be suitable)
+ - name
+ - pid
+ - kill
+ """
+ if kind not in ['core', 'needed', 'dispensable']:
+ raise ValueError('Component kind can not be ' + kind)
+ self.__state = STATE_STOPPED
+ self._kind = kind
+ self._boss = boss
+
+ def start(self):
+ """
+ Start the component for the first time or restart it. It runs
+ _start_internal to actually start the component.
+
+ If you try to start an already running component, it raises ValueError.
+ """
+ if self.__state == STATE_DEAD:
+ raise ValueError("Can't resurrect already dead component")
+ if self.running():
+ raise ValueError("Can't start already running component")
+ logger.info(BIND10_COMPONENT_START, self.name())
+ self.__state = STATE_RUNNING
+ self.__start_time = time.time()
+ try:
+ self._start_internal()
+ except Exception as e:
+ logger.error(BIND10_COMPONENT_START_EXCEPTION, self.name(), e)
+ self.failed(None)
+ raise
+
+ def stop(self):
+ """
+ Stop the component. It calls _stop_internal to do the actual
+ stopping.
+
+ If you try to stop a component that is not running, it raises
+ ValueError.
+ """
+ # This is not tested. It talks with the outher world, which is out
+ # of scope of unittests.
+ if not self.running():
+ raise ValueError("Can't stop a component which is not running")
+ logger.info(BIND10_COMPONENT_STOP, self.name())
+ self.__state = STATE_STOPPED
+ self._stop_internal()
+
+ def failed(self, exit_code):
+ """
+ Notify the component it crashed. This will be called from boss object.
+
+ If you try to call failed on a component that is not running,
+ a ValueError is raised.
+
+ If it is a core component or needed component and it was started only
+ recently, the component will become dead and will ask the boss to shut
+ down with error exit status. A dead component can't be started again.
+
+ Otherwise the component will try to restart.
+
+ The exit code is used for logging. It might be None.
+
+ It calles _failed_internal internally.
+ """
+ logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
+ exit_code if exit_code is not None else "unknown")
+ if not self.running():
+ raise ValueError("Can't fail component that isn't running")
+ self.__state = STATE_STOPPED
+ self._failed_internal()
+ # If it is a core component or the needed component failed to start
+ # (including it stopped really soon)
+ if self._kind == 'core' or \
+ (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
+ self.__start_time):
+ self.__state = STATE_DEAD
+ logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
+ self._boss.component_shutdown(1)
+ # This means we want to restart
+ else:
+ logger.warn(BIND10_COMPONENT_RESTART, self.name())
+ self.start()
+
+ def running(self):
+ """
+ Informs if the component is currently running. It assumes the failed
+ is called whenever the component really fails and there might be some
+ time in between actual failure and the call, so this might be
+ inaccurate (it corresponds to the thing the object thinks is true, not
+ to the real "external" state).
+
+ It is not expected for this method to be overriden.
+ """
+ return self.__state == STATE_RUNNING
+
+ def _start_internal(self):
+ """
+ This method does the actual starting of a process. You need to override
+ this method to do the actual starting.
+
+ The ability to override this method presents some flexibility. It
+ allows processes started in a strange way, as well as components that
+ have no processes at all or components with multiple processes (in case
+ of multiple processes, care should be taken to make their
+ started/stopped state in sync and all the processes that can fail
+ should be registered).
+
+ You should register all the processes created by calling
+ self._boss.register_process.
+ """
+ pass
+
+ def _stop_internal(self):
+ """
+ This is the method that does the actual stopping of a component.
+ You need to provide it in a concrete implementation.
+
+ Also, note that it is a bad idea to raise exceptions from here.
+ Under such circumstance, the component will be considered stopped,
+ and the exception propagated, but we can't be sure it really is
+ dead.
+ """
+ pass
+
+ def _failed_internal(self):
+ """
+ This method is called from failed. You can replace it if you need
+ some specific behaviour when the component crashes. The default
+ implementation is empty.
+
+ Do not raise exceptions from here, please. The propper shutdown
+ would have not happened.
+ """
+ pass
+
+ def name(self):
+ """
+ Provides human readable name of the component, for logging and similar
+ purposes.
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def pid(self):
+ """
+ Provides a PID of a process, if the component is real running process.
+ This may return None in cases when there's no process involved with the
+ component or in case the component is not started yet.
+
+ However, it is expected the component preserves the pid after it was
+ stopped, to ensure we can log it when we ask it to be killed (in case
+ the process refused to stop willingly).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+ def kill(self, forcefull=False):
+ """
+ Kills the component.
+
+ If forcefull is true, it should do it in more direct and aggressive way
+ (for example by using SIGKILL or some equivalent). If it is false, more
+ peaceful way should be used (SIGTERM or equivalent).
+
+ You need to provide this method in a concrete implementation.
+ """
+ pass
+
+class Component(BaseComponent):
+ """
+ The most common implementation of a component. It can be used either
+ directly, and it will just start the process without anything special,
+ or slightly customised by passing a start_func hook to the __init__
+ to change the way it starts.
+
+ If such customisation isn't enough, you should inherit BaseComponent
+ directly. It is not recommended to override methods of this class
+ on one-by-one basis.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None,
+ start_func=None):
+ """
+ Creates the component in not running mode.
+
+ The parameters are:
+ - `process` is the name of the process to start.
+ - `boss` the boss object to plug into. The component needs to plug
+ into it to know when it failed, etc.
+ - `kind` is the kind of component. Refer to the documentation of
+ BaseComponent for details.
+ - `address` is the address on message bus. It is used to ask it to
+ shut down at the end. If you specialize the class for a component
+ that is shut down differently, it might be None.
+ - `params` is a list of parameters to pass to the process when it
+ starts. It is currently unused and this support is left out for
+ now.
+ - `start_func` is a function called when it is started. It is supposed
+ to start up the process and return a ProcInfo object describing it.
+ There's a sensible default if not provided, which just launches
+ the program without any special care.
+ """
+ BaseComponent.__init__(self, boss, kind)
+ self._process = process
+ self._start_func = start_func
+ self._address = address
+ self._params = params
+ self._procinfo = None
+
+ def _start_internal(self):
+ """
+ You can change the "core" of this function by setting self._start_func
+ to a function without parameters. Such function should start the
+ process and return the procinfo object describing the running process.
+
+ If you don't provide the _start_func, the usual startup by calling
+ boss.start_simple is performed.
+ """
+ # This one is not tested. For one, it starts a real process
+ # which is out of scope of unit tests, for another, it just
+ # delegates the starting to other function in boss (if a derived
+ # class does not provide an override function), which is tested
+ # by use.
+ if self._start_func is not None:
+ procinfo = self._start_func()
+ else:
+ # TODO Handle params, etc
+ procinfo = self._boss.start_simple(self._process)
+ self._procinfo = procinfo
+ self._boss.register_process(self.pid(), self)
+
+ def _stop_internal(self):
+ self._boss.stop_process(self._process, self._address)
+ # TODO Some way to wait for the process that doesn't want to
+ # terminate and kill it would prove nice (or add it to boss somewhere?)
+
+ def name(self):
+ """
+ Returns the name, derived from the process name.
+ """
+ return self._process
+
+ def pid(self):
+ return self._procinfo.pid if self._procinfo is not None else None
+
+ def kill(self, forcefull=False):
+ if self._procinfo is not None:
+ if forcefull:
+ self._procinfo.process.kill()
+ else:
+ self._procinfo.process.terminate()
+
+class Configurator:
+ """
+ This thing keeps track of configuration changes and starts and stops
+ components as it goes. It also handles the inital startup and final
+ shutdown.
+
+ Note that this will allow you to stop (by invoking reconfigure) a core
+ component. There should be some kind of layer protecting users from ever
+ doing so (users must not stop the config manager, message queue and stuff
+ like that or the system won't start again). However, if a user specifies
+ b10-auth as core, it is safe to stop that one.
+
+ The parameters are:
+ * `boss`: The boss we are managing for.
+ * `specials`: Dict of specially started components. Each item is a class
+ representing the component.
+
+ The configuration passed to it (by startup() and reconfigure()) is a
+ dictionary, each item represents one component that should be running.
+ The key is an unique identifier used to reference the component. The
+ value is a dictionary describing the component. All items in the
+ description is optional unless told otherwise and they are as follows:
+ * `special` - Some components are started in a special way. If it is
+ present, it specifies which class from the specials parameter should
+ be used to create the component. In that case, some of the following
+ items might be irrelevant, depending on the special component choosen.
+ If it is not there, the basic Component class is used.
+ * `process` - Name of the executable to start. If it is not present,
+ it defaults to the identifier of the component.
+ * `kind` - The kind of component, either of 'core', 'needed' and
+ 'dispensable'. This specifies what happens if the component fails.
+ This one is required.
+ * `address` - The address of the component on message bus. It is used
+ to shut down the component. All special components currently either
+ know their own address or don't need one and ignore it. The common
+ components should provide this.
+ * `params` - The command line parameters of the executable. Defaults
+ to no parameters. It is currently unused.
+ * `priority` - When starting the component, the components with higher
+ priority are started before the ones with lower priority. If it is
+ not present, it defaults to 0.
+ """
+ def __init__(self, boss, specials = {}):
+ """
+ Initializes the configurator, but nothing is started yet.
+
+ The boss parameter is the boss object used to start and stop processes.
+ """
+ self.__boss = boss
+ # These could be __private, but as we access them from within unittest,
+ # it's more comfortable to have them just _protected.
+
+ # They are tuples (configuration, component)
+ self._components = {}
+ self._running = False
+ self.__specials = specials
+
+ def __reconfigure_internal(self, old, new):
+ """
+ Does a switch from one configuration to another.
+ """
+ self._run_plan(self._build_plan(old, new))
+
+ def startup(self, configuration):
+ """
+ Starts the first set of processes. This configuration is expected
+ to be hardcoded from the boss itself to start the configuration
+ manager and other similar things.
+ """
+ if self._running:
+ raise ValueError("Trying to start the component configurator " +
+ "twice")
+ logger.info(BIND10_CONFIGURATOR_START)
+ self.__reconfigure_internal(self._components, configuration)
+ self._running = True
+
+ def shutdown(self):
+ """
+ Shuts everything down.
+
+ It is not expected that anyone would want to shutdown and then start
+ the configurator again, so we don't explicitly make sure that would
+ work. However, we are not avare of anything that would make it not
+ work either.
+ """
+ if not self._running:
+ raise ValueError("Trying to shutdown the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_STOP)
+ self._running = False
+ self.__reconfigure_internal(self._components, {})
+
+ def reconfigure(self, configuration):
+ """
+ Changes configuration from the current one to the provided. It
+ starts and stops all the components as needed (eg. if there's
+ a component that was not in the original configuration, it is
+ started, any component that was in the old and is not in the
+ new one is stopped).
+ """
+ if not self._running:
+ raise ValueError("Trying to reconfigure the component " +
+ "configurator while it's not yet running")
+ logger.info(BIND10_CONFIGURATOR_RECONFIGURE)
+ self.__reconfigure_internal(self._components, configuration)
+
+ def _build_plan(self, old, new):
+ """
+ Builds a plan how to transfer from the old configuration to the new
+ one. It'll be sorted by priority and it will contain the components
+ (already created, but not started). Each command in the plan is a dict,
+ so it can be extended any time in future to include whatever
+ parameters each operation might need.
+
+ Any configuration problems are expected to be handled here, so the
+ plan is not yet run.
+ """
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_BUILD, old, new)
+ plan = []
+ # Handle removals of old components
+ for cname in old.keys():
+ if cname not in new:
+ component = self._components[cname][1]
+ if component.running():
+ plan.append({
+ 'command': STOP_CMD,
+ 'component': component,
+ 'name': cname
+ })
+ # Handle transitions of configuration of what is here
+ for cname in new.keys():
+ if cname in old:
+ for option in ['special', 'process', 'kind', 'address',
+ 'params']:
+ if new[cname].get(option) != old[cname][0].get(option):
+ raise NotImplementedError('Changing configuration of' +
+ ' a running component is ' +
+ 'not yet supported. Remove' +
+ ' and re-add ' + cname +
+ ' to get the same effect')
+ # Handle introduction of new components
+ plan_add = []
+ for cname in new.keys():
+ if cname not in old:
+ component_config = new[cname]
+ creator = Component
+ if 'special' in component_config:
+ # TODO: Better error handling
+ creator = self.__specials[component_config['special']]
+ component = creator(component_config.get('process', cname),
+ self.__boss, component_config['kind'],
+ component_config.get('address'),
+ component_config.get('params'))
+ priority = component_config.get('priority', 0)
+ # We store tuples, priority first, so we can easily sort
+ plan_add.append((priority, {
+ 'component': component,
+ 'command': START_CMD,
+ 'name': cname,
+ 'config': component_config
+ }))
+ # Push the starts there sorted by priority
+ plan.extend([command for (_, command) in sorted(plan_add,
+ reverse=True,
+ key=lambda command:
+ command[0])])
+ return plan
+
+ def running(self):
+ """
+ Returns if the configurator is running (eg. was started by startup and
+ not yet stopped by shutdown).
+ """
+ return self._running
+
+ def _run_plan(self, plan):
+ """
+ Run a plan, created beforehand by _build_plan.
+
+ With the start and stop commands, it also adds and removes components
+ in _components.
+
+ Currently implemented commands are:
+ * start
+ * stop
+
+ The plan is a list of tasks, each task is a dictionary. It must contain
+ at last 'component' (a component object to work with) and 'command'
+ (the command to do). Currently, both existing commands need 'name' of
+ the component as well (the identifier from configuration). The 'start'
+ one needs the 'config' to be there, which is the configuration description
+ of the component.
+ """
+ done = 0
+ try:
+ logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_RUN, len(plan))
+ for task in plan:
+ component = task['component']
+ command = task['command']
+ logger.debug(DBG_TRACE_DETAILED, BIND10_CONFIGURATOR_TASK,
+ command, component.name())
+ if command == START_CMD:
+ component.start()
+ self._components[task['name']] = (task['config'],
+ component)
+ elif command == STOP_CMD:
+ if component.running():
+ component.stop()
+ del self._components[task['name']]
+ else:
+ # Can Not Happen (as the plans are generated by ourselves).
+ # Therefore not tested.
+ raise NotImplementedError("Command unknown: " + command)
+ done += 1
+ except:
+ logger.error(BIND10_CONFIGURATOR_PLAN_INTERRUPTED, done, len(plan))
+ raise
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 7522d4a..c681d07 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -18,6 +18,7 @@ import struct
import os
import copy
import subprocess
+import copy
from isc.log_messages.bind10_messages import *
from libutil_io_python import recv_fd
@@ -201,6 +202,9 @@ class WrappedSocket:
class Creator(Parser):
"""
This starts the socket creator and allows asking for the sockets.
+
+ Note: __process shouldn't be reset once created. See the note
+ of the SockCreator class for details.
"""
def __init__(self, path):
(local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -212,11 +216,20 @@ class Creator(Parser):
env['PATH'] = path
self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
stdin=remote.fileno(),
- stdout=remote2.fileno())
+ stdout=remote2.fileno(),
+ preexec_fn=self.__preexec_work)
remote.close()
remote2.close()
Parser.__init__(self, WrappedSocket(local))
+ def __preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # Put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everthing down by
+ # other means).
+ os.setpgrp()
+
def pid(self):
return self.__process.pid
@@ -224,4 +237,3 @@ class Creator(Parser):
logger.warn(BIND10_SOCKCREATOR_KILL)
if self.__process is not None:
self.__process.kill()
- self.__process = None
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
new file mode 100644
index 0000000..edf320f
--- /dev/null
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.bind10.component import Component, BaseComponent
+import isc.bind10.sockcreator
+from bind10_config import LIBEXECDIR
+import os
+import posix
+import isc.log
+from isc.log_messages.bind10_messages import *
+
+logger = isc.log.Logger("boss")
+
+class SockCreator(BaseComponent):
+ """
+ The socket creator component. Will start and stop the socket creator
+ accordingly.
+
+ Note: _creator shouldn't be reset explicitly once created. The
+ underlying Popen object would then wait() the child process internally,
+ which breaks the assumption of the boss, who is expecting to see
+ the process die in waitpid().
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.__creator = None
+
+ def _start_internal(self):
+ self._boss.curproc = 'b10-sockcreator'
+ self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
+ os.environ['PATH'])
+ self._boss.register_process(self.pid(), self)
+ self._boss.log_started(self.pid())
+
+ def _stop_internal(self):
+ self.__creator.terminate()
+
+ def name(self):
+ return "Socket creator"
+
+ def pid(self):
+ """
+ Pid of the socket creator. It is provided differently from a usual
+ component.
+ """
+ return self.__creator.pid() if self.__creator else None
+
+ def kill(self, forcefull=False):
+ # We don't really care about forcefull here
+ if self.__creator:
+ self.__creator.kill()
+
+class Msgq(Component):
+ """
+ The message queue. Starting is passed to boss, stopping is not supported
+ and we leave the boss kill it by signal.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, None, None,
+ boss.start_msgq)
+
+ def _stop_internal(self):
+ """
+ We can't really stop the message queue, as many processes may need
+ it for their shutdown and it doesn't have a shutdown command anyway.
+ But as it is stateless, it's OK to kill it.
+
+ So we disable this method (as the only time it could be called is
+ during shutdown) and wait for the boss to kill it in the next shutdown
+ step.
+
+ This actually breaks the recommendation at Component we shouldn't
+ override its methods one by one. This is a special case, because
+ we don't provide a different implementation, we completely disable
+ the method by providing an empty one. This can't hurt the internals.
+ """
+ pass
+
+class CfgMgr(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'ConfigManager',
+ None, boss.start_cfgmgr)
+
+class Auth(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Auth', None,
+ boss.start_auth)
+
+class Resolver(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Resolver', None,
+ boss.start_resolver)
+
+class CmdCtl(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Cmdctl', None,
+ boss.start_cmdctl)
+
+class XfrIn(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Xfrin', None,
+ boss.start_xfrin)
+
+class XfrOut(Component):
+ def __init__(self, process, boss, kind, address=None, params=None):
+ Component.__init__(self, process, boss, kind, 'Xfrout', None,
+ boss.start_xfrout)
+
+class SetUID(BaseComponent):
+ """
+ This is a pseudo-component which drops root privileges when started
+ and sets the uid stored in boss.
+
+ This component does nothing when stopped.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.uid = boss.uid
+
+ def _start_internal(self):
+ if self.uid is not None:
+ logger.info(BIND10_SETUID, self.uid)
+ posix.setuid(self.uid)
+
+ def _stop_internal(self): pass
+ def kill(self, forcefull=False): pass
+
+ def name(self):
+ return "Set UID"
+
+ def pid(self):
+ return None
+
+def get_specials():
+ """
+ List of specially started components. Each one should be the class than can
+ be created for that component.
+ """
+ return {
+ 'sockcreator': SockCreator,
+ 'msgq': Msgq,
+ 'cfgmgr': CfgMgr,
+ # TODO: Should these be replaced by configuration in config manager only?
+ # They should not have any parameters anyway
+ 'auth': Auth,
+ 'resolver': Resolver,
+ 'cmdctl': CmdCtl,
+ # FIXME: Temporary workaround before #1292 is done
+ 'xfrin': XfrIn,
+ # TODO: Remove when not needed, workaround before sockcreator works
+ 'setuid': SetUID
+ }
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df8ab30..df625b2 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py
+PYTESTS = sockcreator_test.py component_test.py
EXTRA_DIST = $(PYTESTS)
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
new file mode 100644
index 0000000..15fa470
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -0,0 +1,955 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Tests for the isc.bind10.component module and the
+isc.bind10.special_component module.
+"""
+
+import unittest
+import isc.log
+import time
+import copy
+from isc.bind10.component import Component, Configurator, BaseComponent
+import isc.bind10.special_component
+
+class TestError(Exception):
+ """
+ Just a private exception not known to anybody we use for our tests.
+ """
+ pass
+
+class BossUtils:
+ """
+ A class that brings some utilities for pretending we're Boss.
+ This is expected to be inherited by the testcases themselves.
+ """
+ def setUp(self):
+ """
+ Part of setup. Should be called by descendant's setUp.
+ """
+ self._shutdown = False
+ self._exitcode = None
+ # Back up the time function, we may want to replace it with something
+ self.__orig_time = isc.bind10.component.time.time
+
+ def tearDown(self):
+ """
+ Clean up after tests. If the descendant implements a tearDown, it
+ should call this method internally.
+ """
+ # Return the original time function
+ isc.bind10.component.time.time = self.__orig_time
+
+ def component_shutdown(self, exitcode=0):
+ """
+ Mock function to shut down. We just note we were asked to do so.
+ """
+ self._shutdown = True
+ self._exitcode = exitcode
+
+ def _timeskip(self):
+ """
+ Skip in time to future some 30s. Implemented by replacing the
+ time.time function in the tested module with function that returns
+ current time increased by 30.
+ """
+ tm = time.time()
+ isc.bind10.component.time.time = lambda: tm + 30
+
+ # Few functions that pretend to start something. Part of pretending of
+ # being boss.
+ def start_msgq(self):
+ pass
+
+ def start_cfgmgr(self):
+ pass
+
+ def start_auth(self):
+ pass
+
+ def start_resolver(self):
+ pass
+
+ def start_cmdctl(self):
+ pass
+
+ def start_xfrin(self):
+ pass
+
+class ComponentTests(BossUtils, unittest.TestCase):
+ """
+ Tests for the bind10.component.Component class
+ """
+ def setUp(self):
+ """
+ Pretend a newly started system.
+ """
+ BossUtils.setUp(self)
+ self._shutdown = False
+ self._exitcode = None
+ self.__start_called = False
+ self.__stop_called = False
+ self.__failed_called = False
+ self.__registered_processes = {}
+ self.__stop_process_params = None
+ self.__start_simple_params = None
+ # Pretending to be boss
+ self.uid = None
+ self.__uid_set = None
+
+ def __start(self):
+ """
+ Mock function, installed into the component into _start_internal.
+ This only notes the component was "started".
+ """
+ self.__start_called = True
+
+ def __stop(self):
+ """
+ Mock function, installed into the component into _stop_internal.
+ This only notes the component was "stopped".
+ """
+ self.__stop_called = True
+
+ def __fail(self):
+ """
+ Mock function, installed into the component into _failed_internal.
+ This only notes the component called the method.
+ """
+ self.__failed_called = True
+
+ def __fail_to_start(self):
+ """
+ Mock function. It can be installed into the component's _start_internal
+ to simulate a component that fails to start by raising an exception.
+ """
+ orig_started = self.__start_called
+ self.__start_called = True
+ if not orig_started:
+ # This one is from restart. Avoid infinite recursion for now.
+ # FIXME: We should use the restart scheduler to avoid it, not this.
+ raise TestError("Test error")
+
+ def __create_component(self, kind):
+ """
+ Convenience function that creates a component of given kind
+ and installs the mock functions into it so we can hook up into
+ its behaviour.
+
+ The process used is some nonsense, as this isn't used in this
+ kind of tests and we pretend to be the boss.
+ """
+ component = Component('No process', self, kind, 'homeless', [])
+ component._start_internal = self.__start
+ component._stop_internal = self.__stop
+ component._failed_internal = self.__fail
+ return component
+
+ def test_name(self):
+ """
+ Test the name provides whatever we passed to the constructor as process.
+ """
+ component = self.__create_component('core')
+ self.assertEqual('No process', component.name())
+
+ def test_guts(self):
+ """
+ Test the correct data are stored inside the component.
+ """
+ component = self.__create_component('core')
+ self.assertEqual(self, component._boss)
+ self.assertEqual("No process", component._process)
+ self.assertEqual(None, component._start_func)
+ self.assertEqual("homeless", component._address)
+ self.assertEqual([], component._params)
+
+ def __check_startup(self, component):
+ """
+ Check that nothing was called yet. A newly created component should
+ not get started right away, so this should pass after the creation.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertFalse(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # We can't stop or fail the component yet
+ self.assertRaises(ValueError, component.stop)
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_started(self, component):
+ """
+ Check the component was started, but not stopped anyhow yet.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertTrue(component.running())
+
+ def __check_dead(self, component):
+ """
+ Check the component is completely dead, and the server too.
+ """
+ self.assertTrue(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertEqual(1, self._exitcode)
+ self.assertFalse(component.running())
+ # Surely it can't be stopped when already dead
+ self.assertRaises(ValueError, component.stop)
+ # Nor started
+ self.assertRaises(ValueError, component.start)
+ # Nor it can fail again
+ self.assertRaises(ValueError, component.failed, 1)
+
+ def __check_restarted(self, component):
+ """
+ Check the component restarted successfully.
+
+ Currently, it is implemented as starting it again right away. This will
+ change, it will register itself into the restart schedule in boss. But
+ as the integration with boss is not clear yet, we don't know how
+ exactly that will happen.
+
+ Reset the self.__start_called to False before calling the function when
+ the component should fail.
+ """
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertFalse(self.__stop_called)
+ self.assertTrue(self.__failed_called)
+ self.assertTrue(component.running())
+ # Check it can't be started again
+ self.assertRaises(ValueError, component.start)
+
+ def __do_start_stop(self, kind):
+ """
+ This is a body of a test. It creates a component of given kind,
+ then starts it and stops it. It checks correct functions are called
+ and the component's status is correct.
+
+ It also checks the component can't be started/stopped twice.
+ """
+ # Create it and check it did not do any funny stuff yet
+ component = self.__create_component(kind)
+ self.__check_startup(component)
+ # Start it and check it called the correct starting functions
+ component.start()
+ self.__check_started(component)
+ # Check it can't be started twice
+ self.assertRaises(ValueError, component.start)
+ # Stop it again and check
+ component.stop()
+ self.assertFalse(self._shutdown)
+ self.assertTrue(self.__start_called)
+ self.assertTrue(self.__stop_called)
+ self.assertFalse(self.__failed_called)
+ self.assertFalse(component.running())
+ # Check it can't be stopped twice
+ self.assertRaises(ValueError, component.stop)
+ # Or failed
+ self.assertRaises(ValueError, component.failed, 1)
+ # But it can be started again if it is stopped
+ # (no more checking here, just it doesn't crash)
+ component.start()
+
+ def test_start_stop_core(self):
+ """
+ A start-stop test for core component. See do_start_stop.
+ """
+ self.__do_start_stop('core')
+
+ def test_start_stop_needed(self):
+ """
+ A start-stop test for needed component. See do_start_stop.
+ """
+ self.__do_start_stop('needed')
+
+ def test_start_stop_dispensable(self):
+ """
+ A start-stop test for dispensable component. See do_start_stop.
+ """
+ self.__do_start_stop('dispensable')
+
+ def test_start_fail_core(self):
+ """
+ Start and then fail a core component. It should stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Pretend the component died
+ component.failed(1)
+ # It should bring down the whole server
+ self.__check_dead(component)
+
+ def test_start_fail_core_later(self):
+ """
+ Start and then fail a core component, but let it be running for longer time.
+ It should still stop the whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ self._timeskip()
+ # Pretend the component died some time later
+ component.failed(1)
+ # Check the component is still dead
+ self.__check_dead(component)
+
+ def test_start_fail_needed(self):
+ """
+ Start and then fail a needed component. As this happens really soon after
+ being started, it is considered failure to start and should bring down the
+ whole server.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away.
+ component.failed(1)
+ self.__check_dead(component)
+
+ def test_start_fail_needed_later(self):
+ """
+ Start and then fail a needed component. But the failure is later on, so
+ we just restart it and will be happy.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self.__start_called = False
+ self._timeskip()
+ component.failed(1)
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable(self):
+ """
+ Start and then fail a dispensable component. Should just get restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail right away
+ self.__start_called = False
+ component.failed(1)
+ self.__check_restarted(component)
+
+ def test_start_fail_dispensable(self):
+ """
+ Start and then later on fail a dispensable component. Should just get
+ restarted.
+ """
+ # Just ordinary startup
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component.start()
+ self.__check_started(component)
+ # Make it fail later on
+ self.__start_called = False
+ self._timeskip()
+ component.failed(1)
+ self.__check_restarted(component)
+
+ def test_fail_core(self):
+ """
+ Failure to start a core component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('core')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_needed(self):
+ """
+ Failure to start a needed component. Should bring the system down
+ and the exception should get through.
+ """
+ component = self.__create_component('needed')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_dead(component)
+
+ def test_fail_dispensable(self):
+ """
+ Failure to start a dispensable component. The exception should get
+ through, but it should be restarted.
+ """
+ component = self.__create_component('dispensable')
+ self.__check_startup(component)
+ component._start_internal = self.__fail_to_start
+ self.assertRaises(TestError, component.start)
+ self.__check_restarted(component)
+
+ def test_bad_kind(self):
+ """
+ Test the component rejects nonsensical kinds. This includes bad
+ capitalization.
+ """
+ for kind in ['Core', 'CORE', 'nonsense', 'need ed', 'required']:
+ self.assertRaises(ValueError, Component, 'No process', self, kind)
+
+ def test_pid_not_running(self):
+ """
+ Test that a componet that is not yet started doesn't have a PID.
+ But it won't fail if asked for and return None.
+ """
+ for component_type in [Component,
+ isc.bind10.special_component.SockCreator,
+ isc.bind10.special_component.Msgq,
+ isc.bind10.special_component.CfgMgr,
+ isc.bind10.special_component.Auth,
+ isc.bind10.special_component.Resolver,
+ isc.bind10.special_component.CmdCtl,
+ isc.bind10.special_component.XfrIn,
+ isc.bind10.special_component.SetUID]:
+ component = component_type('none', self, 'needed')
+ self.assertIsNone(component.pid())
+
+ def test_kill_unstarted(self):
+ """
+ Try to kill the component if it's not started. Should not fail.
+
+ We do not try to kill a running component, as we should not start
+ it during unit tests.
+ """
+ component = Component('component', self, 'needed')
+ component.kill()
+ component.kill(True)
+
+ def register_process(self, pid, process):
+ """
+ Part of pretending to be a boss
+ """
+ self.__registered_processes[pid] = process
+
+ def test_component_attributes(self):
+ """
+ Test the default attributes of Component (not BaseComponent) and
+ some of the methods we might be allowed to call.
+ """
+ class TestProcInfo:
+ def __init__(self):
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ ['hello'], TestProcInfo)
+ self.assertEqual('component', component._process)
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._procinfo)
+ self.assertIsNone(component.pid())
+ self.assertEqual(['hello'], component._params)
+ self.assertEqual('Address', component._address)
+ self.assertFalse(component.running())
+ self.assertEqual({}, self.__registered_processes)
+ component.start()
+ self.assertTrue(component.running())
+ # Some versions of unittest miss assertIsInstance
+ self.assertTrue(isinstance(component._procinfo, TestProcInfo))
+ self.assertEqual(42, component.pid())
+ self.assertEqual(component, self.__registered_processes.get(42))
+
+ def stop_process(self, process, address):
+ """
+ Part of pretending to be boss.
+ """
+ self.__stop_process_params = (process, address)
+
+ def start_simple(self, process):
+ """
+ Part of pretending to be boss.
+ """
+ self.__start_simple_params = process
+
+ def test_component_start_stop_internal(self):
+ """
+ Test the behavior of _stop_internal and _start_internal.
+ """
+ component = Component('component', self, 'needed', 'Address')
+ component.start()
+ self.assertTrue(component.running())
+ self.assertEqual('component', self.__start_simple_params)
+ component.stop()
+ self.assertFalse(component.running())
+ self.assertEqual(('component', 'Address'), self.__stop_process_params)
+
+ def test_component_kill(self):
+ """
+ Check the kill is propagated. The case when component wasn't started
+ yet is already tested elsewhere.
+ """
+ class Process:
+ def __init__(self):
+ self.killed = False
+ self.terminated = False
+ def kill(self):
+ self.killed = True
+ def terminate(self):
+ self.terminated = True
+ process = Process()
+ class ProcInfo:
+ def __init__(self):
+ self.process = process
+ self.pid = 42
+ component = Component('component', self, 'needed', 'Address',
+ [], ProcInfo)
+ component.start()
+ self.assertTrue(component.running())
+ component.kill()
+ self.assertTrue(process.terminated)
+ self.assertFalse(process.killed)
+ process.terminated = False
+ component.kill(True)
+ self.assertTrue(process.killed)
+ self.assertFalse(process.terminated)
+
+ def setuid(self, uid):
+ self.__uid_set = uid
+
+ def test_setuid(self):
+ """
+ Some tests around the SetUID pseudo-component.
+ """
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ orig_setuid = isc.bind10.special_component.posix.setuid
+ isc.bind10.special_component.posix.setuid = self.setuid
+ component.start()
+ # No uid set in boss, nothing called.
+ self.assertIsNone(self.__uid_set)
+ # Doesn't do anything, but doesn't crash
+ component.stop()
+ component.kill()
+ component.kill(True)
+ self.uid = 42
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ component.start()
+ # This time, it get's called
+ self.assertEqual(42, self.__uid_set)
+
+class TestComponent(BaseComponent):
+ """
+ A test component. It does not start any processes or so, it just logs
+ information about what happens.
+ """
+ def __init__(self, owner, name, kind, address=None, params=None):
+ """
+ Initializes the component. The owner is the test that started the
+ component. The logging will happen into it.
+
+ The process is used as a name for the logging.
+ """
+ BaseComponent.__init__(self, owner, kind)
+ self.__owner = owner
+ self.__name = name
+ self.log('init')
+ self.log(kind)
+ self._address = address
+ self._params = params
+
+ def log(self, event):
+ """
+ Log an event into the owner. The owner can then check the correct
+ order of events that happened.
+ """
+ self.__owner.log.append((self.__name, event))
+
+ def _start_internal(self):
+ self.log('start')
+
+ def _stop_internal(self):
+ self.log('stop')
+
+ def _failed_internal(self):
+ self.log('failed')
+
+ def kill(self, forcefull=False):
+ self.log('killed')
+
+class FailComponent(BaseComponent):
+ """
+ A mock component that fails whenever it is started.
+ """
+ def __init__(self, name, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+
+ def _start_internal(self):
+ raise TestError("test error")
+
+class ConfiguratorTest(BossUtils, unittest.TestCase):
+ """
+ Tests for the configurator.
+ """
+ def setUp(self):
+ """
+ Prepare some test data for the tests.
+ """
+ BossUtils.setUp(self)
+ self.log = []
+ # The core "hardcoded" configuration
+ self.__core = {
+ 'core1': {
+ 'priority': 5,
+ 'process': 'core1',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core2': {
+ 'process': 'core2',
+ 'special': 'test',
+ 'kind': 'core'
+ },
+ 'core3': {
+ 'process': 'core3',
+ 'priority': 3,
+ 'special': 'test',
+ 'kind': 'core'
+ }
+ }
+ # How they should be started. They are created in the order they are
+ # found in the dict, but then they should be started by priority.
+ # This expects that the same dict returns its keys in the same order
+ # every time
+ self.__core_log_create = []
+ for core in self.__core.keys():
+ self.__core_log_create.append((core, 'init'))
+ self.__core_log_create.append((core, 'core'))
+ self.__core_log_start = [('core1', 'start'), ('core3', 'start'),
+ ('core2', 'start')]
+ self.__core_log = self.__core_log_create + self.__core_log_start
+ self.__specials = { 'test': self.__component_test }
+
+ def __component_test(self, process, boss, kind, address=None, params=None):
+ """
+ Create a test component. It will log events to us.
+ """
+ self.assertEqual(self, boss)
+ return TestComponent(self, process, kind, address, params)
+
+ def test_init(self):
+ """
+ Tests the configurator can be created and it does not create
+ any components yet, nor does it remember anything.
+ """
+ configurator = Configurator(self, self.__specials)
+ self.assertEqual([], self.log)
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+
+ def test_run_plan(self):
+ """
+ Test the internal function of running plans. Just see it can handle
+ the commands in the given order. We see that by the log.
+
+ Also includes one that raises, so we see it just stops there.
+ """
+ # Prepare the configurator and the plan
+ configurator = Configurator(self, self.__specials)
+ started = self.__component_test('second', self, 'dispensable')
+ started.start()
+ stopped = self.__component_test('first', self, 'core')
+ configurator._components = {'second': started}
+ plan = [
+ {
+ 'component': stopped,
+ 'command': 'start',
+ 'name': 'first',
+ 'config': {'a': 1}
+ },
+ {
+ 'component': started,
+ 'command': 'stop',
+ 'name': 'second',
+ 'config': {}
+ },
+ {
+ 'component': FailComponent('third', self, 'needed'),
+ 'command': 'start',
+ 'name': 'third',
+ 'config': {}
+ },
+ {
+ 'component': self.__component_test('fourth', self, 'core'),
+ 'command': 'start',
+ 'name': 'fourth',
+ 'config': {}
+ }
+ ]
+ # Don't include the preparation into the log
+ self.log = []
+ # The error from the third component is propagated
+ self.assertRaises(TestError, configurator._run_plan, plan)
+ # The first two were handled, the rest not, due to the exception
+ self.assertEqual([('first', 'start'), ('second', 'stop')], self.log)
+ self.assertEqual({'first': ({'a': 1}, stopped)},
+ configurator._components)
+
+ def __build_components(self, config):
+ """
+ Insert the components into the configuration to specify possible
+ Configurator._components.
+
+ Actually, the components are None, but we need something to be there.
+ """
+ result = {}
+ for name in config.keys():
+ result[name] = (config[name], None)
+ return result
+
+ def test_build_plan(self):
+ """
+ Test building the plan correctly. Not complete yet, this grows as we
+ add more ways of changing the plan.
+ """
+ configurator = Configurator(self, self.__specials)
+ plan = configurator._build_plan({}, self.__core)
+ # This should have created the components
+ self.assertEqual(self.__core_log_create, self.log)
+ self.assertEqual(3, len(plan))
+ for (task, name) in zip(plan, ['core1', 'core3', 'core2']):
+ self.assertTrue('component' in task)
+ self.assertEqual('start', task['command'])
+ self.assertEqual(name, task['name'])
+ component = task['component']
+ self.assertIsNone(component._address)
+ self.assertIsNone(component._params)
+
+ # A plan to go from older state to newer one containing more components
+ bigger = copy.copy(self.__core)
+ bigger['additional'] = {
+ 'priority': 6,
+ 'special': 'test',
+ 'process': 'additional',
+ 'kind': 'needed'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(self.__core),
+ bigger)
+ self.assertEqual([('additional', 'init'), ('additional', 'needed')],
+ self.log)
+ self.assertEqual(1, len(plan))
+ self.assertTrue('component' in plan[0])
+ component = plan[0]['component']
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+
+ # Now remove the one component again
+ # We run the plan so the component is wired into internal structures
+ configurator._run_plan(plan)
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ self.__core)
+ self.assertEqual([], self.log)
+ self.assertEqual([{
+ 'command': 'stop',
+ 'name': 'additional',
+ 'component': component
+ }], plan)
+
+ # We want to switch a component. So, prepare the configurator so it
+ # holds one
+ configurator._run_plan(configurator._build_plan(
+ self.__build_components(self.__core), bigger))
+ # Get a different configuration with a different component
+ different = copy.copy(self.__core)
+ different['another'] = {
+ 'special': 'test',
+ 'process': 'another',
+ 'kind': 'dispensable'
+ }
+ self.log = []
+ plan = configurator._build_plan(self.__build_components(bigger),
+ different)
+ self.assertEqual([('another', 'init'), ('another', 'dispensable')],
+ self.log)
+ self.assertEqual(2, len(plan))
+ self.assertEqual('stop', plan[0]['command'])
+ self.assertEqual('additional', plan[0]['name'])
+ self.assertTrue('component' in plan[0])
+ self.assertEqual('start', plan[1]['command'])
+ self.assertEqual('another', plan[1]['name'])
+ self.assertTrue('component' in plan[1])
+
+ # Some slightly insane plans, like missing process, having parameters,
+ # no special, etc
+ plan = configurator._build_plan({}, {
+ 'component': {
+ 'kind': 'needed',
+ 'params': ["1", "2"],
+ 'address': 'address'
+ }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertEqual(["1", "2"], component._params)
+ self.assertEqual('address', component._address)
+ self.assertEqual('needed', component._kind)
+ # We don't use isinstance on purpose, it would allow a descendant
+ self.assertTrue(type(component) is Component)
+ plan = configurator._build_plan({}, {
+ 'component': { 'kind': 'dispensable' }
+ })
+ self.assertEqual(1, len(plan))
+ self.assertEqual('start', plan[0]['command'])
+ self.assertEqual('component', plan[0]['name'])
+ component = plan[0]['component']
+ self.assertEqual('component', component.name())
+ self.assertIsNone(component._params)
+ self.assertIsNone(component._address)
+ self.assertEqual('dispensable', component._kind)
+
+ def __do_switch(self, option, value):
+ """
+ Start it with some component and then switch the configuration of the
+ component. This will probably raise, as it is not yet supported.
+ """
+ configurator = Configurator(self, self.__specials)
+ compconfig = {
+ 'special': 'test',
+ 'process': 'process',
+ 'priority': 13,
+ 'kind': 'core'
+ }
+ modifiedconfig = copy.copy(compconfig)
+ modifiedconfig[option] = value
+ return configurator._build_plan({'comp': (compconfig, None)},
+ {'comp': modifiedconfig})
+
+ def test_change_config_plan(self):
+ """
+ Test changing a configuration of one component. This is not yet
+ implemented and should therefore throw.
+ """
+ self.assertRaises(NotImplementedError, self.__do_switch, 'kind',
+ 'dispensable')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'special',
+ 'not_a_test')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'process',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'address',
+ 'different')
+ self.assertRaises(NotImplementedError, self.__do_switch, 'params',
+ ['different'])
+ # This does not change anything on running component, so no need to
+ # raise
+ self.assertEqual([], self.__do_switch('priority', 5))
+ # Check against false positive, if the data are the same, but different
+ # instance
+ self.assertEqual([], self.__do_switch('special', 'test'))
+
+ def __check_shutdown_log(self):
+ """
+ Checks the log for shutting down from the core configuration.
+ """
+ # We know everything must be stopped, we know what it is.
+ # But we don't know the order, so we check everything is exactly
+ # once in the log
+ components = set(self.__core.keys())
+ for (name, command) in self.log:
+ self.assertEqual('stop', command)
+ self.assertTrue(name in components)
+ components.remove(name)
+ self.assertEqual(set([]), components, "Some component wasn't stopped")
+
+ def test_run(self):
+ """
+ Passes some configuration to the startup method and sees if
+ the components are started up. Then it reconfigures it with
+ empty configuration, the original configuration again and shuts
+ down.
+
+ It also checks the components are kept inside the configurator.
+ """
+ configurator = Configurator(self, self.__specials)
+ # Can't reconfigure nor stop yet
+ self.assertRaises(ValueError, configurator.reconfigure, self.__core)
+ self.assertRaises(ValueError, configurator.shutdown)
+ self.assertFalse(configurator.running())
+ # Start it
+ configurator.startup(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+ # It can't be started twice
+ self.assertRaises(ValueError, configurator.startup, self.__core)
+
+ self.log = []
+ # Reconfigure - stop everything
+ configurator.reconfigure({})
+ self.assertEqual({}, configurator._components)
+ self.assertTrue(configurator.running())
+ self.__check_shutdown_log()
+
+ # Start it again
+ self.log = []
+ configurator.reconfigure(self.__core)
+ self.assertEqual(self.__core_log, self.log)
+ for core in self.__core.keys():
+ self.assertTrue(core in configurator._components)
+ self.assertEqual(self.__core[core],
+ configurator._components[core][0])
+ self.assertEqual(set(self.__core), set(configurator._components))
+ self.assertTrue(configurator.running())
+
+ # Do a shutdown
+ self.log = []
+ configurator.shutdown()
+ self.assertEqual({}, configurator._components)
+ self.assertFalse(configurator.running())
+ self.__check_shutdown_log()
+
+ # It can't be stopped twice
+ self.assertRaises(ValueError, configurator.shutdown)
+
+ def test_sort_no_prio(self):
+ """
+ There was a bug if there were two things with the same priority
+ (or without priority), it failed as it couldn't compare the dicts
+ there. This tests it doesn't crash.
+ """
+ configurator = Configurator(self, self.__specials)
+ configurator._build_plan({}, {
+ "c1": { 'kind': 'dispensable'},
+ "c2": { 'kind': 'dispensable'}
+ })
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 88c0ee8..4d568be 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -117,12 +117,13 @@ class ConfigManagerData:
if file:
file.close();
return config
-
+
def write_to_file(self, output_file_name = None):
"""Writes the current configuration data to a file. If
output_file_name is not specified, the file used in
read_from_file is used."""
filename = None
+
try:
file = tempfile.NamedTemporaryFile(mode='w',
prefix="b10-config.db.",
@@ -291,7 +292,7 @@ class ConfigManager:
# ok, just start with an empty config
self.config = ConfigManagerData(self.data_path,
self.database_filename)
-
+
def write_config(self):
"""Write the current configuration to the file specificied at init()"""
self.config.write_to_file()
@@ -445,7 +446,7 @@ class ConfigManager:
answer = ccsession.create_answer(1, "Wrong number of arguments")
if not answer:
answer = ccsession.create_answer(1, "No answer message from " + cmd[0])
-
+
return answer
def _handle_module_spec(self, spec):
@@ -455,7 +456,7 @@ class ConfigManager:
# todo: error checking (like keyerrors)
answer = {}
self.set_module_spec(spec)
-
+
# We should make one general 'spec update for module' that
# passes both specification and commands at once
spec_update = ccsession.create_command(ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
@@ -491,7 +492,7 @@ class ConfigManager:
else:
answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
return answer
-
+
def run(self):
"""Runs the configuration manager."""
self.running = True
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index c8df3b6..589a398 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -37,7 +37,7 @@ class TestConfigManagerData(unittest.TestCase):
It shouldn't append the data path to it.
"""
abs_path = self.data_path + os.sep + "b10-config-imaginary.db"
- data = ConfigManagerData(os.getcwd(), abs_path)
+ data = ConfigManagerData(self.data_path, abs_path)
self.assertEqual(abs_path, data.db_filename)
self.assertEqual(self.data_path, data.data_path)
@@ -88,7 +88,7 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(cfd1, cfd2)
cfd2.data['test'] = { 'a': [ 1, 2, 3]}
self.assertNotEqual(cfd1, cfd2)
-
+
class TestConfigManager(unittest.TestCase):
@@ -198,8 +198,8 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
config_spec = self.cm.get_config_spec('Spec2')
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
-
-
+
+
def test_get_commands_spec(self):
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, {})
@@ -250,7 +250,7 @@ class TestConfigManager(unittest.TestCase):
def test_write_config(self):
# tested in ConfigManagerData tests
pass
-
+
def _handle_msg_helper(self, msg, expected_answer):
answer = self.cm.handle_msg(msg)
self.assertEqual(expected_answer, answer)
@@ -338,7 +338,7 @@ class TestConfigManager(unittest.TestCase):
# self.fake_session.get_message(self.name, None))
#self.assertEqual({'version': 1, 'TestModule': {'test': 124}}, self.cm.config.data)
#
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["module_spec", self.spec.get_full_spec()]
},
{'result': [0]})
@@ -359,7 +359,7 @@ class TestConfigManager(unittest.TestCase):
#self.assertEqual({'commands_update': [ self.name, self.commands ] },
# self.fake_session.get_message("Cmdctl", None))
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["shutdown"]
},
{'result': [0]})
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index b81f48d..6465bf3 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -89,7 +89,7 @@ None\n\
";
const char* const DataSourceClient_getIterator_doc = "\
-get_iterator(name) -> ZoneIterator\n\
+get_iterator(name, adjust_ttl=True) -> ZoneIterator\n\
\n\
Returns an iterator to the given zone.\n\
\n\
@@ -111,6 +111,11 @@ anything else.\n\
Parameters:\n\
isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
nearest match as find_zone.\n\
+ adjust_ttl If True, the iterator will treat RRs with the same\n\
+ name and type but different TTL values to be of the\n\
+ same RRset, and will adjust the TTL to the lowest\n\
+ value found. If false, it will consider the RR to\n\
+ belong to a different RRset.\n\
\n\
Return Value(s): Pointer to the iterator.\n\
";
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index caebd25..49235a6 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -83,11 +83,27 @@ DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
PyObject*
DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
- PyObject *name_obj;
- if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ PyObject* name_obj;
+ PyObject* adjust_ttl_obj = NULL;
+ if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
+ &adjust_ttl_obj)) {
try {
+ bool adjust_ttl = true;
+ if (adjust_ttl_obj != NULL) {
+ // store result in local var so we can explicitely check for
+ // -1 error return value
+ int adjust_ttl_no = PyObject_Not(adjust_ttl_obj);
+ if (adjust_ttl_no == 1) {
+ adjust_ttl = false;
+ } else if (adjust_ttl_no == -1) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Error getting value of adjust_ttl");
+ return (NULL);
+ }
+ }
return (createZoneIteratorObject(
- self->cppobj->getInstance().getIterator(PyName_ToName(name_obj)),
+ self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
+ adjust_ttl),
po_self));
} catch (const isc::NotImplemented& ne) {
PyErr_SetString(getDataSourceException("NotImplemented"),
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index c649f6e..68e075a 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -63,7 +63,7 @@ def check_for_rrset(expected_rrsets, rrset):
class DataSrcClient(unittest.TestCase):
- def test_constructors(self):
+ def test_(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
@@ -82,12 +82,13 @@ class DataSrcClient(unittest.TestCase):
isc.datasrc.DataSourceClient, "memory",
"{ \"foo\": 1 }")
+ @unittest.skip("This test may fail depending on sqlite3 library behavior")
def test_iterate(self):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
# for RRSIGS, the TTL's are currently modified. This test should
# start failing when we fix that.
- rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), False)
# we do not know the order in which they are returned by the iterator
# but we do want to check them, so we put all records into one list
@@ -137,6 +138,13 @@ class DataSrcClient(unittest.TestCase):
])
# For RRSIGS, we can't add the fake data through the API, so we
# simply pass no rdata at all (which is skipped by the check later)
+
+ # Since we passed adjust_ttl = False to get_iterator, we get several
+ # sets of RRSIGs, one for each TTL
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
@@ -158,6 +166,8 @@ class DataSrcClient(unittest.TestCase):
])
add_rrset(expected_rrset_list, name, rrclass,
isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
# rrs is an iterator, but also has direct get_next_rrset(), use
# the latter one here
@@ -172,17 +182,35 @@ class DataSrcClient(unittest.TestCase):
# Now check there are none left
self.assertEqual(0, len(expected_rrset_list),
"RRset(s) not returned by iterator: " +
- str([rrset.to_text() for rrset in expected_rrset_list ]
+ str([rrset.get_name().to_text() + '/' +
+ rrset.get_type().to_text() for rrset in
+ expected_rrset_list ]
))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+ # Without the adjust_ttl argument, it should return 55 RRsets
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"))
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
self.assertEqual(55, len(list(rrets)))
+
+ # same test, but now with explicit True argument for adjust_ttl
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(55, len(list(rrets)))
+
+ # Count should be 71 if we request individual rrsets for differing ttls
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(71, len(list(rrets)))
# TODO should we catch this (iterating past end) and just return None
# instead of failing?
self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
@@ -539,4 +567,5 @@ class DataSrcUpdater(unittest.TestCase):
if __name__ == "__main__":
isc.log.init("bind10")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h
index b7a8e28..eb90d64 100644
--- a/src/lib/util/buffer.h
+++ b/src/lib/util/buffer.h
@@ -207,6 +207,24 @@ public:
}
//@}
+ /// @brief Read specified number of bytes as a vector.
+ ///
+ /// If specified buffer is too short, it will be expanded
+ /// using vector::resize() method.
+ ///
+ /// @param Reference to a buffer (data will be stored there).
+ /// @param Size specified number of bytes to read in a vector.
+ ///
+ void readVector(std::vector<uint8_t>& data, size_t len)
+ {
+ if (position_ + len > len_) {
+ isc_throw(InvalidBufferPosition, "read beyond end of buffer");
+ }
+
+ data.resize(len);
+ readData(&data[0], len);
+ }
+
private:
size_t position_;
@@ -519,6 +537,6 @@ typedef boost::shared_ptr<OutputBuffer> OutputBufferPtr;
} // namespace isc
#endif // __BUFFER_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/util/tests/buffer_unittest.cc b/src/lib/util/tests/buffer_unittest.cc
index 0cd1823..666924e 100644
--- a/src/lib/util/tests/buffer_unittest.cc
+++ b/src/lib/util/tests/buffer_unittest.cc
@@ -239,4 +239,36 @@ TEST_F(BufferTest, outputBufferZeroSize) {
});
}
+TEST_F(BufferTest, readVectorAll) {
+ std::vector<uint8_t> vec;
+
+ // check that vector can read the whole buffer
+ ibuffer.readVector(vec, 5);
+
+ ASSERT_EQ(5, vec.size());
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 5));
+
+ // ibuffer is 5 bytes long. Can't read past it.
+ EXPECT_THROW(
+ ibuffer.readVector(vec, 1),
+ isc::util::InvalidBufferPosition
+ );
+}
+
+TEST_F(BufferTest, readVectorChunks) {
+ std::vector<uint8_t> vec;
+
+ // check that vector can read the whole buffer
+ ibuffer.readVector(vec, 3);
+ EXPECT_EQ(3, vec.size());
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 3));
+
+ EXPECT_NO_THROW(
+ ibuffer.readVector(vec, 2)
+ );
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata+3, 2));
+}
+
}
diff --git a/tests/lettuce/README b/tests/lettuce/README
new file mode 100644
index 0000000..21a57c7
--- /dev/null
+++ b/tests/lettuce/README
@@ -0,0 +1,127 @@
+BIND10 system testing with Lettuce
+or: to BDD or not to BDD
+
+In this directory, we define a set of behavioral tests for BIND 10. Currently,
+these tests are specific for BIND10, but we are keeping in mind that RFC-related
+tests could be separated, so that we can test other systems as well.
+
+Prerequisites:
+- Installed version of BIND 10 (but see below how to run it from source tree)
+- dig
+- lettuce (http://lettuce.it)
+
+To install lettuce, if you have the python pip installation tool, simply do
+pip install lettuce
+See http://lettuce.it/intro/install.html
+
+Most systems have the pip tool in a separate package; on Debian-based systems
+it is called python-pip. On FreeBSD the port is devel/py-pip.
+
+Running the tests
+-----------------
+
+At this moment, we have a fixed port for local tests in our setups, port 47806.
+This port must be free. (TODO: can we make this run-time discovered?).
+Port 47805 is used for cmdctl, and must also be available.
+(note, we will need to extend this to a range, or if possible, we will need to
+do some on-the-fly available port finding)
+
+The bind10 main program, bindctl, and dig must all be in the default search
+path of your environment, and BIND 10 must not be running if you use the
+installed version when you run the tests.
+
+If you want to test an installed version of bind 10, just run 'lettuce' in
+this directory.
+
+We have provided a script that sets up the shell environment to run the tests
+with the build tree version of bind. If your shell uses export to set
+environment variables, you can source the script setup_intree_bind10.sh, then
+run lettuce.
+
+Due to the default way lettuce prints its output, it is advisable to run it
+in a terminal that is wide than the default. If you see a lot of lines twice
+in different colors, the terminal is not wide enough.
+
+If you just want to run one specific feature test, use
+lettuce features/<feature file>
+
+To run a specific scenario from a feature, use
+lettuce features/<feature file> -s <scenario number>
+
+We have set up the tests to assume that lettuce is run from this directory,
+so even if you specify a specific feature file, you should do it from this
+directory.
+
+What to do when a test fails
+----------------------------
+
+First of all, look at the error it printed and see what step it occurred in.
+If written well, the output should explain most of what went wrong.
+
+The stacktrace that is printed is *not* of bind10, but of the testing
+framework; this helps in finding more information about what exactly the test
+tried to achieve when it failed (as well as help debug the tests themselves).
+
+Furthermore, if any scenario fails, the output from long-running processes
+will be stored in the directory output/. The name of the files will be
+<Feature name>-<Scenario name>-<Process name>.stdout and
+<Feature name>-<Scenario name>-<Process name>.stderr
+Where spaces and other non-standard characters are replaced by an underscore.
+The process name is either the standard name for said process (e.g. 'bind10'),
+or the name given to it by the test ('when i run bind10 as <name>').
+
+These files *will* be overwritten or deleted if the same scenarios are run
+again, so if you want to inspect them after a failed test, either do so
+immediately or move the files.
+
+Adding and extending tests
+--------------------------
+
+If you want to add tests, it is advisable to first go through the examples to
+see what is possible, and read the documentation on http://www.lettuce.it
+
+There is also a README.tutorial file here.
+
+We have a couple of conventions to keep things manageable.
+
+Configuration files go into the configurations/ directory.
+Data files go into the data/ directory.
+Step definition go into the features/terrain/ directory (the name terrain is
+chosen for the same reason Lettuce chose terrain.py, this is the place the
+tests 'live' in).
+Feature definitions go directly into the features/ directory.
+
+These directories are currently not divided further; we may want to consider
+this as the set grows. Due to a (current?) limitation of Lettuce, for
+feature files this is currently not possible; the python files containing
+steps and terrain must be below or at the same level of the feature files.
+
+Long-running processes should be started through the world.RunningProcesses
+instance. If you want to add a process (e.g. bind9), create start, stop and
+control steps in terrain/<base_name>_control.py, and let it use the
+RunningProcesses API (defined in terrain.py). See bind10_control.py for an
+example.
+
+For sending queries and checking the results, steps have been defined in
+terrain/querying.py. These use dig and store the results split up into text
+strings. This is intentionally not parsed through our own library (as that way
+we might run into a 'symmetric bug'). If you need something more advanced from
+query results, define it here.
+
+Some very general steps are defined in terrain/steps.py.
+Initialization code, cleanup code, and helper classes are defined in
+terrain/terrain.py.
+
+To find the right steps, case insensitive matching is used. Parameters taken
+from the steps are case-sensitive though. So a step defined as
+'do foo with value (bar)' will be matched when using
+'Do Foo with value xyz', but xyz will be taken as given.
+
+If you need to add steps that are very particular to one test, create a new
+file with a name relevant for that test in terrain. We may want to consider
+creating a specific subdirectory for these, but at this moment it is unclear
+whether we need to.
+
+We should try to keep steps as general as possible, while not making them to
+complex and error-prone.
+
diff --git a/tests/lettuce/README.tutorial b/tests/lettuce/README.tutorial
new file mode 100644
index 0000000..18c94cf
--- /dev/null
+++ b/tests/lettuce/README.tutorial
@@ -0,0 +1,157 @@
+Quick tutorial and overview
+---------------------------
+
+Lettuce is a framework for doing Behaviour Driven Development (BDD).
+
+The idea behind BDD is that you first write down your requirements in
+the form of scenarios, then implement their behaviour.
+
+We do not plan on doing full BDD, but such a system should also help
+us make system tests. And, hopefully, being able to better identify
+what exactly is going wrong when a test fails.
+
+Lettuce is a python implementation of the Cucumber framework, which is
+a ruby system. So far we chose lettuce because we already need python
+anyway, so chances are higher that any system we want to run it on
+supports it. It only supports a subset of cucumber, but more cucumber
+features are planned. As I do not know much details of cucumber, I
+can't really say what is there and what is not.
+
+A slight letdown is that the current version does not support python 3.
+However, as long as the tool-calling glue is python2, this should not
+cause any problems, since these aren't unit tests; We do not plan to use
+our libraries directly, but only through the runnable scripts and
+executables.
+
+-----
+
+Features, Scenarios, Steps.
+
+Lettuce makes a distinction between features, scenarios, and steps.
+
+Features are general, well, features. Each 'feature' has its own file
+ending in .feature. A feature file contains a description and a number
+of scenarios. Each scenario tests one or more particular parts of the
+feature. Each scenario consists of a number of steps.
+
+So let's open up a simple one.
+
+-- example.feature
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answer queries
+
+ Scenario: Starting bind10
+ # steps go here
+--
+
+I have predefined a number of steps we can use, as we build test we
+will need to expand these, but we will look at them shortly.
+
+This file defines a feature, just under the feature name we can
+provide a description of the feature.
+
+The one scenario we have no has no steps, so if we run it we should
+see something like:
+
+-- output
+> lettuce
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answer queries
+
+ Scenario: Starting bind10
+
+1 feature (1 passed)
+1 scenario (1 passed)
+0 step (0 passed)
+--
+
+Let's first add some steps that send queries.
+
+--
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+--
+
+Since we didn't start any bind10, dig will time out and the result
+should be an error saying it got no answer. Errors are in the
+form of stack traces (trigger by failed assertions), so we can find
+out easily where in the tests they occurred. Especially when the total
+set of steps gets bigger we might need that.
+
+So let's add a step that starts bind10.
+
+--
+ When I start bind10 with configuration example.org.config
+--
+
+This is not good enough; it will fire of the process, but setting up
+b10-auth may take a few moments, so we need to add a step to wait for
+it to be started before we continue.
+
+--
+ Then wait for bind10 auth to start
+--
+
+And let's run the tests again.
+
+--
+> lettuce
+
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answer queries
+
+ Scenario: Starting bind10
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+1 feature (1 passed)
+1 scenario (1 passed)
+4 steps (4 passed)
+(finished within 2 seconds)
+--
+
+So take a look at one of those steps, let's pick the first one.
+
+A step is defined through a python decorator, which in essence is a regular
+expression; lettuce searches through all defined steps to find one that
+matches. These are 'partial' matches (unless specified otherwise in the
+regular expression itself), so if the step is defined with "do foo bar", the
+scenario can add words for readability "When I do foo bar".
+
+Each captured group will be passed as an argument to the function we define.
+For bind10, i defined a configuration file, a cmdctl port, and a process
+name. The first two should be self-evident, and the process name is an
+optional name we give it, should we want to address it in the rest of the
+tests. This is most useful if we want to start multiple instances. In the
+next step (the wait for auth to start), I added a 'of <instance>'. So if we
+define the bind10 'as b10_second_instance', we can specify that one here as
+'of b10_second_instance'.
+
+--
+ When I start bind10 with configuration second.config
+ with cmdctl port 12345 as b10_second_instance
+--
+(line wrapped for readability)
+
+But notice how we needed two steps, which we probably always need (but
+not entirely always)? We can also combine steps; for instance:
+
+--
+ at step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+--
+
+Now we can replace the two steps with one:
+
+--
+ Given I have bind10 running
+--
+
+That's it for the quick overview. For some more examples, with comments,
+take a look at features/example.feature. You can read more about lettuce and
+its features on http://www.lettuce.it, and if you plan on adding tests and
+scenarios, please consult the last section of the main README first.
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
new file mode 100644
index 0000000..642f2dd
--- /dev/null
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -0,0 +1,17 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
new file mode 100644
index 0000000..1a40d1b
--- /dev/null
+++ b/tests/lettuce/configurations/example2.org.config
@@ -0,0 +1,18 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "auth",
+ "debuglevel": 99
+ }
+ ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47807,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
new file mode 100644
index 0000000..f865354
--- /dev/null
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -0,0 +1,10 @@
+{
+ "version": 2,
+ "Auth": {
+ "database_file": "data/test_nonexistent_db.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/data/empty_db.sqlite3 b/tests/lettuce/data/empty_db.sqlite3
new file mode 100644
index 0000000..f27a8b8
Binary files /dev/null and b/tests/lettuce/data/empty_db.sqlite3 differ
diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3
new file mode 100644
index 0000000..070012f
Binary files /dev/null and b/tests/lettuce/data/example.org.sqlite3 differ
diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature
new file mode 100644
index 0000000..d1ed6b3
--- /dev/null
+++ b/tests/lettuce/features/example.feature
@@ -0,0 +1,142 @@
+Feature: Example feature
+ This is an example Feature set. Is is mainly intended to show
+ our use of the lettuce tool and our own framework for it
+ The first scenario is to show what a simple test would look like, and
+ is intentionally uncommented.
+ The later scenarios have comments to show what the test steps do and
+ support
+
+ Scenario: A simple example
+ Given I have bind10 running with configuration example.org.config
+ A query for www.example.org should have rcode NOERROR
+ A query for www.doesnotexist.org should have rcode REFUSED
+ The SOA serial for example.org should be 1234
+
+ Scenario: New database
+ # This test checks whether a database file is automatically created
+ # Underwater, we take advantage of our intialization routines so
+ # that we are sure this file does not exist, see
+ # features/terrain/terrain.py
+
+ # Standard check to test (non-)existence of a file
+ # This file is actually automatically
+ The file data/test_nonexistent_db.sqlite3 should not exist
+
+ # In the first scenario, we used 'given I have bind10 running', which
+ # is actually a compound step consisting of the following two
+ # one to start the server
+ When I start bind10 with configuration no_db_file.config
+ # And one to wait until it reports that b10-auth has started
+ Then wait for bind10 auth to start
+
+ # This is a general step to stop a named process. By convention,
+ # the default name for any process is the same as the one we
+ # use in the start step (for bind 10, that is 'I start bind10 with')
+ # See scenario 'Multiple instances' for more.
+ Then stop process bind10
+
+ # Now we use the first step again to see if the file has been created
+ The file data/test_nonexistent_db.sqlite3 should exist
+
+ Scenario: example.org queries
+ # This scenario performs a number of queries and inspects the results
+ # Simple queries have already been show, but after we have sent a query,
+ # we can also do more extensive checks on the result.
+ # See querying.py for more information on these steps.
+
+ # note: lettuce can group similar checks by using tables, but we
+ # intentionally do not make use of that here
+
+ # This is a compound statement that starts and waits for the
+ # started message
+ Given I have bind10 running with configuration example.org.config
+
+ # Some simple queries that is not examined further
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+ # A query where we look at some of the result properties
+ A query for www.example.org should have rcode NOERROR
+ The last query response should have qdcount 1
+ The last query response should have ancount 1
+ The last query response should have nscount 3
+ The last query response should have adcount 0
+ # The answer section can be inspected in its entirety; in the future
+ # we may add more granular inspection steps
+ The answer section of the last query response should be
+ """
+ www.example.org. 3600 IN A 192.0.2.1
+ """
+
+ A query for example.org type NS should have rcode NOERROR
+ The answer section of the last query response should be
+ """
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN NS ns3.example.org.
+ """
+
+ # We have a specific step for checking SOA serial numbers
+ The SOA serial for example.org should be 1234
+
+ # Another query where we look at some of the result properties
+ A query for doesnotexist.example.org should have rcode NXDOMAIN
+ The last query response should have qdcount 1
+ The last query response should have ancount 0
+ The last query response should have nscount 1
+ The last query response should have adcount 0
+ # When checking flags, we must pass them exactly as they appear in
+ # the output of dig.
+ The last query response should have flags qr aa rd
+
+ A query for www.example.org type TXT should have rcode NOERROR
+ The last query response should have ancount 0
+
+ # Some queries where we specify more details about what to send and
+ # where
+ A query for www.example.org class CH should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org type A class IN to 127.0.0.1:47806 should have rcode NOERROR
+
+ Scenario: changing database
+ # This scenario contains a lot of 'wait for' steps
+ # If those are not present, the asynchronous nature of the application
+ # can cause some of the things we send to be handled out of order;
+ # for instance auth could still be serving the old zone when we send
+ # the new query, or already respond from the new database.
+ # Therefore we wait for specific log messages after each operation
+ #
+ # This scenario outlines every single step, and does not use
+ # 'steps of steps' (e.g. Given I have bind10 running)
+ # We can do that but as an example this is probably better to learn
+ # the system
+
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ Wait for bind10 stderr message CMDCTL_STARTED
+ A query for www.example.org should have rcode NOERROR
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode REFUSED
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/example.org.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode NOERROR
+
+ Scenario: two bind10 instances
+ # This is more a test of the test system, start 2 bind10's
+ When I start bind10 with configuration example.org.config as bind10_one
+ And I start bind10 with configuration example2.org.config with cmdctl port 47804 as bind10_two
+
+ Then wait for bind10 auth of bind10_one to start
+ Then wait for bind10 auth of bind10_two to start
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for bind10_one stderr message DATASRC_SQLITE_OPEN
+
+ A query for www.example.org to 127.0.0.1:47806 should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
new file mode 100644
index 0000000..e104a81
--- /dev/null
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from lettuce import *
+import subprocess
+import re
+
+ at step('start bind10(?: with configuration (\S+))?' +\
+ '(?: with cmdctl port (\d+))?(?: as (\S+))?')
+def start_bind10(step, config_file, cmdctl_port, process_name):
+ """
+ Start BIND 10 with the given optional config file, cmdctl port, and
+ store the running process in world with the given process name.
+ Parameters:
+ config_file ('with configuration <file>', optional): this configuration
+ will be used. The path is relative to the base lettuce
+ directory.
+ cmdctl_port ('with cmdctl port <portnr>', optional): The port on which
+ b10-cmdctl listens for bindctl commands. Defaults to 47805.
+ process_name ('as <name>', optional). This is the name that can be used
+ in the following steps of the scenario to refer to this
+ BIND 10 instance. Defaults to 'bind10'.
+ This call will block until BIND10_STARTUP_COMPLETE or BIND10_STARTUP_ERROR
+ is logged. In the case of the latter, or if it times out, the step (and
+ scenario) will fail.
+ It will also fail if there is a running process with the given process_name
+ already.
+ """
+ args = [ 'bind10', '-v' ]
+ if config_file is not None:
+ args.append('-p')
+ args.append("configurations/")
+ args.append('-c')
+ args.append(config_file)
+ if cmdctl_port is None:
+ args.append('--cmdctl-port=47805')
+ else:
+ args.append('--cmdctl-port=' + cmdctl_port)
+ if process_name is None:
+ process_name = "bind10"
+ else:
+ args.append('-m')
+ args.append(process_name + '_msgq.socket')
+
+ world.processes.add_process(step, process_name, args)
+
+ # check output to know when startup has been completed
+ message = world.processes.wait_for_stderr_str(process_name,
+ ["BIND10_STARTUP_COMPLETE",
+ "BIND10_STARTUP_ERROR"])
+ assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(message)
+
+ at step('wait for bind10 auth (?:of (\w+) )?to start')
+def wait_for_auth(step, process_name):
+ """Wait for b10-auth to run. This is done by blocking until the message
+ AUTH_SERVER_STARTED is logged.
+ Parameters:
+ process_name ('of <name', optional): The name of the BIND 10 instance
+ to wait for. Defaults to 'bind10'.
+ """
+ if process_name is None:
+ process_name = "bind10"
+ world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
+ False)
+
+ at step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ """
+ Compound convenience step for running bind10, which consists of
+ start_bind10 and wait_for_auth.
+ Currently only supports the 'with configuration' option.
+ """
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+
+ at step('set bind10 configuration (\S+) to (.*)(?: with cmdctl port (\d+))?')
+def set_config_command(step, name, value, cmdctl_port):
+ """
+ Run bindctl, set the given configuration to the given value, and commit it.
+ Parameters:
+ name ('configuration <name>'): Identifier of the configuration to set
+ value ('to <value>'): value to set it to.
+ cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+ the command to. Defaults to 47805.
+ Fails if cmdctl does not exit with status code 0.
+ """
+ if cmdctl_port is None:
+ cmdctl_port = '47805'
+ args = ['bindctl', '-p', cmdctl_port]
+ bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ subprocess.PIPE, None)
+ bindctl.stdin.write("config set " + name + " " + value + "\n")
+ bindctl.stdin.write("config commit\n")
+ bindctl.stdin.write("quit\n")
+ result = bindctl.wait()
+ assert result == 0, "bindctl exit code: " + str(result)
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
new file mode 100644
index 0000000..ea89b18
--- /dev/null
+++ b/tests/lettuce/features/terrain/querying.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This script provides querying functionality
+# The most important step is
+#
+# query for <name> [type X] [class X] [to <addr>[:port]] should have rcode <rc>
+#
+# By default, it will send queries to 127.0.0.1:47806 unless specified
+# otherwise. The rcode is always checked. If the result is not NO_ANSWER,
+# the result will be stored in last_query_result, which can then be inspected
+# more closely, for instance with the step
+#
+# "the last query response should have <property> <value>"
+#
+# Also see example.feature for some examples
+
+from lettuce import *
+import subprocess
+import re
+
+#
+# define a class to easily access different parts
+# We may consider using our full library for this, but for now
+# simply store several parts of the response as text values in
+# this structure.
+# (this actually has the advantage of not relying on our own libraries
+# to test our own, well, libraries)
+#
+# The following attributes are 'parsed' from the response, all as strings,
+# and end up as direct attributes of the QueryResult object:
+# opcode, rcode, id, flags, qdcount, ancount, nscount, adcount
+# (flags is one string with all flags, in the order they appear in the
+# response packet.)
+#
+# this will set 'rcode' as the result code, we 'define' one additional
+# rcode, "NO_ANSWER", if the dig process returned an error code itself
+# In this case none of the other attributes will be set.
+#
+# The different sections will be lists of strings, one for each RR in the
+# section. The question section will start with ';', as per dig output
+#
+# See server_from_sqlite3.feature for various examples to perform queries
+class QueryResult(object):
+ status_re = re.compile("opcode: ([A-Z])+, status: ([A-Z]+), id: ([0-9]+)")
+ flags_re = re.compile("flags: ([a-z ]+); QUERY: ([0-9]+), ANSWER: " +
+ "([0-9]+), AUTHORITY: ([0-9]+), ADDITIONAL: ([0-9]+)")
+
+ def __init__(self, name, qtype, qclass, address, port):
+ """
+ Constructor. This fires of a query using dig.
+ Parameters:
+ name: The domain name to query
+ qtype: The RR type to query. Defaults to A if it is None.
+ qclass: The RR class to query. Defaults to IN if it is None.
+ address: The IP adress to send the query to.
+ port: The port number to send the query to.
+ All parameters must be either strings or have the correct string
+ representation.
+ Only one query attempt will be made.
+ """
+ args = [ 'dig', '+tries=1', '@' + str(address), '-p', str(port) ]
+ if qtype is not None:
+ args.append('-t')
+ args.append(str(qtype))
+ if qclass is not None:
+ args.append('-c')
+ args.append(str(qclass))
+ args.append(name)
+ dig_process = subprocess.Popen(args, 1, None, None, subprocess.PIPE,
+ None)
+ result = dig_process.wait()
+ if result != 0:
+ self.rcode = "NO_ANSWER"
+ else:
+ self.rcode = None
+ parsing = "HEADER"
+ self.question_section = []
+ self.answer_section = []
+ self.authority_section = []
+ self.additional_section = []
+ self.line_handler = self.parse_header
+ for out in dig_process.stdout:
+ self.line_handler(out)
+
+ def _check_next_header(self, line):
+ """
+ Returns true if we found a next header, and sets the internal
+ line handler to the appropriate value.
+ """
+ if line == ";; ANSWER SECTION:\n":
+ self.line_handler = self.parse_answer
+ elif line == ";; AUTHORITY SECTION:\n":
+ self.line_handler = self.parse_authority
+ elif line == ";; ADDITIONAL SECTION:\n":
+ self.line_handler = self.parse_additional
+ elif line.startswith(";; Query time"):
+ self.line_handler = self.parse_footer
+ else:
+ return False
+ return True
+
+ def parse_header(self, line):
+ """
+ Parse the header lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ status_match = self.status_re.search(line)
+ flags_match = self.flags_re.search(line)
+ if status_match is not None:
+ self.opcode = status_match.group(1)
+ self.rcode = status_match.group(2)
+ elif flags_match is not None:
+ self.flags = flags_match.group(1)
+ self.qdcount = flags_match.group(2)
+ self.ancount = flags_match.group(3)
+ self.nscount = flags_match.group(4)
+ self.adcount = flags_match.group(5)
+
+ def parse_question(self, line):
+ """
+ Parse the question section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.question_section.append(line.strip())
+
+ def parse_answer(self, line):
+ """
+ Parse the answer section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.answer_section.append(line.strip())
+
+ def parse_authority(self, line):
+ """
+ Parse the authority section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.authority_section.append(line.strip())
+
+ def parse_additional(self, line):
+ """
+ Parse the additional section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.additional_section.append(line.strip())
+
+ def parse_footer(self, line):
+ """
+ Parse the footer lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ pass
+
+ at step('A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?' +
+ '(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')
+def query(step, query_name, qtype, qclass, addr, port, rcode):
+ """
+ Run a query, check the rcode of the response, and store the query
+ result in world.last_query_result.
+ Parameters:
+ query_name ('query for <name>'): The domain name to query.
+ qtype ('type <type>', optional): The RR type to query. Defaults to A.
+ qclass ('class <class>', optional): The RR class to query. Defaults to IN.
+ addr ('to <address>', optional): The IP address of the nameserver to query.
+ Defaults to 127.0.0.1.
+ port (':<port>', optional): The port number of the nameserver to query.
+ Defaults to 47806.
+ rcode ('should have rcode <rcode>'): The expected rcode of the answer.
+ """
+ if qtype is None:
+ qtype = "A"
+ if qclass is None:
+ qclass = "IN"
+ if addr is None:
+ addr = "127.0.0.1"
+ if port is None:
+ port = 47806
+ query_result = QueryResult(query_name, qtype, qclass, addr, port)
+ assert query_result.rcode == rcode,\
+ "Expected: " + rcode + ", got " + query_result.rcode
+ world.last_query_result = query_result
+
+ at step('The SOA serial for ([\w.]+) should be ([0-9]+)')
+def query_soa(step, query_name, serial):
+ """
+ Convenience function to check the SOA SERIAL value of the given zone at
+ the nameserver at the default address (127.0.0.1:47806).
+ Parameters:
+ query_name ('for <name>'): The zone to find the SOA record for.
+ serial ('should be <number>'): The expected value of the SOA SERIAL.
+ If the rcode is not NOERROR, or the answer section does not contain the
+ SOA record, this step fails.
+ """
+ query_result = QueryResult(query_name, "SOA", "IN", "127.0.0.1", "47806")
+ assert "NOERROR" == query_result.rcode,\
+ "Got " + query_result.rcode + ", expected NOERROR"
+ assert len(query_result.answer_section) == 1,\
+ "Too few or too many answers in SOA response"
+ soa_parts = query_result.answer_section[0].split()
+ assert serial == soa_parts[6],\
+ "Got SOA serial " + soa_parts[6] + ", expected " + serial
+
+ at step('last query response should have (\S+) (.+)')
+def check_last_query(step, item, value):
+ """
+ Check a specific value in the reponse from the last successful query sent.
+ Parameters:
+ item: The item to check the value of
+ value: The expected value.
+ This performs a very simple direct string comparison of the QueryResult
+ member with the given item name and the given value.
+ Fails if the item is unknown, or if its value does not match the expected
+ value.
+ """
+ assert world.last_query_result is not None
+ assert item in world.last_query_result.__dict__
+ lq_val = world.last_query_result.__dict__[item]
+ assert str(value) == str(lq_val),\
+ "Got: " + str(lq_val) + ", expected: " + str(value)
+
+ at step('([a-zA-Z]+) section of the last query response should be')
+def check_last_query_section(step, section):
+ """
+ Check the entire contents of the given section of the response of the last
+ query.
+ Parameters:
+ section ('<section> section'): The name of the section (QUESTION, ANSWER,
+ AUTHORITY or ADDITIONAL).
+ The expected response is taken from the multiline part of the step in the
+ scenario. Differing whitespace is ignored, but currently the order is
+ significant.
+ Fails if they do not match.
+ """
+ response_string = None
+ if section.lower() == 'question':
+ response_string = "\n".join(world.last_query_result.question_section)
+ elif section.lower() == 'answer':
+ response_string = "\n".join(world.last_query_result.answer_section)
+ elif section.lower() == 'authority':
+ response_string = "\n".join(world.last_query_result.answer_section)
+ elif section.lower() == 'additional':
+ response_string = "\n".join(world.last_query_result.answer_section)
+ else:
+ assert False, "Unknown section " + section
+ # replace whitespace of any length by one space
+ response_string = re.sub("[ \t]+", " ", response_string)
+ expect = re.sub("[ \t]+", " ", step.multiline)
+ assert response_string.strip() == expect.strip(),\
+ "Got:\n'" + response_string + "'\nExpected:\n'" + step.multiline +"'"
+
+
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
new file mode 100644
index 0000000..4050940
--- /dev/null
+++ b/tests/lettuce/features/terrain/steps.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This file contains a number of common steps that are general and may be used
+# By a lot of feature files.
+#
+
+from lettuce import *
+import os
+
+ at step('stop process (\w+)')
+def stop_a_named_process(step, process_name):
+ """
+ Stop the process with the given name.
+ Parameters:
+ process_name ('process <name>'): Name of the process to stop.
+ """
+ world.processes.stop_process(process_name)
+
+ at step('wait for (new )?(\w+) stderr message (\w+)')
+def wait_for_message(step, new, process_name, message):
+ """
+ Block until the given message is printed to the given process's stderr
+ output.
+ Parameter:
+ new: (' new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stderr'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stderr_str(process_name, [message], new)
+
+ at step('wait for (new )?(\w+) stdout message (\w+)')
+def wait_for_message(step, process_name, message):
+ """
+ Block until the given message is printed to the given process's stdout
+ output.
+ Parameter:
+ new: (' new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stderr'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stdout_str(process_name, [message], new)
+
+ at step('the file (\S+) should (not )?exist')
+def check_existence(step, file_name, should_not_exist):
+ """
+ Check the existence of the given file.
+ Parameters:
+ file_name ('file <name>'): File to check existence of.
+ should_not_exist ('not', optional): Whether it should or should not exist.
+ Fails if the file should exist and does not, or vice versa.
+ """
+ if should_not_exist is None:
+ assert os.path.exists(file_name), file_name + " does not exist"
+ else:
+ assert not os.path.exists(file_name), file_name + " exists"
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
new file mode 100644
index 0000000..634d2fb
--- /dev/null
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This is the 'terrain' in which the lettuce lives. By convention, this is
+# where global setup and teardown is defined.
+#
+# We declare some attributes of the global 'world' variables here, so the
+# tests can safely assume they are present.
+#
+# We also use it to provide scenario invariants, such as resetting data.
+#
+
+from lettuce import *
+import subprocess
+import os.path
+import shutil
+import re
+import time
+
+# In order to make sure we start all tests with a 'clean' environment,
+# We perform a number of initialization steps, like restoring configuration
+# files, and removing generated data files.
+
+# This approach may not scale; if so we should probably provide specific
+# initialization steps for scenarios. But until that is shown to be a problem,
+# It will keep the scenarios cleaner.
+
+# This is a list of files that are freshly copied before each scenario
+# The first element is the original, the second is the target that will be
+# used by the tests that need them
+copylist = [
+["configurations/example.org.config.orig", "configurations/example.org.config"]
+]
+
+# This is a list of files that, if present, will be removed before a scenario
+removelist = [
+"data/test_nonexistent_db.sqlite3"
+]
+
+# When waiting for output data of a running process, use OUTPUT_WAIT_INTERVAL
+# as the interval in which to check again if it has not been found yet.
+# If we have waited OUTPUT_WAIT_MAX_INTERVALS times, we will abort with an
+# error (so as not to hang indefinitely)
+OUTPUT_WAIT_INTERVAL = 0.5
+OUTPUT_WAIT_MAX_INTERVALS = 20
+
+# class that keeps track of one running process and the files
+# we created for it.
+class RunningProcess:
+ def __init__(self, step, process_name, args):
+ # set it to none first so destructor won't error if initializer did
+ """
+ Initialize the long-running process structure, and start the process.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ """
+ self.process = None
+ self.step = step
+ self.process_name = process_name
+ self.remove_files_on_exit = True
+ self._check_output_dir()
+ self._create_filenames()
+ self._start_process(args)
+
+ def _start_process(self, args):
+ """
+ Start the process.
+ Parameters:
+ args:
+ Array of arguments to pass to Popen().
+ """
+ stderr_write = open(self.stderr_filename, "w")
+ stdout_write = open(self.stdout_filename, "w")
+ self.process = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ stdout_write, stderr_write)
+ # open them again, this time for reading
+ self.stderr = open(self.stderr_filename, "r")
+ self.stdout = open(self.stdout_filename, "r")
+
+ def mangle_filename(self, filebase, extension):
+ """
+ Remove whitespace and non-default characters from a base string,
+ and return the substituted value. Whitespace is replaced by an
+ underscore. Any other character that is not an ASCII letter, a
+ number, a dot, or a hyphen or underscore is removed.
+ Parameter:
+ filebase: The string to perform the substitution and removal on
+ extension: An extension to append to the result value
+ Returns the modified filebase with the given extension
+ """
+ filebase = re.sub("\s+", "_", filebase)
+ filebase = re.sub("[^a-zA-Z0-9.\-_]", "", filebase)
+ return filebase + "." + extension
+
+ def _check_output_dir(self):
+ # We may want to make this overridable by the user, perhaps
+ # through an environment variable. Since we currently expect
+ # lettuce to be run from our lettuce dir, we shall just use
+ # the relative path 'output/'
+ """
+ Make sure the output directory for stdout/stderr redirection
+ exists.
+ Fails if it exists but is not a directory, or if it does not
+ and we are unable to create it.
+ """
+ self._output_dir = os.getcwd() + os.sep + "output"
+ if not os.path.exists(self._output_dir):
+ os.mkdir(self._output_dir)
+ assert os.path.isdir(self._output_dir),\
+ self._output_dir + " is not a directory."
+
+ def _create_filenames(self):
+ """
+ Derive the filenames for stdout/stderr redirection from the
+ feature, scenario, and process name. The base will be
+ "<Feature>-<Scenario>-<process name>.[stdout|stderr]"
+ """
+ filebase = self.step.scenario.feature.name + "-" +\
+ self.step.scenario.name + "-" + self.process_name
+ self.stderr_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stderr")
+ self.stdout_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stdout")
+
+ def stop_process(self):
+ """
+ Stop this process by calling terminate(). Blocks until process has
+ exited. If remove_files_on_exit is True, redirected output files
+ are removed.
+ """
+ if self.process is not None:
+ self.process.terminate()
+ self.process.wait()
+ self.process = None
+ if self.remove_files_on_exit:
+ self._remove_files()
+
+ def _remove_files(self):
+ """
+ Remove the files created for redirection of stdout/stderr output.
+ """
+ os.remove(self.stderr_filename)
+ os.remove(self.stdout_filename)
+
+ def _wait_for_output_str(self, filename, running_file, strings, only_new):
+ """
+ Wait for a line of output in this process. This will (if only_new is
+ False) first check all previous output from the process, and if not
+ found, check all output since the last time this method was called.
+ For each line in the output, the given strings array is checked. If
+ any output lines checked contains one of the strings in the strings
+ array, that string (not the line!) is returned.
+ Parameters:
+ filename: The filename to read previous output from, if applicable.
+ running_file: The open file to read new output from.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ if not only_new:
+ full_file = open(filename, "r")
+ for line in full_file:
+ for string in strings:
+ if line.find(string) != -1:
+ full_file.close()
+ return string
+ wait_count = 0
+ while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
+ where = running_file.tell()
+ line = running_file.readline()
+ if line:
+ for string in strings:
+ if line.find(string) != -1:
+ return string
+ else:
+ wait_count += 1
+ time.sleep(OUTPUT_WAIT_INTERVAL)
+ running_file.seek(where)
+ assert False, "Timeout waiting for process output: " + str(strings)
+
+ def wait_for_stderr_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stderr output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stderr_filename, self.stderr,
+ strings, only_new)
+
+ def wait_for_stdout_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stdout output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stdout_filename, self.stdout,
+ strings, only_new)
+
+# Container class for a number of running processes
+# i.e. servers like bind10, etc
+# one-shot programs like dig or bindctl are started and closed separately
+class RunningProcesses:
+ def __init__(self):
+ """
+ Initialize with no running processes.
+ """
+ self.processes = {}
+
+ def add_process(self, step, process_name, args):
+ """
+ Start a process with the given arguments, and store it under the given
+ name.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ Fails if a process with the given name is already running.
+ """
+ assert process_name not in self.processes,\
+ "Process " + name + " already running"
+ self.processes[process_name] = RunningProcess(step, process_name, args)
+
+ def get_process(self, process_name):
+ """
+ Return the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + name + " unknown"
+ return self.processes[process_name]
+
+ def stop_process(self, process_name):
+ """
+ Stop the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + name + " unknown"
+ self.processes[process_name].stop_process()
+ del self.processes[process_name]
+
+ def stop_all_processes(self):
+ """
+ Stop all running processes.
+ """
+ for process in self.processes.values():
+ process.stop_process()
+
+ def keep_files(self):
+ """
+ Keep the redirection files for stdout/stderr output of all processes
+ instead of removing them when they are stopped later.
+ """
+ for process in self.processes.values():
+ process.remove_files_on_exit = False
+
+ def wait_for_stderr_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stderr output.
+ Parameters:
+ process_name: The name of the process to check the stderr output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stderr_str(strings,
+ only_new)
+
+ def wait_for_stdout_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stdout output.
+ Parameters:
+ process_name: The name of the process to check the stdout output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stdout_str(strings,
+ only_new)
+
+ at before.each_scenario
+def initialize(scenario):
+ """
+ Global initialization for each scenario.
+ """
+ # Keep track of running processes
+ world.processes = RunningProcesses()
+
+ # Convenience variable to access the last query result from querying.py
+ world.last_query_result = None
+
+ # Some tests can modify the settings. If the tests fail half-way, or
+ # don't clean up, this can leave configurations or data in a bad state,
+ # so we copy them from originals before each scenario
+ for item in copylist:
+ shutil.copy(item[0], item[1])
+
+ for item in removelist:
+ if os.path.exists(item):
+ os.remove(item)
+
+ at after.each_scenario
+def cleanup(scenario):
+ """
+ Global cleanup for each scenario.
+ """
+ # Keep output files if the scenario failed
+ if not scenario.passed:
+ world.processes.keep_files()
+ # Stop any running processes we may have had around
+ world.processes.stop_all_processes()
+
diff --git a/tests/lettuce/setup_intree_bind10.sh.in b/tests/lettuce/setup_intree_bind10.sh.in
new file mode 100755
index 0000000..40fd82d
--- /dev/null
+++ b/tests/lettuce/setup_intree_bind10.sh.in
@@ -0,0 +1,46 @@
+#! /bin/sh
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
+export PYTHON_EXEC
+
+BIND10_PATH=@abs_top_builddir@/src/bin/bind10
+
+PATH=@abs_top_builddir@/src/bin/bind10:@abs_top_builddir@/src/bin/bindctl:@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
+export PATH
+
+PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
+export PYTHONPATH
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+# TODO: We need to do this feature based (ie. no general from_source)
+# But right now we need a second one because some spec files are
+# generated and hence end up under builddir
+B10_FROM_BUILD=@abs_top_builddir@
+export B10_FROM_BUILD
+
+BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
+export BIND10_MSGQ_SOCKET_FILE
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 49ef0f1..565b306 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -50,7 +50,7 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth false
+echo 'config remove Boss/components b10-auth
config commit
quit
' | $RUN_BINDCTL \
@@ -61,7 +61,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth true
+echo 'config add Boss/components b10-auth
+config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
More information about the bind10-changes
mailing list