BIND 10 trac213-incremental, updated. 65f4be2b65bf19baad6bbeda742b44dff7cd9b4a [213] Remove some unneeded assignments in tests
BIND 10 source code commits
bind10-changes at lists.isc.org
Thu Nov 10 09:33:33 UTC 2011
The branch, trac213-incremental has been updated
via 65f4be2b65bf19baad6bbeda742b44dff7cd9b4a (commit)
via a3ba4cca05891f1052aae6bbe28c125799c7fe6f (commit)
via 4dc03f5419813b974b9794aa2cba4f55557fbbb5 (commit)
via dc2ea48db152796f6c0f62641f00646ef32e2b9c (commit)
via b513f0ab652e11892c232b6170f675fbb9990609 (commit)
via bde035f1ebcb1a9c7678692538f9aec18f5232e6 (commit)
via b85213cd68ec24c5deede886d466bf0911b9e762 (commit)
via 056a1342f0d73cf53a37ed672a8a4ad907c4cfa2 (commit)
via 71de39fb8126b7200b2f6dcd9689a000c958fe0e (commit)
via f337180ad87778e3b91111efe93c3e31b1c92a91 (commit)
via 01c6801b65e167ba2cf635143b988bf4bcbbdc68 (commit)
via 31d5a4f66b18cca838ca1182b9f13034066427a7 (commit)
via 0f7a43ef24e2fedfa554200cbfa3d83971dbfd90 (commit)
via 9f854755d1bad72bc4bd94accbc60d211c880cb7 (commit)
via 0a3592efda9bd97cf251163cf9a30f38122cb7c2 (commit)
via 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2 (commit)
via 9862bdf184aceb37cfdbb4fbb455209bdf88a0f4 (commit)
via e6a596fe8f57103c735d8e135f855d46c248844c (commit)
via f8cea54b5bb8f870a01beebbdcde5eb90dd7d8b4 (commit)
via 137a61f2afcd6d16ea20c3a4436046d783a5babf (commit)
via 6b75c128bcdcefd85c18ccb6def59e9acedd4437 (commit)
via 1a5bd80bbe01abbb2a5932bc43fab8e7a287dcf5 (commit)
via c03e6df1521a378fa3cb9eab4a11db93e6e34969 (commit)
via 8cea64b69af8d5ef21497d2f1c9812968ce5d8f7 (commit)
via 1aa233fab1d74dc776899df61181806679d14013 (commit)
via 45bd390859827c02965765b4b146b5351cbbb1c1 (commit)
via 0f6b216a89583edd445942386df5a388b39149d5 (commit)
via ac552055bc8a4d996a0c24eb5f13d01667a3d77a (commit)
via 26aaecc388f8c152b5d63a1f3906ba5a625b0e31 (commit)
via 10c84106e8b34d78fa1916e4bc3db15030fd94f9 (commit)
via 23cfc5b4d9b384172d0eadd2269ed6a6121966a8 (commit)
via 8d7ef6fe3b696ee2cffdc4f10fdf673968933077 (commit)
via 6cd1c3aa7fb998fe9f873045b74185f793177cb5 (commit)
via e6d7624e503084067e6c4659c6bdbd89c038fdd7 (commit)
via 4b56e1807d8ce8b86da6793b67b50ff57ee62b9e (commit)
via 5c16ff47ae8d485da0684ee7dd5547eeef3c6232 (commit)
via 65d8475336b8e884ff261b9a1fe03688e1618cf4 (commit)
via 388e77cae5d9260bcc314465f6711bcdd782a26d (commit)
via 96c94d6baf0a68b641cc9b93966b09b38ebaa15b (commit)
via 4aa0057db95051e8e554bb5fcbcfbfecf822a5cd (commit)
via 007d31f50876cd58a031dd86b461145e77bea63e (commit)
via 27b7f9d36113514773777eb94bf66a3ef8c49a82 (commit)
via 6716721a7c10737d86a4a29af530d54a458f83ca (commit)
via e8aa8b8b994146dfff6d29435a66c88dcf79eb69 (commit)
via 586c93cef97215330b8bdffed6c35335fb66173d (commit)
via 5d6c71aeb2575883488b2cde87501aa84260b1ab (commit)
via 233d2d783e6be97f208998f9fcf79404eea5c9b3 (commit)
via dee6a4739aee15e8899da2e35d179cb1d8623e76 (commit)
via 50672f2d6073e813fb80250398b6e6a2b93c915d (commit)
via 1a90f118bf69d6239ca290f712bfeb89a9027efd (commit)
via 5d290088a1b996011217cf801e37600d5bcd037e (commit)
via 3d59d6a24e3a84c3ca453721649e6adfab863c0e (commit)
via a95b528af25a2b3bda91f9b88c04a20b0b783208 (commit)
via 58e8ca7d1c5d8f4b69aa174405e4ef280b8012cc (commit)
via aa13f832395794bab3647ed375ac8a6e2d26e55f (commit)
via 0ea04c4bb216cc822be49626d4b0269956fd070e (commit)
via b03d29677700c1dd2a527dafe9987defb7556e97 (commit)
via 043ff1e7ec5f2c8e3d6b7e278418fc03eea2b09f (commit)
via 9697c6b3cc3e49d96efc6777c1dba5ecb00eb785 (commit)
via 67a11e710e06647dfb65ea6e592fd80851422dad (commit)
via b4b9c3e18f8d76b695d7b84f1b128ccba229d814 (commit)
via bb76c3f643eb85fc8b1ed8087f72368ad1d23aa3 (commit)
via 2764ae7bde7b314773b7258d23fce3813c4407b2 (commit)
via 1d9614bc52634bd512121f34af66290a2cdb2958 (commit)
via 34092bce6cb5755eb6b53979f8f624ca78b592fb (commit)
via 35ca4f5aa94daa5e3a8ddcb02812e7d76685e65e (commit)
via 6d46a3787127f87aa65c9dfb626476f79b4f0194 (commit)
via c692292fb26bf6af6e94b7e160c0c7af27e123ac (commit)
via d6a9dffdd4ee8af94e31ae9462e2ef851b49fca8 (commit)
via 09e4d880b9e7260caf6b5ec763aa1e0712531657 (commit)
via 33a0d78c8ff1bd0083251fdad2def37c6c9064dc (commit)
via a28f94240549b3b869e6aef5265d46afbd09f6aa (commit)
via b843d05fdaefa92abcec50a781dbdfbadb4c9bed (commit)
via 0428f6fcc7b5acc73f70913a17bd6f23c5a6ad3a (commit)
via b7e1847c3a1979d3ac593de435e142335cbc7188 (commit)
via b3af32e148d004ef5010d37eddccf6df57bdb257 (commit)
via 2104208cfcc7ab912cf2d530697c7192608f3c5d (commit)
via 7e1e5f38f1d28c8e19337fb56f3dacba81341ec8 (commit)
via 8635b169171d0d88ce19f46039ded6e1dab7b72c (commit)
via 05d4deb643271e0f0b0dcfb22809714086d50788 (commit)
via 1c8dfb0cdb80841bea487ee355ce85c749223810 (commit)
via a3fd03e16b71ae4e9b480e4e48c7ddfa393555ac (commit)
via 0613c0e0ebfcc8e3cf37678bd6799889569beb83 (commit)
via 66bb38a4d0cf296f48181d624d22b1074688de38 (commit)
via 7d2826b519f95b2fecd299e15952e897c5a60b2b (commit)
via e9f0637479f992936b2feab96e50a84a6a4dfebd (commit)
via c3b01cc59ba03c6054af4bae42e08965b3f60eb0 (commit)
via 409e800ffc208240ec70eb63bc2e56aadfbb21e1 (commit)
via 6e4e3ac19c322c65679c6c5653cc41b80305d9b9 (commit)
via f80ab7879cc29f875c40dde6b44e3796ac98d6da (commit)
via 00a99483151a21e73ef432dcba73347e1fd407f2 (commit)
via d5ade3d32087884e477d8f5b2fa200324b96ea0a (commit)
via 0e776c32330aee466073771600390ce74b959b38 (commit)
via 723a57edeb33afe206a8e350cfc583d5cb451051 (commit)
via 25c802dd1c30580b94345e83eeb6a168ab329a33 (commit)
via 76bbf34210a5cf70853337a9a9f064c07c7aca76 (commit)
via d27f4125c99d13a7a73dee8c196a0d95050a4b62 (commit)
via 081271155ea18a33a135711a983e8882a2f56eea (commit)
via e41f8459ca5dbc886e838e6e32585ba5c7eb96e6 (commit)
via e856c49ae33b2b79d8eab0b313e4ba25db261c4a (commit)
via 3a6d50835b621e4825ec0d8434ce066bd31020d0 (commit)
via 2182b30eb6b833fe4c902d635aa97ad901518904 (commit)
via 9aaf85731baa1ea5fe9484efc9bf48b264f60d1e (commit)
via 6d2960ff386a85c9738fc4cfd3975ee1d58eaa04 (commit)
via dc491833cf75ac1481ba1475795b0f266545013d (commit)
via d07206bb5c5ec4b3411e759a9afc75c2c593a4fa (commit)
via 8fc9df7f444af31a936e1f261f7560b1e222a3ef (commit)
via 254eb201171f450826e2c907098f0c78a7e3c7f4 (commit)
via d38014229e33d2bdb3875e53b9486d54b3920ecc (commit)
via 17565e10ce667cfd7048d4867795ba3cb6876f2e (commit)
via 1cdc35417c6f25f254b7053e801e8415eeba9d84 (commit)
via 0ec187bc1e3cdde29b20f2465c4d5417e04e2d6f (commit)
via ce39dd192fc8ba15479fda1a9da08deb8c3d2225 (commit)
via eb35651d68eba80cbe7a5bc23e72d3544719a33a (commit)
via bef6ceb2905d328c712a45754be23393d56b2a31 (commit)
via e5c133124da1b724f0f452f63fa947fa036c24d3 (commit)
via 1aedd1b56bd3764739d247dda7477bb799a37ac6 (commit)
via cd3588c9020d0310f949bfd053c4d3a4bd84ef88 (commit)
via 40e0797d688e26dae0e93553f63328aa649e9681 (commit)
via 1107b46ec39da9cdac19af44ba79ae5ee8722025 (commit)
via b561ddc9d182cc237143fbd37ab9e6a0277da948 (commit)
via 1cdc605c50c999ffc1225bee5817aa0ae26bcc4d (commit)
via 8b5b28cdbd7be0c7a79950b52679ac4be3db274b (commit)
via df9b10fae5385c1c0f1cacb2894eee347abe1f09 (commit)
via b5040b229739c8c69463fe462aa8f7b4a8e47f7f (commit)
via 357106fc545e6d506c4ec757d306a955c68d1d5f (commit)
via 8b52836ccac5c331b30812c608d52aa7fc538de5 (commit)
via e715842e4d36c12fb17a8ee3d0a41218ff86ad7c (commit)
via 20b2131e3753a2ac7e78597a95bf244748b7dd3c (commit)
via 0f988d9f9fc26ec5dd3ee1e298ac544af3da2fd3 (commit)
via efa6b47c19bc9f992f1c5c0196e07a01d030ecce (commit)
via ed04555e46292f9d573372b07000384b6f0118af (commit)
via 46adf014f18c6b3f9a685b8f0fdd0775a583a7c5 (commit)
via 9b76badecd4b688c95c5f83ecdc168922e48680b (commit)
via 07520bd77da400ca476432f8bedcd934d992ec81 (commit)
via 2ab68057dceb0d471daf4524ba338f8f45e942f2 (commit)
via 11981fea517310f13154bf7695f0278b571ac28a (commit)
via 092dbe3f2af0a0c010a081f64947dbedb11b3468 (commit)
via bfab5a33ceabe3f0d31bd465d13308c8b84adf68 (commit)
via ef51c8418dc44bf2882c898990b30fc76ca9a97b (commit)
via ab642e89554bedf0a66c2358db71ec16ddeb2e7f (commit)
via 91c2cf35e41642a997df020de797324bb4cfedcc (commit)
via c6e8dd84e81f5686d45cc41f514d4f61d075a276 (commit)
via 94282a20ab88b590e8b3b2522da2873ca81c632a (commit)
via 4ddb345fdc31614e191b0a0dea282ddd2ebe279e (commit)
via 18b04945884fbcc1783e52bed845c34395c0acf8 (commit)
via 7d25b201c0bc91987c4d9743d0c21b9486b98fd8 (commit)
via b01c18148a840b0d5719cbcd2653bf1b346e45f9 (commit)
via 41f528a9eacdb430406a0d9047049585cae31db8 (commit)
via 0fed56c3692e358184958cc1263cff67db0f62cb (commit)
via 1173960107363c04608726b57218a54d2b3b3d56 (commit)
via e76affc220a5f62b24e34152afdda62328a327ec (commit)
via d15cad92c958a6380c90ba76a2ea968e1d8304dc (commit)
via e098bcfbef9b8a66c3330bd37c6bbd8d72a1399e (commit)
via a7d0518a8c66ebc0eb471eccd67054d27caa07a3 (commit)
via b93bdb9b324b7dc56bd12b5c781e20275bfc3310 (commit)
via 351ce9ee1612362800453a280dabc012565493c6 (commit)
via 673ef8efd5d474d66d62d134348730518160cbf9 (commit)
via 4cf570ad0a107cbf70a6e96e8db30eb2c8b8a2ff (commit)
via aa35477883e1a5b1740092296d463ecfd277dbbb (commit)
via 701074ebbf30930b92f8b06d9cc88428aed8db5f (commit)
via e009438536339927f645f9a272c23e43cd0660fc (commit)
via c3bde52d5e1e2748f9d60aa8740fa044260913d5 (commit)
via 6d8da5a713e33008a9e8bac1ba24367a3ba86a10 (commit)
via d63457baaa31c80bb1ffeefd4f111c7d92685c8c (commit)
via dcd6d7ff4c0671a0995fe4051cea0e525d3f82bc (commit)
via 61fdce086a40930595e70168340ee68080b327bf (commit)
via f17fad51f1533f89600fb3c2e265ee2ad79c3f53 (commit)
via 44113e516b30bb58dd7481b2b87a7f88c0ec51a7 (commit)
via 7d4cc051f1ab3470bb5f7b5f8ea9e622fc7c7c9b (commit)
via bbfee9cc079856d3b437a1bbb69b4157092cbf97 (commit)
via 797d30d14f37c6d3fdce9c1140ffebd91021bfb6 (commit)
via 6bdd521c49d10867b63158837f6fdc6d06d1f256 (commit)
via 56d8610a9e10792048a10cce86196deee928e203 (commit)
via 9a4db0085e43df8d8facd885eb9c9a0b52280090 (commit)
via c88718438ee67b52cfea003b9e3ce1e5fe234bd8 (commit)
via dd7fb442ed97cc469db4275fdc3d4628cd44ea79 (commit)
via 032f9633f4a353c11d0d855984aad0f0392a6ac1 (commit)
via 6d842a64386a5c64a5136cadb4a1e646ee1901e5 (commit)
via 9741148f1166694a65612ea27be4080dbf7194cc (commit)
via b4591042f81a9ec8157bc74d023f1fa5c91999e7 (commit)
via 834f8d0f752eda6b2baa5dffb48bc0d86de8c90a (commit)
via 27a209e24883177391c382906dcd0104a54faf79 (commit)
via 1c71878fcb9d5579383561cdaacd78b81fc28694 (commit)
via 4d18d306085f15ff218dd7dca303aa53122aa2d3 (commit)
via 12114c5c973d70be91bfe946962e4373fa4d890a (commit)
via 8820f1314ddcaea75e069f2a11bced9bd1b80ef8 (commit)
via c5825a1d48bb2def1c6113629e30de4ac9dd2b0a (commit)
via a0007d1c88df41e7796f89e24f7af5b40660fbf3 (commit)
from 1db4e8af5cf9a8600e8005807f0aa5109756c064 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 65f4be2b65bf19baad6bbeda742b44dff7cd9b4a
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 10:32:08 2011 +0100
[213] Remove some unneeded assignments in tests
commit a3ba4cca05891f1052aae6bbe28c125799c7fe6f
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 10:07:55 2011 +0100
[213] Remove temporary workaround
commit 4dc03f5419813b974b9794aa2cba4f55557fbbb5
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 10:02:04 2011 +0100
[213] XfrIn is special
Readded as it got lost in the merges somewhere.
commit dc2ea48db152796f6c0f62641f00646ef32e2b9c
Merge: b513f0ab652e11892c232b6170f675fbb9990609 01c6801b65e167ba2cf635143b988bf4bcbbdc68
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 10:01:36 2011 +0100
Merge branch 'master' into trac213-incremental
Conflicts:
src/bin/bind10/bind10_src.py.in
commit b513f0ab652e11892c232b6170f675fbb9990609
Merge: bde035f1ebcb1a9c7678692538f9aec18f5232e6 71de39fb8126b7200b2f6dcd9689a000c958fe0e
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 09:35:04 2011 +0100
Merge branch 'trac213-incremental-noroot' into trac213-incremental
Conflicts:
src/bin/bind10/bind10_src.py.in
src/bin/bind10/tests/bind10_test.py.in
commit bde035f1ebcb1a9c7678692538f9aec18f5232e6
Merge: b85213cd68ec24c5deede886d466bf0911b9e762 0f7a43ef24e2fedfa554200cbfa3d83971dbfd90
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Thu Nov 10 09:21:05 2011 +0100
Merge branch 'trac213-incremental-config' into trac213-incremental
Conflicts:
src/bin/bind10/bind10_messages.mes
src/bin/bind10/bind10_src.py.in
src/bin/bind10/tests/bind10_test.py.in
commit b85213cd68ec24c5deede886d466bf0911b9e762
Merge: 056a1342f0d73cf53a37ed672a8a4ad907c4cfa2 4aa0057db95051e8e554bb5fcbcfbfecf822a5cd
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Wed Nov 9 21:45:33 2011 +0100
Merge branch 'trac213-incremental-restarts' into trac213-incremental
Conflicts:
src/bin/bind10/bind10_src.py.in
commit 056a1342f0d73cf53a37ed672a8a4ad907c4cfa2
Merge: 1db4e8af5cf9a8600e8005807f0aa5109756c064 6d2960ff386a85c9738fc4cfd3975ee1d58eaa04
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Wed Nov 9 21:34:45 2011 +0100
Merge branch 'trac213-incremental-families' into trac213-incremental
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 67 ++++
configure.ac | 82 +++--
src/bin/auth/query.cc | 39 ++-
src/bin/auth/query.h | 5 +
src/bin/auth/tests/query_unittest.cc | 122 ++++++-
src/bin/bind10/bind10_messages.mes | 47 +--
src/bin/bind10/bind10_src.py.in | 300 ++++-----------
src/bin/bind10/bob.spec | 73 +++-
src/bin/bind10/run_bind10.sh.in | 3 +-
src/bin/bind10/tests/bind10_test.py.in | 412 +++++++++++++-------
src/bin/bindctl/bindcmd.py | 131 ++++---
src/bin/bindctl/bindctl_main.py.in | 3 +-
src/bin/bindctl/tests/bindctl_test.py | 126 +++---
src/bin/cmdctl/cmdctl.py.in | 93 +++---
src/bin/cmdctl/cmdctl_messages.mes | 3 +
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 8 +-
src/bin/xfrin/tests/xfrin_test.py | 207 +++++++++-
src/bin/xfrin/xfrin.py.in | 99 ++++--
src/bin/xfrin/xfrin_messages.mes | 6 +
src/lib/acl/dns.h | 4 +-
src/lib/acl/loader.h | 6 +-
src/lib/asiolink/Makefile.am | 9 +-
src/lib/asiolink/dummy_io_cb.h | 7 +-
src/lib/asiolink/io_address.cc | 15 +
src/lib/asiolink/io_address.h | 18 +
src/lib/asiolink/io_asio_socket.h | 20 +-
src/lib/asiolink/tests/io_address_unittest.cc | 16 +
src/lib/cache/logger.h | 2 +-
src/lib/cache/message_cache.h | 2 +
src/lib/cache/resolver_cache.h | 4 +-
src/lib/cache/rrset_entry.h | 4 +-
src/lib/cc/logger.h | 2 +-
src/lib/cc/session.cc | 3 +-
src/lib/config/tests/testdata/spec32.spec | 21 +
src/lib/datasrc/database.cc | 91 +++--
src/lib/datasrc/database.h | 168 +++++++--
src/lib/datasrc/iterator.h | 44 ++
src/lib/datasrc/logger.h | 2 +-
src/lib/datasrc/memory_datasrc.cc | 4 +
src/lib/datasrc/rbtree.h | 6 +-
src/lib/datasrc/sqlite3_accessor.cc | 221 ++++++++---
src/lib/datasrc/sqlite3_accessor.h | 23 +-
src/lib/datasrc/tests/Makefile.am | 1 +
src/lib/datasrc/tests/database_unittest.cc | 278 ++++++++++----
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 412 ++++++++++++++++++--
src/lib/datasrc/tests/testdata/test.sqlite3 | Bin 43008 -> 44032 bytes
.../{test.sqlite3 => test.sqlite3.nodiffs} | Bin 43008 -> 43008 bytes
src/lib/dhcp/libdhcp.cc | 60 +++-
src/lib/dhcp/libdhcp.h | 26 ++-
src/lib/dhcp/option.cc | 169 ++++++---
src/lib/dhcp/option.h | 92 ++++-
src/lib/dhcp/option6_ia.cc | 4 +-
src/lib/dhcp/option6_iaaddr.cc | 4 +-
src/lib/dhcp/pkt4.cc | 88 ++++-
src/lib/dhcp/pkt4.h | 73 +++-
src/lib/dhcp/pkt6.cc | 15 +-
src/lib/dhcp/pkt6.h | 2 +-
src/lib/dhcp/tests/Makefile.am | 2 -
src/lib/dhcp/tests/libdhcp_unittest.cc | 123 ++++++-
src/lib/dhcp/tests/option6_addrlst_unittest.cc | 17 +-
src/lib/dhcp/tests/option6_ia_unittest.cc | 4 +-
src/lib/dhcp/tests/option6_iaaddr_unittest.cc | 2 +
src/lib/dhcp/tests/option_unittest.cc | 168 ++++++++-
src/lib/dhcp/tests/pkt4_unittest.cc | 344 +++++++++++-----
src/lib/dhcp/tests/pkt6_unittest.cc | 8 +-
src/lib/dns/message.h | 4 +-
src/lib/dns/messagerenderer.cc | 2 -
src/lib/dns/name.cc | 2 +-
src/lib/dns/python/name_python.cc | 29 ++-
src/lib/dns/python/tests/name_python_test.py | 9 +
src/lib/dns/rdatafields.h | 2 +-
src/lib/dns/rrset.h | 2 +-
src/lib/dns/tsigkey.h | 13 +-
src/lib/log/log_formatter.h | 2 +-
src/lib/log/logger_level_impl.h | 2 +-
src/lib/log/logger_manager_impl.h | 2 -
src/lib/log/logger_specification.h | 2 +-
src/lib/log/message_dictionary.h | 2 +-
src/lib/nsas/nameserver_address_store.h | 5 +-
src/lib/nsas/zone_entry.h | 2 +-
src/lib/python/isc/bind10/sockcreator.py | 5 +-
src/lib/python/isc/bind10/special_component.py | 41 ++-
src/lib/python/isc/bind10/tests/component_test.py | 33 ++-
src/lib/python/isc/config/ccsession.py | 1 +
src/lib/python/isc/config/cfgmgr.py | 11 +-
src/lib/python/isc/config/config_data.py | 8 +-
src/lib/python/isc/config/tests/ccsession_test.py | 49 +++-
src/lib/python/isc/config/tests/cfgmgr_test.py | 14 +-
.../python/isc/config/tests/config_data_test.py | 2 +-
src/lib/python/isc/datasrc/Makefile.am | 1 +
src/lib/python/isc/datasrc/iterator_inc.cc | 33 ++
src/lib/python/isc/datasrc/iterator_python.cc | 29 ++-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 25 ++
src/lib/resolve/recursive_query.cc | 1 +
src/lib/resolve/recursive_query.h | 16 +-
src/lib/resolve/resolve.h | 1 -
src/lib/server_common/client.h | 2 +-
src/lib/server_common/logger.h | 2 +-
src/lib/util/buffer.h | 22 +-
src/lib/util/tests/buffer_unittest.cc | 32 ++
tests/lettuce/README | 127 ++++++
tests/lettuce/README.tutorial | 157 ++++++++
.../lettuce/configurations/example.org.config.orig | 17 +
tests/lettuce/configurations/example2.org.config | 18 +
tests/lettuce/configurations/no_db_file.config | 10 +
.../lettuce/data/empty_db.sqlite3 | Bin 11264 -> 11264 bytes
.../lettuce/data}/example.org.sqlite3 | Bin 14336 -> 14336 bytes
tests/lettuce/features/example.feature | 142 +++++++
tests/lettuce/features/terrain/bind10_control.py | 108 +++++
tests/lettuce/features/terrain/querying.py | 279 +++++++++++++
tests/lettuce/features/terrain/steps.py | 73 ++++
tests/lettuce/features/terrain/terrain.py | 360 +++++++++++++++++
.../lettuce/setup_intree_bind10.sh.in | 8 +-
tests/system/bindctl/tests.sh | 5 +-
tests/system/ixfr/in-3/tests.sh | 21 +-
tests/system/ixfr/named_noixfr.conf | 1 +
116 files changed, 4897 insertions(+), 1247 deletions(-)
copy src/lib/datasrc/tests/testdata/{test.sqlite3 => test.sqlite3.nodiffs} (100%)
create mode 100644 tests/lettuce/README
create mode 100644 tests/lettuce/README.tutorial
create mode 100644 tests/lettuce/configurations/example.org.config.orig
create mode 100644 tests/lettuce/configurations/example2.org.config
create mode 100644 tests/lettuce/configurations/no_db_file.config
copy src/lib/datasrc/tests/testdata/rwtest.sqlite3 => tests/lettuce/data/empty_db.sqlite3 (96%)
copy {src/lib/datasrc/tests/testdata => tests/lettuce/data}/example.org.sqlite3 (100%)
create mode 100644 tests/lettuce/features/example.feature
create mode 100644 tests/lettuce/features/terrain/bind10_control.py
create mode 100644 tests/lettuce/features/terrain/querying.py
create mode 100644 tests/lettuce/features/terrain/steps.py
create mode 100644 tests/lettuce/features/terrain/terrain.py
copy src/bin/bind10/run_bind10.sh.in => tests/lettuce/setup_intree_bind10.sh.in (69%)
-----------------------------------------------------------------------
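The diffstat above adds a Lettuce-based system test framework under tests/lettuce/ (described in ChangeLog entry 312 in the diff below). As a rough, hypothetical sketch of what a step definition in the style of features/terrain/*.py looks like -- the step text and the world attribute here are invented for illustration, not taken from the actual terrain files:

    from lettuce import step, world

    @step(r'I have bind10 running with configuration (\S+)')
    def have_bind10_running(step, config_file):
        # The real terrain would start the BIND 10 processes here; this
        # illustrative step only records which configuration was requested.
        world.config_file = config_file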
diff --git a/ChangeLog b/ChangeLog
index c8da2f7..45671b7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,70 @@
+315. [func] tomek
+ libdhcp: Support for DHCPv4 packet manipulation is now implemented.
+ All fixed fields are now supported. Generic support for DHCPv4
+ options is available (both parsing and assembly). There is no code
+ that uses this new functionality yet, so it is not usable directly
+ at this time. This code will be used by the upcoming b10-dhcp4 daemon.
+ (Trac #1228, git 31d5a4f66b18cca838ca1182b9f13034066427a7)
+
+314. [bug] jelte
+ b10-xfrin would previously initiate incoming transfers upon
+ receiving NOTIFY messages from any address (if the zone was
+ known to b10-xfrin, and using the configured address). It now
+ only starts a transfer if the source address from the NOTIFY
+ packet matches the configured master address and port. This was
+ really already fixed in release bind10-devel-20111014, but there
+ were some deferred cleanups to add.
+ (Trac #1298, git 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2)
+
+313. [func] jinmei
+ datasrc: Added C++ API for adding zone differences to database
+ based data sources. It's intended to be used to support
+ IXFR-in and dynamic update (so they can subsequently be retrieved
+ for IXFR-out). The addRecordDiff method of the DatabaseAccessor
+ defines the interface, and a concrete implementation for SQLite3
+ was provided.
+ (Trac #1329, git 1aa233fab1d74dc776899df61181806679d14013)
+
+312. [func] jelte
+ Added an initial framework for doing system tests using the
+ cucumber-based BDD tool Lettuce. A number of general steps are
+ included, for instance running bind10 with specific
+ configurations, sending queries, and inspecting query answers. A
+ few very basic tests are included as well.
+ (Trac #1290, git 6b75c128bcdcefd85c18ccb6def59e9acedd4437)
+
+311. [bug] jelte
+ Fixed a bug in bindctl where tab-completion for names that
+ contain a hyphen resulted in unexpected behaviour, such as
+ appending the already-typed part again.
+ (Trac #1345, git f80ab7879cc29f875c40dde6b44e3796ac98d6da)
+
+310. [bug] jelte
+ Fixed a bug where bindctl could not set a value that is optional
+ and has no default, resulting in the error that the setting
+ itself was unknown. bindctl now correctly sees the setting and
+ is able to set it.
+ (Trac #1344, git 0e776c32330aee466073771600390ce74b959b38)
+
+309. [bug] jelte
+ Fixed a bug in bindctl where the removal of elements from a set
+ with default values was not stored, unless the set had been
+ modified in another way already.
+ (Trac #1343, git 25c802dd1c30580b94345e83eeb6a168ab329a33)
+
+308. [build] jelte
+ The configure script will now use pkg-config for finding
+ information about the Botan library. If pkg-config is unavailable,
+ or unaware of Botan, it will fall back to botan-config. It will
+ also use botan-config when a specific botan library directory is
+ given using the '--with-botan=' flag.
+ (Trac #1194, git dc491833cf75ac1481ba1475795b0f266545013d)
+
+307. [func] vorner
+ When an incoming zone transfer fails using IXFR, it is retried
+ with AXFR automatically.
+ (Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
+
306. [bug] Stephen
Boss process now waits for the configuration manager to initialize
itself before continuing with startup. This fixes a race condition
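ChangeLog entry 313 above introduces DatabaseAccessor::addRecordDiff() for recording zone differences in database data sources. The real interface is C++ (with the SQLite3 implementation in sqlite3_accessor.cc); the Python snippet below is only a conceptual sketch, with invented class and method names, of the idea of journalling per-serial delete/add operations so IXFR-out can later replay them:

    # Conceptual sketch only; not the actual DatabaseAccessor API.
    DIFF_DELETE, DIFF_ADD = 0, 1

    class DiffJournal:
        def __init__(self):
            self._diffs = []                      # (serial, operation, rr) tuples

        def add_record_diff(self, serial, operation, rr):
            self._diffs.append((serial, operation, rr))

        def get_diffs(self, from_serial, to_serial):
            # IXFR-out would read back everything between two SOA serials.
            return [d for d in self._diffs
                    if from_serial <= d[0] < to_serial]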
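ChangeLog entry 307 above notes that a failed incoming IXFR is now retried with AXFR. A minimal sketch of that fallback pattern, using stand-in callables rather than the actual b10-xfrin routines:

    class XfrFailure(Exception):
        """Stand-in for whatever exception signals a failed transfer attempt."""

    def transfer_zone(zone_name, master_addr, do_ixfr, do_axfr):
        """Try an incremental transfer first; fall back to a full transfer.

        do_ixfr/do_axfr are callables standing in for the real transfer
        routines in xfrin.py.in; this only illustrates the fallback pattern.
        """
        try:
            return do_ixfr(zone_name, master_addr)
        except XfrFailure:
            # IXFR failed (e.g. the master keeps no journal), so retry
            # the whole zone with AXFR.
            return do_axfr(zone_name, master_addr)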
diff --git a/configure.ac b/configure.ac
index 1fdf5bf..9723b8d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -447,41 +447,64 @@ if test "${botan_path}" != "yes" ; then
AC_MSG_ERROR([${botan_path}/bin/botan-config not found])
fi
else
+ # First see if pkg-config knows of it.
+ # Unfortunately, the botan.pc files have their minor version in them
+ # too, so we need to try them one by one
+ BOTAN_CONFIG=""
+ AC_PATH_PROG([PKG_CONFIG], [pkg-config])
+ if test "$PKG_CONFIG" != "" ; then
+ BOTAN_VERSIONS="botan-1.10 botan-1.9 botan-1.8"
+ for version in $BOTAN_VERSIONS; do
+ AC_MSG_CHECKING([Checking botan version with pkg-config $version])
+
+ if [ $PKG_CONFIG --exists ${version} ]; then
+ AC_MSG_RESULT([found])
+ BOTAN_CONFIG="$PKG_CONFIG ${version}"
+ break
+ else
+ AC_MSG_RESULT([not found])
+ fi
+ done
+ fi
+ # If we had no pkg-config, or it didn't know about botan, use botan-config
+ if test "$BOTAN_CONFIG" = "" ; then
AC_PATH_PROG([BOTAN_CONFIG], [botan-config])
+ fi
fi
-if test -x "${BOTAN_CONFIG}" ; then
- BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
- # We expect botan-config --libs to contain -L<path_to_libbotan>, but
- # this is not always the case. As a heuristics workaround we add
- # -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
- # (but using include instead of lib) below.
+BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+
+# We expect botan-config --libs to contain -L<path_to_libbotan>, but
+# this is not always the case. As a heuristics workaround we add
+# -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
+# (but using include instead of lib) below.
+if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
- BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
- BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+ BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
- BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
- # See python_rpath for some info on why we do this
- if test $rpath_available = yes; then
- BOTAN_RPATH=
- for flag in ${BOTAN_LDFLAGS}; do
- BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
- done
- AC_SUBST(BOTAN_RPATH)
-
- # According to the libtool manual, it should be sufficient if we
- # specify the "-R libdir" in our wrapper library of botan (no other
- # programs will need libbotan directly); "libdir" should be added to
- # the program's binary image. But we've seen in our build environments
- # that (some versions of?) libtool doesn't propagate -R as documented,
- # and it caused a linker error at run time. To work around this, we
- # also add the rpath to the global LDFLAGS.
- LDFLAGS="$BOTAN_RPATH $LDFLAGS"
- fi
-
- AC_SUBST(BOTAN_LDFLAGS)
- AC_SUBST(BOTAN_INCLUDES)
+ BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
fi
+# See python_rpath for some info on why we do this
+if test $rpath_available = yes; then
+ BOTAN_RPATH=
+ for flag in ${BOTAN_LDFLAGS}; do
+ BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
+ done
+AC_SUBST(BOTAN_RPATH)
+
+# According to the libtool manual, it should be sufficient if we
+# specify the "-R libdir" in our wrapper library of botan (no other
+# programs will need libbotan directly); "libdir" should be added to
+# the program's binary image. But we've seen in our build environments
+# that (some versions of?) libtool doesn't propagate -R as documented,
+# and it caused a linker error at run time. To work around this, we
+# also add the rpath to the global LDFLAGS.
+ LDFLAGS="$BOTAN_RPATH $LDFLAGS"
+fi
+
+AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_INCLUDES)
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
@@ -968,6 +991,7 @@ AC_OUTPUT([doc/version.ent
src/lib/util/python/mkpywrapper.py
src/lib/util/python/gen_wiredata.py
src/lib/server_common/tests/data_path.h
+ tests/lettuce/setup_intree_bind10.sh
tests/system/conf.sh
tests/system/run.sh
tests/system/glue/setup.sh
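The configure.ac hunk above (see also ChangeLog entry 308) probes pkg-config for the versioned botan modules and falls back to botan-config. The same detection logic, rendered as a small Python sketch for clarity -- the module names mirror BOTAN_VERSIONS; everything else is illustrative:

    import shutil
    import subprocess

    def find_botan_config():
        pkg_config = shutil.which("pkg-config")
        if pkg_config:
            for module in ("botan-1.10", "botan-1.9", "botan-1.8"):
                if subprocess.call([pkg_config, "--exists", module]) == 0:
                    # pkg-config knows this botan; use it for --libs/--cflags.
                    return [pkg_config, module]
        # No usable pkg-config data; fall back to the botan-config script.
        botan_config = shutil.which("botan-config")
        return [botan_config] if botan_config else None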
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 547a4ff..b2e0234 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -168,6 +168,24 @@ Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
}
void
+Query::addWildcardProof(ZoneFinder& finder) {
+ // The query name shouldn't exist in the zone if there was no wildcard
+ // substitution. Confirm that by specifying NO_WILDCARD: the search should
+ // result in NXDOMAIN, and the NSEC RR that proves it should be returned.
+ const ZoneFinder::FindResult fresult =
+ finder.find(qname_, RRType::NSEC(), NULL,
+ dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+ if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+ fresult.rrset->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "Unexpected result for wildcard proof");
+ return;
+ }
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(fresult.rrset),
+ dnssec_);
+}
+
+void
Query::addAuthAdditional(ZoneFinder& finder) {
// Fill in authority and additional sections.
ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
@@ -259,6 +277,7 @@ Query::process() {
break;
}
case ZoneFinder::CNAME:
+ case ZoneFinder::WILDCARD_CNAME:
/*
* We don't do chaining yet. Therefore handling a CNAME is
* mostly the same as handling SUCCESS, but we didn't get
@@ -271,8 +290,15 @@ Query::process() {
response_.addRRset(Message::SECTION_ANSWER,
boost::const_pointer_cast<RRset>(db_result.rrset),
dnssec_);
+
+ // If the answer is a result of wildcard substitution,
+ // add a proof that there's no closer name.
+ if (dnssec_ && db_result.code == ZoneFinder::WILDCARD_CNAME) {
+ addWildcardProof(*result.zone_finder);
+ }
break;
case ZoneFinder::SUCCESS:
+ case ZoneFinder::WILDCARD:
if (qtype_is_any) {
// If query type is ANY, insert all RRs under the domain
// into answer section.
@@ -299,6 +325,12 @@ Query::process() {
{
addAuthAdditional(*result.zone_finder);
}
+
+ // If the answer is a result of wildcard substitution,
+ // add a proof that there's no closer name.
+ if (dnssec_ && db_result.code == ZoneFinder::WILDCARD) {
+ addWildcardProof(*result.zone_finder);
+ }
break;
case ZoneFinder::DELEGATION:
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
@@ -324,10 +356,9 @@ Query::process() {
}
break;
default:
- // These are new result codes (WILDCARD and WILDCARD_NXRRSET)
- // They should not happen from the in-memory and the database
- // backend isn't used yet.
- // TODO: Implement before letting the database backends in
+ // This is basically a bug in the data source implementation,
+ // but could also happen in the middle of development where
+ // we try to add a new result code.
isc_throw(isc::NotImplemented, "Unknown result code");
break;
}
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index f43dc77..3282c0d 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -77,6 +77,11 @@ private:
void addNXDOMAINProof(isc::datasrc::ZoneFinder& finder,
isc::dns::ConstRRsetPtr nsec);
+ /// Add NSEC RRs that prove a wildcard answer is the best one.
+ ///
+ /// This corresponds to Section 3.1.3.3 of RFC 4035.
+ void addWildcardProof(isc::datasrc::ZoneFinder& finder);
+
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records) and add them to the additional section.
///
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 943c9ec..16a2409 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -92,6 +92,14 @@ const char* const other_zone_rrs =
"cnamemailer.example.com. 3600 IN CNAME www.example.com.\n"
"cnamemx.example.com. 3600 IN MX 10 cnamemailer.example.com.\n"
"mx.delegation.example.com. 3600 IN A 192.0.2.100\n";
+// Wildcards
+const char* const wild_txt = "*.wild.example.com. 3600 IN A 192.0.2.7\n";
+const char* const nsec_wild_txt =
+ "*.wild.example.com. 3600 IN NSEC www.example.com. A NSEC RRSIG\n";
+const char* const cnamewild_txt =
+ "*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
+const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
+ "3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
// Used in NXDOMAIN proof test. We are going to test some unusual case where
// the best possible wildcard is below the "next domain" of the NSEC RR that
// proves the NXDOMAIN, i.e.,
@@ -170,7 +178,8 @@ public:
cname_nxdom_txt << cname_out_txt << dname_txt << dname_a_txt <<
other_zone_rrs << no_txt << nz_txt <<
nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
- nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt;
+ nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
+ wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt;
masterLoad(zone_stream, origin_, rrclass_,
boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -259,6 +268,24 @@ private:
boost::scoped_ptr<ZoneFinder::FindResult> nsec_result_;
};
+// A helper function that generates a new RRset based on "wild_rrset",
+// replacing its owner name with 'real_name'.
+ConstRRsetPtr
+substituteWild(const RRset& wild_rrset, const Name& real_name) {
+ RRsetPtr rrset(new RRset(real_name, wild_rrset.getClass(),
+ wild_rrset.getType(), wild_rrset.getTTL()));
+ // For simplicity we only consider the case with one RDATA (for now)
+ rrset->addRdata(wild_rrset.getRdataIterator()->getCurrent());
+ ConstRRsetPtr wild_sig = wild_rrset.getRRsig();
+ if (wild_sig) {
+ RRsetPtr sig(new RRset(real_name, wild_sig->getClass(),
+ wild_sig->getType(), wild_sig->getTTL()));
+ sig->addRdata(wild_sig->getRdataIterator()->getCurrent());
+ rrset->addRRsig(sig);
+ }
+ return (rrset);
+}
+
ZoneFinder::FindResult
MockZoneFinder::find(const Name& name, const RRType& type,
RRsetList* target, const FindOptions options)
@@ -365,6 +392,33 @@ MockZoneFinder::find(const Name& name, const RRType& type,
return (FindResult(NXRRSET, RRsetPtr()));
}
+ // Another possibility is wildcard. For simplicity we only check
+ // hardcoded specific cases, ignoring other details such as canceling
+ // due to the existence of a closer name.
+ if ((options & NO_WILDCARD) == 0) {
+ const Name wild_suffix("wild.example.com");
+ if (name.compare(wild_suffix).getRelation() ==
+ NameComparisonResult::SUBDOMAIN) {
+ domain = domains_.find(Name("*").concatenate(wild_suffix));
+ assert(domain != domains_.end());
+ RRsetStore::const_iterator found_rrset = domain->second.find(type);
+ assert(found_rrset != domain->second.end());
+ return (FindResult(WILDCARD,
+ substituteWild(*found_rrset->second, name)));
+ }
+ const Name cnamewild_suffix("cnamewild.example.com");
+ if (name.compare(cnamewild_suffix).getRelation() ==
+ NameComparisonResult::SUBDOMAIN) {
+ domain = domains_.find(Name("*").concatenate(cnamewild_suffix));
+ assert(domain != domains_.end());
+ RRsetStore::const_iterator found_rrset =
+ domain->second.find(RRType::CNAME());
+ assert(found_rrset != domain->second.end());
+ return (FindResult(WILDCARD_CNAME,
+ substituteWild(*found_rrset->second, name)));
+ }
+ }
+
// This is an NXDOMAIN case.
// If we need DNSSEC proof, find the "previous name" that has an NSEC RR
// and return NXDOMAIN with the found NSEC. Otherwise, just return the
@@ -804,6 +858,72 @@ TEST_F(QueryTest, nxrrsetWithoutNSEC) {
NULL, mock_finder->getOrigin());
}
+TEST_F(QueryTest, wildcardNSEC) {
+ // The qname matches *.wild.example.com. The response should contain
+ // an NSEC that proves the non-existence of a closer name.
+ Query(memory_client, Name("www.wild.example.com"), RRType::A(), response,
+ true).process();
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 6, 6,
+ (string(wild_txt).replace(0, 1, "www") +
+ string("www.wild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("A") + "\n").c_str(),
+ (zone_ns_txt + string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n") +
+ string(nsec_wild_txt) +
+ string("*.wild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n").c_str(),
+ NULL, // we are not interested in additionals in this test
+ mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, CNAMEwildNSEC) {
+ // Similar to the previous case, but the matching wildcard record is
+ // CNAME.
+ Query(memory_client, Name("www.cnamewild.example.com"), RRType::A(),
+ response, true).process();
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 2, 0,
+ (string(cnamewild_txt).replace(0, 1, "www") +
+ string("www.cnamewild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("CNAME") + "\n").c_str(),
+ (string(nsec_cnamewild_txt) +
+ string("*.cnamewild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n").c_str(),
+ NULL, // we are not interested in additionals in this test
+ mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, badWildcardProof1) {
+ // Unexpected case in wildcard proof: ZoneFinder::find() returns SUCCESS
+ // when NXDOMAIN is expected.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::SUCCESS,
+ mock_finder->delegation_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof2) {
+ // "wildcard proof" doesn't return RRset.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::NXDOMAIN, ConstRRsetPtr());
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof3) {
+ // "wildcard proof" returns empty NSEC.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->empty_nsec_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
+}
+
/*
* This tests the case when there's no SOA and we need a negative answer. It should
* throw in that case.
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index fd83d8d..d850e47 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -20,14 +20,6 @@ The boss process is starting up and will now check if the message bus
daemon is already running. If so, it will not be able to start, as it
needs a dedicated message bus.
-% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-
-% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
-This message shows whether or not the resolver should be
-started according to the configuration.
-
% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
An error was encountered when the boss module specified
statistics data which is invalid for the boss specification file.
@@ -113,27 +105,15 @@ old process was not shut down correctly, and needs to be killed, or
another instance of BIND10, with the same msgq domain socket, is
running, which needs to be stopped.
-% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-
% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
While listening on the message bus channel for messages, it suddenly
disappeared. The msgq daemon may have died. This might lead to an
inconsistent state of the system, and BIND 10 will now shut down.
-% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-
-% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates that a previously started process has terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This doesn't distinguish whether or not the process was supposed to terminate.
% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
The boss process is starting up, and will now process the initial
@@ -169,6 +149,9 @@ The boss module is sending a SIGKILL signal to the given process.
% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
The boss module is sending a SIGTERM signal to the given process.
+% BIND10_SETUID setting UID to %1
+The boss switches the user it runs as to the given UID.
+
% BIND10_SHUTDOWN stopping the server
The boss process received a command or signal telling it to shut down.
It will send a shutdown command to each process. The processes that do
@@ -187,11 +170,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
The boss requested a socket from the creator, but the answer is unknown. This
looks like a programmer error.
-% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-
% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
There should be more data from the socket creator, but it closed the socket.
It probably crashed.
@@ -270,8 +248,15 @@ During the startup process, a number of messages are exchanged between the
Boss process and the processes it starts. This error is output when a
message received by the Boss process is not recognised.
-% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
-The given module is being started or restarted without root privileges.
+% BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.
+The resolver is being started or restarted without root privileges.
If the module needs these privileges, it may have problems starting.
Note that this issue should be resolved by the pending 'socket-creator'
process; once that has been implemented, modules should not need root
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 2bee1a5..0b4f4cb 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -246,15 +246,17 @@ class BoB:
self.cfg_start_resolver = False
self.cfg_start_dhcp6 = False
self.cfg_start_dhcp4 = False
- self.started_auth_family = False
- self.started_resolver_family = False
self.curproc = None
+ # XXX: Not used now, waits for reintroduction of restarts.
self.dead_processes = {}
self.msgq_socket_file = msgq_socket_file
self.nocache = nocache
self.component_config = {}
- self.processes = {}
- self.expected_shutdowns = {}
+ # Some time in the future, a single component may have multiple
+ # processes. If that happens, the name "components" may be
+ # inappropriate. But as the code probably isn't completely ready
+ # for it, we leave it at components for now.
+ self.components = {}
self.runnable = False
self.uid = setuid
self.username = username
@@ -264,7 +266,6 @@ class BoB:
self.cmdctl_port = cmdctl_port
self.brittle = brittle
self.wait_time = wait_time
- self.sockcreator = None
self._component_configurator = isc.bind10.component.Configurator(self,
isc.bind10.special_component.get_specials())
# The priorities here make them start in the correct order. First
@@ -310,70 +311,21 @@ class BoB:
# If this is initial update, don't do anything now, leave it to startup
if not self.runnable:
return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- logger.info(BIND10_START_AS_NON_ROOT, name)
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.component_config['b10-resolver'] = { 'kind': 'needed',
- 'special': 'resolver' }
- self.__propagate_component_config(self.component_config)
- self.started_resolver_family = True
- def resolver_off():
- if 'b10-resolver' in self.component_config:
- del self.component_config['b10-resolver']
- self.__propagate_component_config(self.component_config)
- self.started_resolver_family = False
- def auth_on():
- self.component_config['b10-auth'] = { 'kind': 'needed',
- 'special': 'auth' }
- self.component_config['b10-xfrout'] = { 'kind': 'dispensable',
- 'address': 'Xfrout' }
- self.component_config['b10-xfrin'] = { 'kind': 'dispensable',
- 'special': 'xfrin' }
- self.component_config['b10-zonemgr'] = { 'kind': 'dispensable',
- 'address': 'Zonemgr' }
- self.__propagate_component_config(self.component_config)
- self.started_auth_family = True
- def auth_off():
- if 'b10-zonemgr' in self.component_config:
- del self.component_config['b10-zonemgr']
- if 'b10-xfrin' in self.component_config:
- del self.component_config['b10-xfrin']
- if 'b10-xfrout' in self.component_config:
- del self.component_config['b10-xfrout']
- if 'b10-auth' in self.component_config:
- del self.component_config['b10-auth']
- self.__propagate_component_config(self.component_config)
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
new_config)
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
+ try:
+ if 'components' in new_config:
+ self.__propagate_component_config(new_config['components'])
+ return isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
def get_processes(self):
- pids = list(self.processes.keys())
+ pids = list(self.components.keys())
pids.sort()
process_list = [ ]
for pid in pids:
- process_list.append([pid, self.processes[pid].name])
+ process_list.append([pid, self.components[pid].name()])
return process_list
def _get_stats_data(self):
@@ -422,7 +374,7 @@ class BoB:
"Unknown command")
return answer
- def kill_started_processes(self):
+ def kill_started_components(self):
"""
Called as part of the exception handling when a process fails to
start, this runs through the list of started processes, killing
@@ -430,31 +382,25 @@ class BoB:
"""
logger.info(BIND10_KILLING_ALL_PROCESSES)
- self.stop_creator(True)
-
- for pid in self.processes:
- logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
+ for pid in self.components:
+ logger.info(BIND10_KILL_PROCESS, self.components[pid].name())
+ self.components[pid].kill(True)
+ self.components = {}
- def read_bind10_config(self):
+ def _read_bind10_config(self):
"""
Reads the parameters associated with the BoB module itself.
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
+ This means the list of components we should start now.
+
+ This could easily be combined into start_all_processes, but
+ it stays for historical reasons and because the tests
+ replace the method sometimes.
"""
logger.info(BIND10_READING_BOSS_CONFIGURATION)
config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
- logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+ self.__propagate_component_config(config_data['components'])
def log_starting(self, process, port = None, address = None):
"""
@@ -608,26 +554,18 @@ class BoB:
self.log_starting(name, port, address)
newproc = ProcessInfo(name, args, c_channel_env)
newproc.spawn()
- # This is now done in register_process()
- #self.processes[newproc.pid] = newproc
self.log_started(newproc.pid)
return newproc
- def register_process(self, pid, info):
+ def register_process(self, pid, component):
"""
Put another process into boss to watch over it. When the process
- dies, the info.failed() is called with the exit code.
+ dies, component.failed() is called with the exit code.
It is expected the info is a isc.bind10.component.BaseComponent
subclass (or anything having the same interface).
"""
- if '_procinfo' in dir(info):
- # FIXME: This is temporary and the interface of the component
- # doesn't guarantee the existence.
- self.processes[pid] = info._procinfo
- else:
- # XXX: a short term hack. This is the sockcreator.
- self.sockcreator = info._SockCreator__creator
+ self.components[pid] = component
def start_simple(self, name):
"""
@@ -658,6 +596,8 @@ class BoB:
"""
Start the Authoritative server
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_AUTH)
authargs = ['b10-auth']
if self.nocache:
authargs += ['-n']
@@ -675,6 +615,8 @@ class BoB:
are pure speculation. As with the auth daemon, they should be
read from the configuration database.
"""
+ if self.uid is not None and self.__started:
+ logger.warn(BIND10_START_AS_NON_ROOT_RESOLVER)
self.curproc = "b10-resolver"
# XXX: this must be read from the configuration manager in the future
resargs = ['b10-resolver']
@@ -693,6 +635,8 @@ class BoB:
args = ["b10-cmdctl"]
if self.cmdctl_port is not None:
args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
return self.start_process("b10-cmdctl", args, self.c_channel_env,
self.cmdctl_port)
@@ -727,72 +671,24 @@ class BoB:
return self.start_process("b10-xfrin", args, c_channel_env)
- def start_all_processes(self):
+ def start_all_components(self):
"""
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
+ Starts up all the components. Any exception generated during the
+ starting of the components is handled by the caller.
"""
# Start the real core (sockcreator, msgq, cfgmgr)
self._component_configurator.startup(self.__core_components)
# Connect to the msgq. This is not a process, so it's not handled
# inside the configurator.
- c_channel_env = self.c_channel_env
- self.start_ccsession(c_channel_env)
+ self.start_ccsession(self.c_channel_env)
# Extract the parameters associated with Bob. This can only be
# done after the CC Session is started. Note that the logging
# configuration may override the "-v" switch set on the command line.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- component_config = {}
- if self.cfg_start_auth:
- component_config['b10-auth'] = { 'kind': 'needed',
- 'special': 'auth' }
- self.__propagate_component_config(component_config)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- component_config['b10-resolver'] = { 'kind': 'needed',
- 'special': 'resolver' }
- self.started_resolver_family = True
- self.__propagate_component_config(component_config)
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- component_config['b10-xfrout'] = { 'kind': 'dispensable',
- 'address': 'Xfrout' }
- component_config['b10-xfrin'] = { 'kind': 'dispensable',
- 'special': 'xfrin' }
- component_config['b10-zonemgr'] = { 'kind': 'dispensable',
- 'address': 'Zonemgr' }
- self.__propagate_component_config(component_config)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- component_config['b10-stats'] = { 'kind': 'dispensable',
- 'address': 'Stats' }
- component_config['b10-stats-httpd'] = { 'kind': 'dispensable',
- 'address': 'StatsHttpd' }
- component_config['b10-cmdctl'] = { 'kind': 'needed',
- 'special': 'cmdctl' }
-
- if self.cfg_start_dhcp6:
- component_config['b10-dhcp6'] = { 'kind': 'dispensable',
- 'address': 'DHCP6' }
-
- self.__propagate_component_config(component_config)
-
- self.component_config = component_config
+ self._read_bind10_config()
+
+ # TODO: Return the dropping of privileges
def startup(self):
"""
@@ -816,13 +712,13 @@ class BoB:
# this is the case we want, where the msgq is not running
pass
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
+ # Start all components. If any one fails to start, kill all started
+ # components and exit with an error indication.
try:
self.c_channel_env = c_channel_env
- self.start_all_processes()
+ self.start_all_components()
except Exception as e:
- self.kill_started_processes()
+ self.kill_started_components()
return "Unable to start " + self.curproc + ": " + str(e)
# Started successfully
@@ -836,10 +732,6 @@ class BoB:
(in logs, etc), the recipient is the address on msgq.
"""
logger.info(BIND10_STOP_PROCESS, process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
recipient)
@@ -861,22 +753,6 @@ class BoB:
else:
self.runnable = False
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
-
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
-
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
-
def shutdown(self):
"""Stop the BoB instance."""
logger.info(BIND10_SHUTDOWN)
@@ -891,27 +767,26 @@ class BoB:
time.sleep(1)
self.reap_children()
# next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGTERM, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
try:
- proc_info.process.terminate()
+ component.kill()
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
pass
# finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
+ while self.components:
# XXX: some delay probably useful... how much is uncertain
time.sleep(0.1)
self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- logger.info(BIND10_SEND_SIGKILL, proc_info.name,
- proc_info.pid)
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, component.name(),
+ component.pid())
try:
- proc_info.process.kill()
+ component.kill(True)
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
@@ -933,40 +808,16 @@ class BoB:
# XXX: should be impossible to get any other error here
raise
if pid == 0: break
- if self.sockcreator is not None and self.sockcreator.pid() == pid:
- # This is the socket creator, started and terminated
- # differently. This can't be restarted.
- if self.runnable:
- logger.fatal(BIND10_SOCKCREATOR_CRASHED)
- self.sockcreator = None
- self.runnable = False
- elif pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
- proc_info.name, proc_info.pid)
- else:
- logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
- proc_info.name, proc_info.pid,
- exit_status)
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
+ if pid in self.components:
+ # One of the components we know about. Get information on it.
+ component = self.components.pop(pid)
+ logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
+ exit_status)
+ if component.running() and self.runnable:
+ # Tell it it failed. But only if it matters (we are
+ # not shutting down and the component considers itself
+ # to be running).
+ component.failed(exit_status)
else:
logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
@@ -980,7 +831,16 @@ class BoB:
The values returned can be safely passed into select() as the
timeout value.
+
"""
+ # TODO: This is an artefact of the previous way of handling processes. The
+ # restart queue is currently empty at all times, so this returns None
+ # every time it is called (though that is a relic that is obviously wrong,
+ # it is called and it doesn't hurt).
+ #
+ # It is preserved for archaeological reasons, for the time when we bring
+ # back the delayed restarts; most of it might be useful then (or, if it is
+ # found useless, removed).
next_restart = None
# if we're shutting down, then don't restart
if not self.runnable:
@@ -989,10 +849,6 @@ class BoB:
still_dead = {}
now = time.time()
for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
restart_time = proc_info.restart_schedule.get_restart_time(now)
if restart_time > now:
if (next_restart is None) or (next_restart > restart_time):
@@ -1002,7 +858,7 @@ class BoB:
logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
try:
proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
+ self.components[proc_info.pid] = proc_info
logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
except:
still_dead[proc_info.pid] = proc_info
@@ -1194,6 +1050,10 @@ def main():
while boss_of_bind.runnable:
# clean up any processes that exited
boss_of_bind.reap_children()
+ # XXX: As we don't put anything into the processes to be restarted,
+ # this is really a complicated NOP. But we will try to reintroduce
+ # delayed restarts, so it stays here for now, until we find out if
+ # it's useful.
next_restart = boss_of_bind.restart_processes()
if next_restart is None:
wait_time = None
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index b4cfac6..4a3cc85 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -4,16 +4,71 @@
"module_description": "Master process",
"config_data": [
{
- "item_name": "start_auth",
- "item_type": "boolean",
+ "item_name": "components",
+ "item_type": "named_set",
"item_optional": false,
- "item_default": true
- },
- {
- "item_name": "start_resolver",
- "item_type": "boolean",
- "item_optional": false,
- "item_default": false
+ "item_default": {
+ "b10-auth": { "special": "auth", "kind": "needed", "priority": 10 },
+ "setuid": {
+ "special": "setuid",
+ "priority": 5,
+ "kind": "dispensable"
+ },
+ "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-stats-httpd": {
+ "address": "StatsHttpd",
+ "kind": "dispensable"
+ },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ },
+ "named_set_item_spec": {
+ "item_name": "component",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": { },
+ "map_item_spec": [
+ {
+ "item_name": "special",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "process",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "kind",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": "dispensable"
+ },
+ {
+ "item_name": "address",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "params",
+ "item_optional": true,
+ "item_type": "list",
+ "list_item_spec": {
+ "item_name": "param",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": ""
+ }
+ },
+ {
+ "item_name": "priority",
+ "item_optional": true,
+ "item_type": "integer"
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 50e6e29..9e4abc0 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -45,6 +45,5 @@ export B10_FROM_BUILD
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
-cd ${BIND10_PATH}
-exec ${PYTHON_EXEC} -O bind10 "$@"
+exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
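
[Note: a minimal sketch of the shape the new "components" named set from
bob.spec above takes by the time it reaches the boss config handler,
written as the Python dict the handler receives. The entries mirror the
defaults above and the construct_config() helper in the tests below; the
b10-resolver entry is only an example of what an operator might add.]

    components = {
        'b10-auth':     {'special': 'auth',     'kind': 'needed', 'priority': 10},
        'b10-cmdctl':   {'special': 'cmdctl',   'kind': 'needed'},
        'b10-xfrin':    {'special': 'xfrin',    'kind': 'dispensable'},
        'b10-xfrout':   {'address': 'Xfrout',   'kind': 'dispensable'},
        'b10-resolver': {'special': 'resolver', 'kind': 'needed'},
    }
    # This is the value bob.config_handler() sees for this module:
    config = {'components': components}
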
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 85a949a..0aa6778 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -104,7 +104,7 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, None)
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
@@ -122,7 +122,7 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
self.assertEqual(bob.cc_session, None)
self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.components, {})
self.assertEqual(bob.dead_processes, {})
self.assertEqual(bob.runnable, False)
self.assertEqual(bob.uid, None)
@@ -221,7 +221,7 @@ class MockBob(BoB):
self.dhcp6 = False
self.dhcp4 = False
self.c_channel_env = {}
- self.processes = { }
+ self.components = { }
self.creator = False
class MockSockCreator(isc.bind10.component.Component):
@@ -241,10 +241,7 @@ class MockBob(BoB):
procinfo.pid = 1
return procinfo
- def stop_creator(self, kill=False):
- self.creator = False
-
- def read_bind10_config(self):
+ def _read_bind10_config(self):
# Configuration options are set directly
pass
@@ -278,7 +275,6 @@ class MockBob(BoB):
def start_simple(self, name):
procmap = { 'b10-xfrout': self.start_xfrout,
- 'b10-xfrin': self.start_xfrin,
'b10-zonemgr': self.start_zonemgr,
'b10-stats': self.start_stats,
'b10-stats-httpd': self.start_stats_httpd,
@@ -324,13 +320,13 @@ class MockBob(BoB):
return procinfo
def start_dhcp6(self):
- self.stats = True
+ self.dhcp6 = True
procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
procinfo.pid = 13
return procinfo
def start_dhcp4(self):
- self.stats = True
+ self.dhcp4 = True
procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
procinfo.pid = 14
return procinfo
@@ -346,63 +342,61 @@ class MockBob(BoB):
'b10-cmdctl': self.stop_cmdctl }
procmap[process]()
- # We don't really use all of these stop_ methods. But it might turn out
- # someone would add some stop_ method to BoB and we want that one overriden
- # in case he forgets to update the tests.
+ # Some functions to pretend we stop processes, used by stop_process
def stop_msgq(self):
if self.msgq:
- del self.processes[2]
+ del self.components[2]
self.msgq = False
def stop_cfgmgr(self):
if self.cfgmgr:
- del self.processes[3]
+ del self.components[3]
self.cfgmgr = False
def stop_auth(self):
if self.auth:
- del self.processes[5]
+ del self.components[5]
self.auth = False
def stop_resolver(self):
if self.resolver:
- del self.processes[6]
+ del self.components[6]
self.resolver = False
def stop_xfrout(self):
if self.xfrout:
- del self.processes[7]
+ del self.components[7]
self.xfrout = False
def stop_xfrin(self):
if self.xfrin:
- del self.processes[8]
+ del self.components[8]
self.xfrin = False
def stop_zonemgr(self):
if self.zonemgr:
- del self.processes[9]
+ del self.components[9]
self.zonemgr = False
def stop_stats(self):
if self.stats:
- del self.processes[10]
+ del self.components[10]
self.stats = False
def stop_stats_httpd(self):
if self.stats_httpd:
- del self.processes[11]
+ del self.components[11]
self.stats_httpd = False
def stop_cmdctl(self):
if self.cmdctl:
- del self.processes[12]
+ del self.components[12]
self.cmdctl = False
class TestStartStopProcessesBob(unittest.TestCase):
"""
- Check that the start_all_processes method starts the right combination
- of processes and that the right processes are started and stopped
+ Check that the start_all_components method starts the right combination
+ of components and that the right components are started and stopped
according to changes in configuration.
"""
def check_environment_unchanged(self):
@@ -436,7 +430,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_none(self, bob):
"""
Check that the situation is according to configuration where no servers
- should be started. Some processes still need to be running.
+ should be started. Some components still need to be running.
"""
self.check_started(bob, True, False, False)
self.check_environment_unchanged()
@@ -451,14 +445,14 @@ class TestStartStopProcessesBob(unittest.TestCase):
def check_started_auth(self, bob):
"""
- Check the set of processes needed to run auth only is started.
+ Check the set of components needed to run auth only is started.
"""
self.check_started(bob, True, True, False)
self.check_environment_unchanged()
def check_started_resolver(self, bob):
"""
- Check the set of processes needed to run resolver only is started.
+ Check the set of components needed to run resolver only is started.
"""
self.check_started(bob, True, False, True)
self.check_environment_unchanged()
@@ -467,80 +461,65 @@ class TestStartStopProcessesBob(unittest.TestCase):
"""
Check if proper combinations of DHCPv4 and DHCPv6 can be started
"""
- v4found = 0
- v6found = 0
-
- for pid in bob.processes:
- if (bob.processes[pid].name == "b10-dhcp4"):
- v4found += 1
- if (bob.processes[pid].name == "b10-dhcp6"):
- v6found += 1
-
- # there should be exactly one DHCPv4 daemon (if v4==True)
- # there should be exactly one DHCPv6 daemon (if v6==True)
- self.assertEqual(v4==True, v4found==1)
- self.assertEqual(v6==True, v6found==1)
+ self.assertEqual(v4, bob.dhcp4)
+ self.assertEqual(v6, bob.dhcp6)
self.check_environment_unchanged()
- # Checks the processes started when starting neither auth nor resolver
- # is specified.
- def test_start_none(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
- self.check_started_none(bob)
-
- # Checks the processes started when starting only the auth process
- def test_start_auth(self):
- # Create BoB and ensure correct initialization
+ def construct_config(self, start_auth, start_resolver):
+ # The things that are common, not turned on and off
+ config = {}
+ config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+ config['b10-stats-httpd'] = { 'kind': 'dispensable',
+ 'address': 'StatsHttpd' }
+ config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+ if start_auth:
+ config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+ config['b10-xfrout'] = { 'kind': 'dispensable',
+ 'address': 'Xfrout' }
+ config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
+ config['b10-zonemgr'] = { 'kind': 'dispensable',
+ 'address': 'Zonemgr' }
+ if start_resolver:
+ config['b10-resolver'] = { 'kind': 'needed',
+ 'special': 'resolver' }
+ return {'components': config}
+
+ def config_start_init(self, start_auth, start_resolver):
+ """
+ Test that the configuration is loaded at startup.
+ """
bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ config = self.construct_config(start_auth, start_resolver)
+ class CC:
+ def get_full_config(self):
+ return config
+ # Provide the fake CC with data
+ bob.ccs = CC()
+ # And make sure it's not overwritten
+ def start_ccsession():
+ bob.ccsession = True
+ bob.start_ccsession = lambda _: start_ccsession()
+ # We need to restore the original _read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ bob.start_all_components()
+ self.check_started(bob, True, start_auth, start_resolver)
+ self.check_environment_unchanged()
- self.check_started_auth(bob)
+ def test_start_none(self):
+ self.config_start_init(False, False)
- # Checks the processes started when starting only the resolver process
def test_start_resolver(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
+ self.config_start_init(False, True)
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_resolver(bob)
+ def test_start_auth(self):
+ self.config_start_init(True, False)
- # Checks the processes started when starting both auth and resolver process
def test_start_both(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_both(bob)
+ self.config_start_init(True, True)
def test_config_start(self):
"""
- Test that the configuration starts and stops processes according
+ Test that the configuration starts and stops components according
to configuration changes.
"""
@@ -548,17 +527,13 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (nothing much should be started, as in
- # test_start_none)
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Enable both at once
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Not touched by empty change
@@ -566,11 +541,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_both(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': True, 'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
# Turn them both off again
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Not touched by empty change
@@ -578,47 +553,45 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.check_started_none(bob)
# Not touched by change to the same configuration
- bob.config_handler({'start_auth': False, 'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop auth separately
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Start and stop resolver separately
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_resolver': False})
+ bob.config_handler(self.construct_config(False, False))
self.check_started_none(bob)
# Alternate
- bob.config_handler({'start_auth': True})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
- bob.config_handler({'start_auth': False, 'start_resolver': True})
+ bob.config_handler(self.construct_config(False, True))
self.check_started_resolver(bob)
- bob.config_handler({'start_auth': True, 'start_resolver': False})
+ bob.config_handler(self.construct_config(True, False))
self.check_started_auth(bob)
def test_config_start_once(self):
"""
- Tests that a process is started only once.
+ Tests that a component is started only once.
"""
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # Start processes (both)
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
+ bob.start_all_components()
- bob.start_all_processes()
bob.runnable = True
+ bob.config_handler(self.construct_config(True, True))
self.check_started_both(bob)
bob.start_auth = lambda: self.fail("Started auth again")
@@ -628,12 +601,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.start_resolver = lambda: self.fail("Started resolver again")
# Send again we want to start them. Should not do it, as they are.
- bob.config_handler({'start_auth': True})
- bob.config_handler({'start_resolver': True})
+ bob.config_handler(self.construct_config(True, True))
def test_config_not_started_early(self):
"""
- Test that processes are not started by the config handler before
+ Test that components are not started by the config handler before
startup.
"""
bob = MockBob()
@@ -647,36 +619,29 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.config_handler({'start_auth': True, 'start_resolver': True})
- # Checks that DHCP (v4 and v6) processes are started when expected
+ # Checks that DHCP (v4 and v6) components are started when expected
def test_start_dhcp(self):
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
- # don't care about DNS stuff
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- # v4 and v6 disabled
- bob.cfg_start_dhcp6 = False
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.config_handler(self.construct_config(False, False))
self.check_started_dhcp(bob, False, False)
def test_start_dhcp_v6only(self):
# Create BoB and ensure correct initialization
bob = MockBob()
self.check_preconditions(bob)
-
- # don't care about DNS stuff
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
# v6 only enabled
- bob.cfg_start_dhcp6 = True
- bob.cfg_start_dhcp4 = False
- bob.start_all_processes()
+ bob.start_all_components()
+ bob.runnable = True
+ bob._BoB__started = True
+ config = self.construct_config(False, False)
+ config['components']['b10-dhcp6'] = { 'kind': 'needed',
+ 'address': 'Dhcp6' }
+ bob.config_handler(config)
self.check_started_dhcp(bob, False, True)
# uncomment when dhcpv4 becomes implemented
@@ -690,6 +655,12 @@ class TestStartStopProcessesBob(unittest.TestCase):
#bob.cfg_start_dhcp4 = True
#self.check_started_dhcp(bob, True, True)
+class MockComponent:
+ def __init__(self, name, pid):
+ self.name = lambda: name
+ self.pid = lambda: pid
+
+
class TestBossCmd(unittest.TestCase):
def test_ping(self):
"""
@@ -699,7 +670,7 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("ping", None)
self.assertEqual(answer, {'result': [0, 'pong']})
- def test_show_processes(self):
+ def test_show_processes_empty(self):
"""
Confirm getting a list of processes works.
"""
@@ -707,23 +678,16 @@ class TestBossCmd(unittest.TestCase):
answer = bob.command_handler("show_processes", None)
self.assertEqual(answer, {'result': [0, []]})
- def test_show_processes_started(self):
+ def test_show_processes(self):
"""
Confirm getting a list of processes works.
"""
bob = MockBob()
- bob.start_all_processes()
+ bob.register_process(1, MockComponent('first', 1))
+ bob.register_process(2, MockComponent('second', 2))
answer = bob.command_handler("show_processes", None)
- processes = [[1, 'b10-sockcreator'],
- [2, 'b10-msgq'],
- [3, 'b10-cfgmgr'],
- [5, 'b10-auth'],
- [7, 'b10-xfrout'],
- [8, 'b10-xfrin'],
- [9, 'b10-zonemgr'],
- [10, 'b10-stats'],
- [11, 'b10-stats-httpd'],
- [12, 'b10-cmdctl']]
+ processes = [[1, 'first'],
+ [2, 'second']]
self.assertEqual(answer, {'result': [0, processes]})
class TestParseArgs(unittest.TestCase):
@@ -833,10 +797,12 @@ class TestPIDFile(unittest.TestCase):
self.assertRaises(IOError, dump_pid,
'nonexistent_dir' + os.sep + 'bind10.pid')
+# TODO: Do we want brittle mode? Probably yes. So we need to re-enable it after that.
+@unittest.skip("Brittle mode temporarily broken")
class TestBrittle(unittest.TestCase):
def test_brittle_disabled(self):
bob = MockBob()
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
bob.reap_children()
@@ -849,7 +815,7 @@ class TestBrittle(unittest.TestCase):
def test_brittle_enabled(self):
bob = MockBob()
- bob.start_all_processes()
+ bob.start_all_components()
bob.runnable = True
bob.brittle = True
@@ -862,6 +828,158 @@ class TestBrittle(unittest.TestCase):
sys.stdout = old_stdout
self.assertFalse(bob.runnable)
+class TestBossComponents(unittest.TestCase):
+ """
+ Test the boss propagates component configuration properly to the
+ component configurator and acts sane.
+ """
+ def setUp(self):
+ self.__param = None
+ self.__called = False
+ self.__compconfig = {
+ 'comp': {
+ 'kind': 'needed',
+ 'process': 'cat'
+ }
+ }
+
+ def __unary_hook(self, param):
+ """
+ A hook function that stores the parameter for later examination.
+ """
+ self.__param = param
+
+ def __nullary_hook(self):
+ """
+ A hook function that notes down it was called.
+ """
+ self.__called = True
+
+ def __check_core(self, config):
+ """
+ A function checking that the config contains parts for the valid
+ core component configuration.
+ """
+ self.assertIsNotNone(config)
+ for component in ['sockcreator', 'msgq', 'cfgmgr']:
+ self.assertTrue(component in config)
+ self.assertEqual(component, config[component]['special'])
+ self.assertEqual('core', config[component]['kind'])
+
+ def __check_extended(self, config):
+ """
+ This checks that the config contains the core and one more component.
+ """
+ self.__check_core(config)
+ self.assertTrue('comp' in config)
+ self.assertEqual('cat', config['comp']['process'])
+ self.assertEqual('needed', config['comp']['kind'])
+ self.assertEqual(4, len(config))
+
+ def test_correct_run(self):
+ """
+ Test the situation when we run in usual scenario, nothing fails,
+ we just start, reconfigure and then stop peacefully.
+ """
+ bob = MockBob()
+ # Start it
+ orig = bob._component_configurator.startup
+ bob._component_configurator.startup = self.__unary_hook
+ bob.start_all_components()
+ bob._component_configurator.startup = orig
+ self.__check_core(self.__param)
+ self.assertEqual(3, len(self.__param))
+
+ # Reconfigure it
+ self.__param = None
+ orig = bob._component_configurator.reconfigure
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # Otherwise it does not work
+ bob.runnable = True
+ bob.config_handler({'components': self.__compconfig})
+ self.__check_extended(self.__param)
+ currconfig = self.__param
+ # If we reconfigure it, but it does not contain the components part,
+ # nothing is called
+ bob.config_handler({})
+ self.assertEqual(self.__param, currconfig)
+ self.__param = None
+ bob._component_configurator.reconfigure = orig
+ # Check a configuration that messes up the core components is rejected.
+ compconf = dict(self.__compconfig)
+ compconf['msgq'] = { 'process': 'echo' }
+ result = bob.config_handler({'components': compconf})
+ # Check it rejected it
+ self.assertEqual(1, result['result'][0])
+
+ # We can't call shutdown, that one relies on the stuff in main
+ # We check somewhere else that the shutdown is actually called
+ # from there (the test_kills).
+
+ def test_kills(self):
+ """
+ Test that the boss kills components which don't want to stop.
+ """
+ bob = MockBob()
+ killed = []
+ class ImmortalComponent:
+ """
+ An immortal component. It does not stop when told to (in fact it is
+ never told to). It does not die when killed the first time; it dies
+ only when killed forcefully.
+ """
+ def kill(self, forcefull=False):
+ killed.append(forcefull)
+ if forcefull:
+ bob.components = {}
+ def pid(self):
+ return 1
+ def name(self):
+ return "Immortal"
+ bob.components = {}
+ bob.register_process(1, ImmortalComponent())
+
+ # While at it, we check the configurator shutdown is actually called
+ orig = bob._component_configurator.shutdown
+ bob._component_configurator.shutdown = self.__nullary_hook
+ self.__called = False
+
+ bob.shutdown()
+
+ self.assertEqual([False, True], killed)
+ self.assertTrue(self.__called)
+
+ bob._component_configurator.shutdown = orig
+
+ def test_component_shutdown(self):
+ """
+ Test the component_shutdown sets all variables accordingly.
+ """
+ bob = MockBob()
+ self.assertRaises(Exception, bob.component_shutdown, 1)
+ self.assertEqual(1, bob.exitcode)
+ bob._BoB__started = True
+ bob.component_shutdown(2)
+ self.assertEqual(2, bob.exitcode)
+ self.assertFalse(bob.runnable)
+
+ def test_init_config(self):
+ """
+ Test initial configuration is loaded.
+ """
+ bob = MockBob()
+ # Start it
+ bob._component_configurator.reconfigure = self.__unary_hook
+ # We need to restore the original _read_bind10_config
+ bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+ # And provide a session to read the data from
+ class CC:
+ pass
+ bob.ccs = CC()
+ bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
+ bob.start_all_components()
+ self.__check_extended(self.__param)
+
if __name__ == '__main__':
# store os.environ for test_unchanged_environment
original_os_environ = copy.deepcopy(os.environ)
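
[Note: the boss-side changes above only call a handful of methods on a
component. A sketch of that implied interface, inferred from the calls in
this diff rather than taken from isc.bind10.component, could look like the
class below; the return values are illustrative.]

    class ComponentLike:
        """What the boss code above expects of a component object."""
        def name(self):                 # human-readable name for log messages
            return 'b10-example'
        def pid(self):                  # PID of the underlying process
            return 42
        def running(self):              # started and not yet stopped?
            return True
        def kill(self, forceful=False):
            """Terminate the process; force an unmaskable kill when True."""
        def failed(self, exit_status):
            """Invoked by the boss when the process exits unexpectedly."""
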
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 8c2b674..b67bc4b 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -46,6 +46,16 @@ except ImportError:
# if we have readline support, use that, otherwise use normal stdio
try:
import readline
+ # This is a fix for the problem described in
+ # http://bind10.isc.org/ticket/1345
+ # If '-' is seen as a word-boundary, the final completion-step
+ # (as handled by the cmd module, and hence outside our reach) can
+ # mistakenly add data twice, resulting in wrong completion results
+ # The solution is to remove it.
+ delims = readline.get_completer_delims()
+ delims = delims.replace('-', '')
+ readline.set_completer_delims(delims)
+
my_readline = readline.get_line_buffer
except ImportError:
my_readline = sys.stdin.readline
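
[Note: a stand-alone sketch, not part of the patch, showing what the
delimiter change affects: readline splits the word being completed on the
completer delimiters, so with '-' among them a completer asked to finish
"b10-au" only receives "au", which is what made the final completion step
paste data in twice. The names in the snippet are illustrative.]

    import readline

    def complete(text, state):
        # Show what readline hands to the completer: with '-' among the
        # delimiters, completing "b10-au" gives text == "au"; after the
        # replace() below it gives the whole "b10-au".
        if state == 0:
            print('completer got: %r' % text)
        return None

    readline.set_completer_delims(
        readline.get_completer_delims().replace('-', ''))
    readline.set_completer(complete)
    readline.parse_and_bind('tab: complete')
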
@@ -61,21 +71,21 @@ Type \"<module_name> <command_name> help\" for help on the specific command.
\nAvailable module names: """
class ValidatedHTTPSConnection(http.client.HTTPSConnection):
- '''Overrides HTTPSConnection to support certification
+ '''Overrides HTTPSConnection to support certification
validation. '''
def __init__(self, host, ca_certs):
http.client.HTTPSConnection.__init__(self, host)
self.ca_certs = ca_certs
def connect(self):
- ''' Overrides the connect() so that we do
+ ''' Overrides the connect() so that we do
certificate validation. '''
sock = socket.create_connection((self.host, self.port),
self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
-
+
req_cert = ssl.CERT_NONE
if self.ca_certs:
req_cert = ssl.CERT_REQUIRED
@@ -85,7 +95,7 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
ca_certs=self.ca_certs)
class BindCmdInterpreter(Cmd):
- """simple bindctl example."""
+ """simple bindctl example."""
def __init__(self, server_port='localhost:8080', pem_file=None,
csv_file_dir=None):
@@ -118,29 +128,33 @@ class BindCmdInterpreter(Cmd):
socket.gethostname())).encode())
digest = session_id.hexdigest()
return digest
-
+
def run(self):
'''Parse commands from user and send them to cmdctl. '''
try:
if not self.login_to_cmdctl():
- return
+ return 1
self.cmdloop()
print('\nExit from bindctl')
+ return 0
except FailToLogin as err:
# error already printed when this was raised, ignoring
- pass
+ return 1
except KeyboardInterrupt:
print('\nExit from bindctl')
+ return 0
except socket.error as err:
print('Failed to send request, the connection is closed')
+ return 1
except http.client.CannotSendRequest:
print('Can not send request, the connection is busy')
+ return 1
def _get_saved_user_info(self, dir, file_name):
- ''' Read all the available username and password pairs saved in
+ ''' Read all the available username and password pairs saved in
file(path is "dir + file_name"), Return value is one list of elements
- ['name', 'password'], If get information failed, empty list will be
+ ['name', 'password'], If get information failed, empty list will be
returned.'''
if (not dir) or (not os.path.exists(dir)):
return []
@@ -166,7 +180,7 @@ class BindCmdInterpreter(Cmd):
if not os.path.exists(dir):
os.mkdir(dir, 0o700)
- csvfilepath = dir + file_name
+ csvfilepath = dir + file_name
csvfile = open(csvfilepath, 'w')
os.chmod(csvfilepath, 0o600)
writer = csv.writer(csvfile)
@@ -180,7 +194,7 @@ class BindCmdInterpreter(Cmd):
return True
def login_to_cmdctl(self):
- '''Login to cmdctl with the username and password inputted
+ '''Login to cmdctl with the username and password inputted
from user. After the login is sucessful, the username and
password will be saved in 'default_user.csv', when run the next
time, username and password saved in 'default_user.csv' will be
@@ -246,14 +260,14 @@ class BindCmdInterpreter(Cmd):
if self.login_to_cmdctl():
# successful, so try send again
status, reply_msg = self._send_message(url, body)
-
+
if reply_msg:
return json.loads(reply_msg.decode())
else:
return {}
-
- def send_POST(self, url, post_param = None):
+
+ def send_POST(self, url, post_param = None):
'''Send POST request to cmdctl, session id is send with the name
'cookie' in header.
Format: /module_name/command_name
@@ -312,12 +326,12 @@ class BindCmdInterpreter(Cmd):
def _validate_cmd(self, cmd):
'''validate the parameters and merge some parameters together,
merge algorithm is based on the command line syntax, later, if
- a better command line syntax come out, this function should be
- updated first.
+ a better command line syntax come out, this function should be
+ updated first.
'''
if not cmd.module in self.modules:
raise CmdUnknownModuleSyntaxError(cmd.module)
-
+
module_info = self.modules[cmd.module]
if not module_info.has_command_with_name(cmd.command):
raise CmdUnknownCmdSyntaxError(cmd.module, cmd.command)
@@ -325,17 +339,17 @@ class BindCmdInterpreter(Cmd):
command_info = module_info.get_command_with_name(cmd.command)
manda_params = command_info.get_mandatory_param_names()
all_params = command_info.get_param_names()
-
+
# If help is entered, don't do further parameter validation.
for val in cmd.params.keys():
if val == "help":
return
-
- params = cmd.params.copy()
- if not params and manda_params:
- raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
+
+ params = cmd.params.copy()
+ if not params and manda_params:
+ raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
elif params and not all_params:
- raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
+ raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
list(params.keys())[0])
elif params:
param_name = None
@@ -366,7 +380,7 @@ class BindCmdInterpreter(Cmd):
param_name = command_info.get_param_name_by_position(name, param_count)
cmd.params[param_name] = cmd.params[name]
del cmd.params[name]
-
+
elif not name in all_params:
raise CmdUnknownParamSyntaxError(cmd.module, cmd.command, name)
@@ -375,7 +389,7 @@ class BindCmdInterpreter(Cmd):
if not name in params and not param_nr in params:
raise CmdMissParamSyntaxError(cmd.module, cmd.command, name)
param_nr += 1
-
+
# Convert parameter value according parameter spec file.
# Ignore check for commands belongs to module 'config'
if cmd.module != CONFIG_MODULE_NAME:
@@ -384,9 +398,9 @@ class BindCmdInterpreter(Cmd):
try:
cmd.params[param_name] = isc.config.config_data.convert_type(param_spec, cmd.params[param_name])
except isc.cc.data.DataTypeError as e:
- raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
+ raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
% (param_name, param_spec['item_type']) + str(e))
-
+
def _handle_cmd(self, cmd):
'''Handle a command entered by the user'''
if cmd.command == "help" or ("help" in cmd.params.keys()):
@@ -408,7 +422,7 @@ class BindCmdInterpreter(Cmd):
def add_module_info(self, module_info):
'''Add the information about one module'''
self.modules[module_info.name] = module_info
-
+
def get_module_names(self):
'''Return the names of all known modules'''
return list(self.modules.keys())
@@ -440,15 +454,15 @@ class BindCmdInterpreter(Cmd):
subsequent_indent=" " +
" " * CONST_BINDCTL_HELP_INDENT_WIDTH,
width=70))
-
+
def onecmd(self, line):
if line == 'EOF' or line.lower() == "quit":
self.conn.close()
return True
-
+
if line == 'h':
line = 'help'
-
+
Cmd.onecmd(self, line)
def remove_prefix(self, list, prefix):
@@ -476,7 +490,7 @@ class BindCmdInterpreter(Cmd):
cmd = BindCmdParse(cur_line)
if not cmd.params and text:
hints = self._get_command_startswith(cmd.module, text)
- else:
+ else:
hints = self._get_param_startswith(cmd.module, cmd.command,
text)
if cmd.module == CONFIG_MODULE_NAME:
@@ -492,8 +506,8 @@ class BindCmdInterpreter(Cmd):
except CmdMissCommandNameFormatError as e:
if not text.strip(): # command name is empty
- hints = self.modules[e.module].get_command_names()
- else:
+ hints = self.modules[e.module].get_command_names()
+ else:
hints = self._get_module_startswith(text)
except CmdCommandNameFormatError as e:
@@ -507,44 +521,43 @@ class BindCmdInterpreter(Cmd):
hints = []
self.hint = hints
- #self._append_space_to_hint()
if state < len(self.hint):
return self.hint[state]
else:
return None
-
- def _get_module_startswith(self, text):
+
+ def _get_module_startswith(self, text):
return [module
- for module in self.modules
+ for module in self.modules
if module.startswith(text)]
def _get_command_startswith(self, module, text):
- if module in self.modules:
+ if module in self.modules:
return [command
- for command in self.modules[module].get_command_names()
+ for command in self.modules[module].get_command_names()
if command.startswith(text)]
-
- return []
-
- def _get_param_startswith(self, module, command, text):
+ return []
+
+
+ def _get_param_startswith(self, module, command, text):
if module in self.modules:
- module_info = self.modules[module]
- if command in module_info.get_command_names():
+ module_info = self.modules[module]
+ if command in module_info.get_command_names():
cmd_info = module_info.get_command_with_name(command)
- params = cmd_info.get_param_names()
+ params = cmd_info.get_param_names()
hint = []
- if text:
+ if text:
hint = [val for val in params if val.startswith(text)]
else:
hint = list(params)
-
+
if len(hint) == 1 and hint[0] != "help":
- hint[0] = hint[0] + " ="
-
+ hint[0] = hint[0] + " ="
+
return hint
return []
@@ -561,24 +574,24 @@ class BindCmdInterpreter(Cmd):
self._print_correct_usage(err)
except isc.cc.data.DataTypeError as err:
print("Error! ", err)
-
- def _print_correct_usage(self, ept):
+
+ def _print_correct_usage(self, ept):
if isinstance(ept, CmdUnknownModuleSyntaxError):
self.do_help(None)
-
+
elif isinstance(ept, CmdUnknownCmdSyntaxError):
self.modules[ept.module].module_help()
-
+
elif isinstance(ept, CmdMissParamSyntaxError) or \
isinstance(ept, CmdUnknownParamSyntaxError):
self.modules[ept.module].command_help(ept.command)
-
-
+
+
def _append_space_to_hint(self):
"""Append one space at the end of complete hint."""
self.hint = [(val + " ") for val in self.hint]
-
-
+
+
def _handle_help(self, cmd):
if cmd.command == "help":
self.modules[cmd.module].module_help()
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index ee4191d..58c03eb 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -146,4 +146,5 @@ if __name__ == '__main__':
tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
csv_file_dir=options.csv_file_dir)
prepare_config_commands(tool)
- tool.run()
+ result = tool.run()
+ sys.exit(result)
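
[Note: with run() now returning a status and the main script passing it to
sys.exit(), callers can branch on bindctl's exit code. A hypothetical
wrapper, not part of the patch, might look like this; it feeds a single
"quit" command and mirrors the exit status.]

    import subprocess
    import sys

    # A non-zero status now indicates a failed login or a lost connection.
    proc = subprocess.Popen(['bindctl'], stdin=subprocess.PIPE)
    proc.communicate(b'quit\n')
    sys.exit(proc.returncode)
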
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 0635b32..cef35dc 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -31,14 +31,14 @@ from bindctl_main import set_bindctl_options
from bindctl import cmdparse
from bindctl import bindcmd
from bindctl.moduleinfo import *
-from bindctl.exception import *
+from bindctl.exception import *
try:
from collections import OrderedDict
except ImportError:
from mycollections import OrderedDict
class TestCmdLex(unittest.TestCase):
-
+
def my_assert_raise(self, exception_type, cmd_line):
self.assertRaises(exception_type, cmdparse.BindCmdParse, cmd_line)
@@ -48,13 +48,13 @@ class TestCmdLex(unittest.TestCase):
assert cmd.module == "zone"
assert cmd.command == "add"
self.assertEqual(len(cmd.params), 0)
-
-
+
+
def testCommandWithParameters(self):
lines = {"zone add zone_name = cnnic.cn, file = cnnic.cn.file master=1.1.1.1",
"zone add zone_name = \"cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1 ",
"zone add zone_name = 'cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1, " }
-
+
for cmd_line in lines:
cmd = cmdparse.BindCmdParse(cmd_line)
assert cmd.module == "zone"
@@ -75,7 +75,7 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = 1\"\'34**&2 ,value= 44\"\'\"')
self.assertEqual(cmd.params['name'], '1\"\'34**&2')
self.assertEqual(cmd.params['value'], '44\"\'\"')
-
+
cmd = cmdparse.BindCmdParse('zone cmd name = 1\'34**&2value=44\"\'\" value = \"==============\'')
self.assertEqual(cmd.params['name'], '1\'34**&2value=44\"\'\"')
self.assertEqual(cmd.params['value'], '==============')
@@ -83,34 +83,34 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = \"1234, 567890 \" value ==&*/')
self.assertEqual(cmd.params['name'], '1234, 567890 ')
self.assertEqual(cmd.params['value'], '=&*/')
-
+
def testCommandWithListParam(self):
cmd = cmdparse.BindCmdParse("zone set zone_name='cnnic.cn', master='1.1.1.1, 2.2.2.2'")
- assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
-
+ assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
+
def testCommandWithHelpParam(self):
cmd = cmdparse.BindCmdParse("zone add help")
assert cmd.params["help"] == "help"
-
+
cmd = cmdparse.BindCmdParse("zone add help *&)&)*&&$#$^%")
assert cmd.params["help"] == "help"
self.assertEqual(len(cmd.params), 1)
-
+
def testCmdModuleNameFormatError(self):
self.my_assert_raise(CmdModuleNameFormatError, "zone=good")
- self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
- self.my_assert_raise(CmdModuleNameFormatError, "")
+ self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
+ self.my_assert_raise(CmdModuleNameFormatError, "")
self.my_assert_raise(CmdModuleNameFormatError, "=zone")
- self.my_assert_raise(CmdModuleNameFormatError, "zone,")
-
-
+ self.my_assert_raise(CmdModuleNameFormatError, "zone,")
+
+
def testCmdMissCommandNameFormatError(self):
self.my_assert_raise(CmdMissCommandNameFormatError, "zone")
self.my_assert_raise(CmdMissCommandNameFormatError, "zone ")
self.my_assert_raise(CmdMissCommandNameFormatError, "help ")
-
-
+
+
def testCmdCommandNameFormatError(self):
self.my_assert_raise(CmdCommandNameFormatError, "zone =d")
self.my_assert_raise(CmdCommandNameFormatError, "zone z=d")
@@ -119,11 +119,11 @@ class TestCmdLex(unittest.TestCase):
self.my_assert_raise(CmdCommandNameFormatError, "zone zdd/ \"")
class TestCmdSyntax(unittest.TestCase):
-
+
def _create_bindcmd(self):
"""Create one bindcmd"""
-
- tool = bindcmd.BindCmdInterpreter()
+
+ tool = bindcmd.BindCmdInterpreter()
string_spec = { 'item_type' : 'string',
'item_optional' : False,
'item_default' : ''}
@@ -135,40 +135,40 @@ class TestCmdSyntax(unittest.TestCase):
load_cmd = CommandInfo(name = "load")
load_cmd.add_param(zone_file_param)
load_cmd.add_param(zone_name)
-
- param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
- param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
- param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
+
+ param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
+ param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
+ param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
set_cmd = CommandInfo(name = "set")
set_cmd.add_param(param_master)
set_cmd.add_param(param_allow_update)
set_cmd.add_param(zone_name)
-
- reload_all_cmd = CommandInfo(name = "reload_all")
-
- zone_module = ModuleInfo(name = "zone")
+
+ reload_all_cmd = CommandInfo(name = "reload_all")
+
+ zone_module = ModuleInfo(name = "zone")
zone_module.add_command(load_cmd)
zone_module.add_command(set_cmd)
zone_module.add_command(reload_all_cmd)
-
+
tool.add_module_info(zone_module)
return tool
-
-
+
+
def setUp(self):
self.bindcmd = self._create_bindcmd()
-
-
+
+
def no_assert_raise(self, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.bindcmd._validate_cmd(cmd)
-
-
+ self.bindcmd._validate_cmd(cmd)
+
+
def my_assert_raise(self, exception_type, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
-
-
+ self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
+
+
def testValidateSuccess(self):
self.no_assert_raise("zone load zone_file='cn' zone_name='cn'")
self.no_assert_raise("zone load zone_file='cn', zone_name='cn', ")
@@ -178,27 +178,27 @@ class TestCmdSyntax(unittest.TestCase):
self.no_assert_raise("zone set allow_update='1.1.1.1' zone_name='cn'")
self.no_assert_raise("zone set zone_name='cn'")
self.my_assert_raise(isc.cc.data.DataTypeError, "zone set zone_name ='cn', port='cn'")
- self.no_assert_raise("zone reload_all")
-
-
+ self.no_assert_raise("zone reload_all")
+
+
def testCmdUnknownModuleSyntaxError(self):
self.my_assert_raise(CmdUnknownModuleSyntaxError, "zoned d")
self.my_assert_raise(CmdUnknownModuleSyntaxError, "dd dd ")
-
-
+
+
def testCmdUnknownCmdSyntaxError(self):
self.my_assert_raise(CmdUnknownCmdSyntaxError, "zone dd")
-
+
def testCmdMissParamSyntaxError(self):
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_file='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_name='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set allow_update='1.1.1.1'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set ")
-
+
def testCmdUnknownParamSyntaxError(self):
self.my_assert_raise(CmdUnknownParamSyntaxError, "zone load zone_d='cn'")
- self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
-
+ self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
+
class TestModuleInfo(unittest.TestCase):
def test_get_param_name_by_position(self):
@@ -212,36 +212,36 @@ class TestModuleInfo(unittest.TestCase):
self.assertEqual('sex', cmd.get_param_name_by_position(2, 3))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
-
+
self.assertRaises(KeyError, cmd.get_param_name_by_position, 4, 4)
-
+
class TestNameSequence(unittest.TestCase):
"""
Test if the module/command/parameters is saved in the order creation
"""
-
+
def _create_bindcmd(self):
- """Create one bindcmd"""
-
+ """Create one bindcmd"""
+
self._cmd = CommandInfo(name = "load")
self.module = ModuleInfo(name = "zone")
- self.tool = bindcmd.BindCmdInterpreter()
+ self.tool = bindcmd.BindCmdInterpreter()
for random_str in self.random_names:
self._cmd.add_param(ParamInfo(name = random_str))
self.module.add_command(CommandInfo(name = random_str))
- self.tool.add_module_info(ModuleInfo(name = random_str))
-
+ self.tool.add_module_info(ModuleInfo(name = random_str))
+
def setUp(self):
self.random_names = ['1erdfeDDWsd', '3fe', '2009erd', 'Fe231', 'tere142', 'rei8WD']
self._create_bindcmd()
-
- def testSequence(self):
+
+ def testSequence(self):
param_names = self._cmd.get_param_names()
cmd_names = self.module.get_command_names()
module_names = self.tool.get_module_names()
-
+
i = 0
while i < len(self.random_names):
assert self.random_names[i] == param_names[i+1]
@@ -342,7 +342,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for socket.err
socket_err_output = io.StringIO()
sys.stdout = socket_err_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Failed to send request, the connection is closed\n",
socket_err_output.getvalue())
socket_err_output.close()
@@ -350,7 +350,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for http.client.CannotSendRequest
cannot_send_output = io.StringIO()
sys.stdout = cannot_send_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Can not send request, the connection is busy\n",
cannot_send_output.getvalue())
cannot_send_output.close()
@@ -472,4 +472,4 @@ class TestCommandLineOptions(unittest.TestCase):
if __name__== "__main__":
unittest.main()
-
+
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index a791aa3..ff221db 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -17,12 +17,12 @@
''' cmdctl module is the configuration entry point for all commands from bindctl
or some other web tools client of bind10. cmdctl is pure https server which provi-
-des RESTful API. When command client connecting with cmdctl, it should first login
-with legal username and password.
- When cmdctl starting up, it will collect command specification and
+des RESTful API. When command client connecting with cmdctl, it should first login
+with legal username and password.
+ When cmdctl starting up, it will collect command specification and
configuration specification/data of other available modules from configmanager, then
wait for receiving request from client, parse the request and resend the request to
-the proper module. When getting the request result from the module, send back the
+the proper module. When getting the request result from the module, send back the
resut to client.
'''
@@ -81,16 +81,16 @@ SPECFILE_LOCATION = SPECFILE_PATH + os.sep + "cmdctl.spec"
class CmdctlException(Exception):
pass
-
+
class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
'''https connection request handler.
Currently only GET and POST are supported. '''
def do_GET(self):
- '''The client should send its session id in header with
+ '''The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
- rcode, reply = http.client.OK, []
+ rcode, reply = http.client.OK, []
if self._is_session_valid():
if self._is_user_logged_in():
rcode, reply = self._handle_get_request()
@@ -106,16 +106,16 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _handle_get_request(self):
'''Currently only support the following three url GET request '''
id, module = self._parse_request_path()
- return self.server.get_reply_data_for_GET(id, module)
+ return self.server.get_reply_data_for_GET(id, module)
def _is_session_valid(self):
- return self.session_id
+ return self.session_id
def _is_user_logged_in(self):
login_time = self.server.user_sessions.get(self.session_id)
if not login_time:
return False
-
+
idle_time = time.time() - login_time
if idle_time > self.server.idle_timeout:
return False
@@ -125,7 +125,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _parse_request_path(self):
'''Parse the url, the legal url should like /ldh or /ldh/ldh '''
- groups = URL_PATTERN.match(self.path)
+ groups = URL_PATTERN.match(self.path)
if not groups:
return (None, None)
else:
@@ -133,8 +133,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
'''Process POST request. '''
- '''Process user login and send command to proper module
- The client should send its session id in header with
+ '''Process user login and send command to proper module
+ The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
@@ -148,7 +148,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
rcode, reply = http.client.UNAUTHORIZED, ["please login"]
else:
rcode, reply = http.client.BAD_REQUEST, ["session isn't valid"]
-
+
self.send_response(rcode)
self.end_headers()
self.wfile.write(json.dumps(reply).encode())
@@ -169,12 +169,12 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
length = self.headers.get('Content-Length')
if not length:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
try:
user_info = json.loads((self.rfile.read(int(length))).decode())
except:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
user_name = user_info.get('username')
if not user_name:
@@ -193,7 +193,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
return False, ["username or password error"]
return True, None
-
+
def _handle_post_request(self):
'''Handle all the post request from client. '''
@@ -215,7 +215,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
if rcode != 0:
ret = http.client.BAD_REQUEST
return ret, reply
-
+
def log_request(self, code='-', size='-'):
'''Rewrite the log request function, log nothing.'''
pass
@@ -239,11 +239,11 @@ class CommandControl():
def _setup_session(self):
'''Setup the session for receving the commands
- sent from other modules. There are two sessions
- for cmdctl, one(self.module_cc) is used for receiving
- commands sent from other modules, another one (self._cc)
- is used to send the command from Bindctl or other tools
- to proper modules.'''
+ sent from other modules. There are two sessions
+ for cmdctl, one(self.module_cc) is used for receiving
+ commands sent from other modules, another one (self._cc)
+ is used to send the command from Bindctl or other tools
+ to proper modules.'''
self._cc = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
self.config_handler,
@@ -251,7 +251,7 @@ class CommandControl():
self._module_name = self._module_cc.get_module_spec().get_module_name()
self._cmdctl_config_data = self._module_cc.get_full_config()
self._module_cc.start()
-
+
def _accounts_file_check(self, filepath):
''' Check whether the accounts file is valid, each row
should be a list with 3 items.'''
@@ -288,7 +288,7 @@ class CommandControl():
errstr = self._accounts_file_check(new_config[key])
else:
errstr = 'unknown config item: ' + key
-
+
if errstr != None:
logger.error(CMDCTL_BAD_CONFIG_DATA, errstr);
return ccsession.create_answer(1, errstr)
@@ -314,7 +314,7 @@ class CommandControl():
self.modules_spec[args[0]] = args[1]
elif command == ccsession.COMMAND_SHUTDOWN:
- #When cmdctl get 'shutdown' command from boss,
+ #When cmdctl get 'shutdown' command from boss,
#shutdown the outer httpserver.
self._httpserver.shutdown()
self._serving = False
@@ -384,12 +384,12 @@ class CommandControl():
specs = self.get_modules_spec()
if module_name not in specs.keys():
return 1, {'error' : 'unknown module'}
-
+
spec_obj = isc.config.module_spec.ModuleSpec(specs[module_name], False)
errors = []
if not spec_obj.validate_command(command_name, params, errors):
return 1, {'error': errors[0]}
-
+
return self.send_command(module_name, command_name, params)
def send_command(self, module_name, command_name, params = None):
@@ -400,7 +400,7 @@ class CommandControl():
command_name, module_name)
if module_name == self._module_name:
- # Process the command sent to cmdctl directly.
+ # Process the command sent to cmdctl directly.
answer = self.command_handler(command_name, params)
else:
msg = ccsession.create_command(command_name, params)
@@ -429,7 +429,7 @@ class CommandControl():
logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
return 1, {'error': errstr}
-
+
def get_cmdctl_config_data(self):
''' If running in source code tree, use keyfile, certificate
and user accounts file in source code. '''
@@ -453,13 +453,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
'''Make the server address can be reused.'''
allow_reuse_address = True
- def __init__(self, server_address, RequestHandlerClass,
+ def __init__(self, server_address, RequestHandlerClass,
CommandControlClass,
idle_timeout = 1200, verbose = False):
'''idle_timeout: the max idle time for login'''
socketserver_mixin.NoPollMixIn.__init__(self)
try:
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_STARTED,
+ server_address[0], server_address[1])
except socket.error as err:
raise CmdctlException("Error creating server, because: %s \n" % str(err))
@@ -472,9 +474,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self._accounts_file = None
def _create_user_info(self, accounts_file):
- '''Read all user's name and its' salt, hashed password
+ '''Read all user's name and its' salt, hashed password
from accounts file.'''
- if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
+ if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
return
with self._lock:
@@ -495,10 +497,10 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self._accounts_file = accounts_file
if len(self._user_infos) == 0:
logger.error(CMDCTL_NO_USER_ENTRIES_READ)
-
+
def get_user_info(self, username):
'''Get user's salt and hashed string. If the user
- doesn't exist, return None, or else, the list
+ doesn't exist, return None, or else, the list
[salt, hashed password] will be returned.'''
with self._lock:
info = self._user_infos.get(username)
@@ -507,9 +509,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def save_user_session_id(self, session_id):
''' Record user's id and login time. '''
self.user_sessions[session_id] = time.time()
-
+
def _check_key_and_cert(self, key, cert):
- # TODO, check the content of key/certificate file
+ # TODO, check the content of key/certificate file
if not os.path.exists(key):
raise CmdctlException("key file '%s' doesn't exist " % key)
@@ -524,7 +526,7 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
certfile = cert,
keyfile = key,
ssl_version = ssl.PROTOCOL_SSLv23)
- return ssl_sock
+ return ssl_sock
except (ssl.SSLError, CmdctlException) as err :
logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
self.close_request(sock)
@@ -541,18 +543,18 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def get_reply_data_for_GET(self, id, module):
'''Currently only support the following three url GET request '''
- rcode, reply = http.client.NO_CONTENT, []
+ rcode, reply = http.client.NO_CONTENT, []
if not module:
if id == CONFIG_DATA_URL:
rcode, reply = http.client.OK, self.cmdctl.get_config_data()
elif id == MODULE_SPEC_URL:
rcode, reply = http.client.OK, self.cmdctl.get_modules_spec()
-
- return rcode, reply
+
+ return rcode, reply
def send_command_to_module(self, module_name, command_name, params):
return self.cmdctl.send_command_with_check(module_name, command_name, params)
-
+
httpd = None
def signal_handler(signal, frame):
@@ -566,10 +568,9 @@ def set_signal_handler():
def run(addr = 'localhost', port = 8080, idle_timeout = 1200, verbose = False):
''' Start cmdctl as one https server. '''
- if verbose:
- sys.stdout.write("[b10-cmdctl] starting on %s port:%d\n" %(addr, port))
- httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
+ httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
CommandControl, idle_timeout, verbose)
+
httpd.serve_forever()
def check_port(option, opt_str, value, parser):
@@ -607,6 +608,8 @@ if __name__ == '__main__':
(options, args) = parser.parse_args()
result = 1 # in case of failure
try:
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
run(options.addr, options.port, options.idle_timeout, options.verbose)
result = 0
except isc.cc.SessionError as err:
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index e007296..a3371b9 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -64,6 +64,9 @@ be set up. The specific error is given in the log message. Possible
causes may be that the ssl request itself was bad, or the local key or
certificate file could not be read.
+% CMDCTL_STARTED cmdctl is listening for connections on %1:%2
+The cmdctl daemon has started and is now listening for connections.
+
% CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cmdctl daemon. The
daemon will now shut down.
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index f9a1d9d..72e48e4 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -130,13 +130,15 @@ TEST_F(Dhcpv6SrvTest, Solicit_basic) {
ASSERT_TRUE( tmp );
EXPECT_EQ(clientid->getType(), tmp->getType() );
ASSERT_EQ(clientid->len(), tmp->len() );
- EXPECT_FALSE(memcmp(clientid->getData(), tmp->getData(), tmp->len() ) );
+
+ EXPECT_TRUE( clientid->getData() == tmp->getData() );
+
// check that server included its server-id
tmp = reply->getOption(D6O_SERVERID);
EXPECT_EQ(tmp->getType(), srv->getServerID()->getType() );
ASSERT_EQ(tmp->len(), srv->getServerID()->len() );
- EXPECT_FALSE( memcmp(tmp->getData(), srv->getServerID()->getData(),
- tmp->len()) );
+
+ EXPECT_TRUE(tmp->getData() == srv->getServerID()->getData());
// more checks to be implemented
delete srv;
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 401b4a7..1e4d942 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -20,6 +20,7 @@ import sys
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+import xfrin
from isc.xfrin.diff import Diff
import isc.log
@@ -2021,6 +2022,19 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 1)
+ # also try a different port in the actual command
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV6_ADDRESS,
+ 'master_port': str(int(TEST_MASTER_PORT) + 1)
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ # the command should now fail
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+
def test_command_handler_notify_known_zone(self):
# try it with a known zone
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
@@ -2036,21 +2050,6 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 0)
- # Note: The rest of the tests won't pass due to the change in #1298
- # We should probably simply remove the test cases, but for now we
- # just comment them out. (Note also that the comment about 'not
- # from the config' is now wrong, because we used the matching address.)
- #
- # and see if we used the address from the command, and not from
- # the config
- # This is actually NOT the address given in the command, which
- # would at this point not make sense, see the TODO in
- # xfrin.py.in Xfrin.command_handler())
-# self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
-# self.xfr.xfrin_started_master_addr)
-# self.assertEqual(int(TEST_MASTER_PORT),
-# self.xfr.xfrin_started_master_port)
-
def test_command_handler_unknown(self):
self.assertEqual(self.xfr.command_handler("xxx", None)['result'][0], 1)
@@ -2287,6 +2286,184 @@ class TestMain(unittest.TestCase):
MockXfrin.check_command_hook = raise_exception
main(MockXfrin, False)
+class TestXfrinProcess(unittest.TestCase):
+ """
+ Some tests for the xfrin_process function. This replaces the
+ XfrinConnection class with the test case itself, so we can emulate
+ whatever behavior we might want.
+
+ Currently only tests for retry if IXFR fails.
+ """
+ def setUp(self):
+ """
+ Backs up the original class implementation so it can be restored
+ and places our own version in place of the constructor.
+
+ Also sets up several internal variables to watch what happens.
+ """
+ # This will hold a "log" of what transfers were attempted.
+ self.__transfers = []
+ # This will "log" if failures or successes happened.
+ self.__published = []
+ # How many connections were created.
+ self.__created_connections = 0
+
+ def __get_connection(self, *args):
+ """
+ Provides a "connection". To mock the connection and see what it is
+ asked to do, we pretend to be the connection.
+ """
+ self.__created_connections += 1
+ return self
+
+ def connect_to_master(self):
+ """
+ Part of pretending to be the connection. It pretends it connected
+ correctly every time.
+ """
+ return True
+
+ def do_xfrin(self, check_soa, request_type):
+ """
+ Part of pretending to be the connection. It looks up what answer should
+ be returned now and logs what request happened.
+ """
+ self.__transfers.append(request_type)
+ ret = self.__rets[0]
+ self.__rets = self.__rets[1:]
+ return ret
+
+ def zone_str(self):
+ """
+ Part of pretending to be the connection. It provides the logging name
+ of the zone.
+ """
+ return "example.org/IN"
+
+ def publish_xfrin_news(self, zone_name, rrclass, ret):
+ """
+ Part of pretending to be the server as well. This just logs the
+ success/failure of the previous operation.
+ """
+ self.__published.append(ret)
+
+ def close(self):
+ """
+ Part of pretending to be the connection.
+ """
+ pass
+
+ def init_socket(self):
+ """
+ Part of pretending to be the connection.
+ """
+ pass
+
+ def __do_test(self, rets, transfers, request_type):
+ """
+ Do the actual test. The request type, prepared successes/failures
+ and expected sequence of transfers are passed to specify what test
+ should happen.
+ """
+ self.__rets = rets
+ published = rets[-1]
+ xfrin.process_xfrin(self, XfrinRecorder(), Name("example.org."),
+ RRClass.IN(), None, None, None, True, None,
+ request_type, self.__get_connection)
+ self.assertEqual([], self.__rets)
+ self.assertEqual(transfers, self.__transfers)
+ # Create a connection for each attempt
+ self.assertEqual(len(transfers), self.__created_connections)
+ self.assertEqual([published], self.__published)
+
+ def test_ixfr_ok(self):
+ """
+ Everything OK the first time, over IXFR.
+ """
+ self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+
+ def test_axfr_ok(self):
+ """
+ Everything OK the first time, over AXFR.
+ """
+ self.__do_test([XFRIN_OK], [RRType.AXFR()], RRType.AXFR())
+
+ def test_axfr_fail(self):
+ """
+ The transfer failed over AXFR. Should not be retried (we don't expect
+ to fail on AXFR but succeed on IXFR, and we didn't use IXFR in the first
+ place for some reason).
+ """
+ self.__do_test([XFRIN_FAIL], [RRType.AXFR()], RRType.AXFR())
+
+ def test_ixfr_fallback(self):
+ """
+ The transfer fails over IXFR, but succeeds over AXFR. It should fall back
+ to AXFR and say everything is OK.
+ """
+ self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR(), RRType.AXFR()],
+ RRType.IXFR())
+
+ def test_ixfr_fail(self):
+ """
+ The transfer fails both over IXFR and AXFR. It should report failure
+ (only once) and should try both before giving up.
+ """
+ self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
+ [RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+class TestFormatting(unittest.TestCase):
+ # If the formatting functions are moved to a more general library
+ # (ticket #1379), these tests should be moved with them.
+ def test_format_zone_str(self):
+ self.assertEqual("example.com/IN",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("IN")))
+ self.assertEqual("example.com/CH",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("CH")))
+ self.assertEqual("example.org/IN",
+ format_zone_str(isc.dns.Name("example.org"),
+ isc.dns.RRClass("IN")))
+
+ def test_format_addrinfo(self):
+ # This test may need to be updated if the input type is changed,
+ # right now it is a nested tuple:
+ # (family, sockettype, (address, port))
+ # of which sockettype is ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.2:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.2", 53))))
+ self.assertEqual("192.0.2.1:54",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 54))))
+ self.assertEqual("[2001:db8::1]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 53))))
+ self.assertEqual("[2001:db8::2]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::2", 53))))
+ self.assertEqual("[2001:db8::1]:54",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 54))))
+ self.assertEqual("/some/file",
+ format_addrinfo((socket.AF_UNIX, socket.SOCK_STREAM,
+ "/some/file")))
+ # second element of passed tuple should be ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, None,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, "Just some string",
+ ("192.0.2.1", 53))))
+ self.assertRaises(TypeError, format_addrinfo, 1)
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf"))
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf", ()))
+
if __name__== "__main__":
try:
isc.log.resetUnitTestRootLogger()
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index bd5635e..911b3b3 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -122,6 +122,36 @@ def _check_zone_class(zone_class_str):
except InvalidRRClass as irce:
raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text(True) + '/' + str(zone_class)
+
+def format_addrinfo(addrinfo):
+ """Helper function to format the addrinfo as a string of the form
+ <addr>:<port> (for IPv4) or [<addr>]:<port> (for IPv6). For unix domain
+ sockets, and unknown address families, it returns a basic string
+ conversion of the third element of the passed tuple.
+ Parameters:
+ addrinfo: a 3-tuple consisting of address family, socket type, and,
+ depending on the family, either a 2-tuple with the address
+ and port, or a filename
+ """
+ try:
+ if addrinfo[0] == socket.AF_INET:
+ return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+ elif addrinfo[0] == socket.AF_INET6:
+ return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+ else:
+ return str(addrinfo[2])
+ except IndexError:
+ raise TypeError("addrinfo argument to format_addrinfo() does not "
+ "appear to be consisting of (family, socktype, (addr, port))")
+
def get_soa_serial(soa_rdata):
'''Extract the serial field of an SOA RDATA and return it as an integer.
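
For quick reference, the behaviour of the format_addrinfo() helper added in this hunk can be restated as a standalone Python sketch (error handling omitted); this is illustrative only and is not the in-tree code:

    import socket

    def format_addrinfo(addrinfo):
        # Mirrors the helper above: "<addr>:<port>" for IPv4,
        # "[<addr>]:<port>" for IPv6, str() of the third element otherwise.
        if addrinfo[0] == socket.AF_INET:
            return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
        elif addrinfo[0] == socket.AF_INET6:
            return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
        else:
            return str(addrinfo[2])

    print(format_addrinfo((socket.AF_INET, socket.SOCK_STREAM, ("192.0.2.1", 53))))
    print(format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM, ("2001:db8::1", 53))))
    print(format_addrinfo((socket.AF_UNIX, socket.SOCK_STREAM, "/some/file")))
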
@@ -498,8 +528,8 @@ class XfrinConnection(asyncore.dispatcher):
return self.__state
def zone_str(self):
- '''A convenient function for logging to include zone name and class'''
- return self._zone_name.to_text() + '/' + str(self._rrclass)
+ '''A convenience function for logging to include zone name and class'''
+ return format_zone_str(self._zone_name, self._rrclass)
def connect_to_master(self):
'''Connect to master in TCP.'''
@@ -775,15 +805,15 @@ class XfrinConnection(asyncore.dispatcher):
return False
def __process_xfrin(server, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, check_soa, tsig_key,
- request_type, conn_class=XfrinConnection):
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type, conn_class):
conn = None
exception = None
ret = XFRIN_FAIL
try:
# Create a data source client used in this XFR session. Right now we
- # still assume an sqlite3-based data source, and use both the old and
- # new data source APIs. We also need to use a mock client for tests.
+ # still assume an sqlite3-based data source, and use both the old and new
+ # data source APIs. We also need to use a mock client for tests.
# For a temporary workaround to deal with these situations, we skip the
# creation when the given file is none (the test case). Eventually
# this code will be much cleaner.
@@ -796,16 +826,36 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
datasrc_config = "{ \"database_file\": \"" + db_file + "\"}"
datasrc_client = DataSourceClient(datasrc_type, datasrc_config)
- # Create a TCP connection for the XFR session and perform the operation
+ # Create a TCP connection for the XFR session and perform the operation.
sock_map = {}
- conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
- shutdown_event, master_addrinfo, tsig_key)
- conn.init_socket()
- # XXX: We still need _db_file for temporary workaround in _create_query().
- # This should be removed when we eliminate the need for the workaround.
- conn._db_file = db_file
- if conn.connect_to_master():
- ret = conn.do_xfrin(check_soa, request_type)
+ # In case we were asked to do IXFR and that one fails, we try again with
+ # AXFR. But only if we could actually connect to the server.
+ #
+ # So we start with retry as True, which is set to false on each attempt.
+ # In the case of connected but failed IXFR, we set it to true once again.
+ retry = True
+ while retry:
+ retry = False
+ conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key)
+ conn.init_socket()
+ # XXX: We still need _db_file for temporary workaround in _create_query().
+ # This should be removed when we eliminate the need for the workaround.
+ conn._db_file = db_file
+ ret = XFRIN_FAIL
+ if conn.connect_to_master():
+ ret = conn.do_xfrin(check_soa, request_type)
+ if ret == XFRIN_FAIL and request_type == RRType.IXFR():
+ # IXFR failed for some reason. It might mean the server can't
+ # handle it, or we don't have the zone or we are out of sync or
+ # whatever else. So we retry with AXFR, as it may succeed
+ # in many such cases.
+ retry = True
+ request_type = RRType.AXFR()
+ logger.warn(XFRIN_XFR_TRANSFER_FALLBACK, conn.zone_str())
+ conn.close()
+ conn = None
+
except Exception as ex:
# If exception happens, just remember it here so that we can re-raise
# after cleaning up things. We don't log it here because we want
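
The retry logic added above reduces to the following standalone sketch; make_connection, connect_to_master and do_xfrin are stand-ins for the real objects, and XFRIN_OK/XFRIN_FAIL for the module's result constants:

    XFRIN_OK, XFRIN_FAIL = 0, 1
    IXFR, AXFR = "IXFR", "AXFR"

    def transfer_with_fallback(make_connection, request_type):
        """Try the requested transfer; if a connected IXFR fails, retry once with AXFR."""
        ret = XFRIN_FAIL
        retry = True
        while retry:
            retry = False
            conn = make_connection()
            ret = XFRIN_FAIL
            if conn.connect_to_master():
                ret = conn.do_xfrin(request_type)
                if ret == XFRIN_FAIL and request_type == IXFR:
                    # Connected but IXFR failed: fall back to AXFR once.
                    retry = True
                    request_type = AXFR
            conn.close()
        return ret
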
@@ -1074,20 +1124,22 @@ class Xfrin:
# a security hole. Once we add the ability to have multiple master addresses,
# we should check if it matches one of them, and then use it.)
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
+ zone_str = format_zone_str(zone_name, rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
notify_addr = self._parse_master_and_port(args, zone_name,
rrclass)
if zone_info is None:
# TODO what to do? no info known about zone. defaults?
- errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
- logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
+ errmsg = "Got notification to retransfer unknown zone " + zone_str
+ logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
answer = create_answer(1, errmsg)
else:
request_type = RRType.AXFR()
if zone_info.use_ixfr:
request_type = RRType.IXFR()
master_addr = zone_info.get_master_addr_info()
- if notify_addr == master_addr:
+ if notify_addr[0] == master_addr[0] and\
+ notify_addr[2] == master_addr[2]:
ret = self.xfrin_start(zone_name,
rrclass,
self._get_db_file(),
@@ -1096,11 +1148,12 @@ class Xfrin:
True)
answer = create_answer(ret[0], ret[1])
else:
- errmsg = "Got notification for " + zone_name.to_text()\
- + "from unknown address: " + notify_addr[2][0];
- logger.error(XFRIN_NOTIFY_UNKNOWN_MASTER,
- zone_name.to_text(), notify_addr[2][0],
- master_addr[2][0])
+ notify_addr_str = format_addrinfo(notify_addr)
+ master_addr_str = format_addrinfo(master_addr)
+ errmsg = "Got notification for " + zone_str\
+ + "from unknown address: " + notify_addr_str;
+ logger.info(XFRIN_NOTIFY_UNKNOWN_MASTER, zone_str,
+ notify_addr_str, master_addr_str)
answer = create_answer(1, errmsg)
elif command == 'retransfer' or command == 'refresh':
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 81bd649..86cdec3 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -29,6 +29,12 @@ this can only happen for AXFR.
The XFR transfer for the given zone has failed due to a protocol error.
The error is shown in the log message.
+% XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
+The IXFR transfer of the given zone failed. This can happen for many reasons:
+the remote server may not support IXFR, we may not have the SOA record
+(or the zone at all), we may be out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we fall back to AXFR in case it helps.
+
% XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4
An XFR session failed outside the main protocol handling. This
includes an error at the data source level at the initialization
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
index 426c961..d08fcf3 100644
--- a/src/lib/acl/dns.h
+++ b/src/lib/acl/dns.h
@@ -71,8 +71,8 @@ struct RequestContext {
///
/// \exception None
///
- /// \parameter remote_address_param The remote IP address
- /// \parameter tsig_param A valid pointer to the TSIG record included in
+ /// \param remote_address_param The remote IP address
+ /// \param tsig_param A valid pointer to the TSIG record included in
/// the request or NULL if the request doesn't contain a TSIG.
RequestContext(const IPAddress& remote_address_param,
const isc::dns::TSIGRecord* tsig_param) :
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
index f60b144..fc69b44 100644
--- a/src/lib/acl/loader.h
+++ b/src/lib/acl/loader.h
@@ -125,7 +125,7 @@ BasicAction defaultActionLoader(data::ConstElementPtr action);
*
* The rest of the element are matches. The left side is the name of the
* match type (for example match for source IP address or match for message
- * size). The <parameter> is whatever is needed to describe the match and
+ * size). The parameter is whatever is needed to describe the match and
* depends on the match type, the loader passes it verbatim to creator
* of that match type.
*
@@ -148,7 +148,7 @@ public:
/**
* \brief Constructor.
*
- * \param default_action The default action for created ACLs.
+ * \param defaultAction The default action for created ACLs.
* \param actionLoader is the loader which will be used to convert actions
* from their JSON representation. The default value is suitable for
* the BasicAction enum. If you did not specify the second
@@ -202,7 +202,7 @@ public:
* parameters might look like, they are not checked in any way.
* Therefore it's up to the creator (or the check being created)
* to validate the data and throw if it is bad.
- * \param Current loader calling this creator. This can be used
+ * \param loader Current loader calling this creator. This can be used
* to load subexpressions in case of compound check.
*/
virtual boost::shared_ptr<Check<Context> > create(
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 22b3a8e..5444547 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -7,9 +7,12 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
-# This is a wrapper library solely used for b10-auth. The ASIO header files
-# have some code fragments that would hit gcc's unused-parameter warning,
-# which would make the build fail with -Werror (our default setting).
+# This is a wrapper library.
+
+# The ASIO header files have some code fragments that would hit
+# gcc's unused-parameter warning, which would make the build fail
+# with -Werror (our default setting).
+
lib_LTLIBRARIES = libasiolink.la
libasiolink_la_SOURCES = asiolink.h
libasiolink_la_SOURCES += dummy_io_cb.h
diff --git a/src/lib/asiolink/dummy_io_cb.h b/src/lib/asiolink/dummy_io_cb.h
index 2081906..bcaefe9 100644
--- a/src/lib/asiolink/dummy_io_cb.h
+++ b/src/lib/asiolink/dummy_io_cb.h
@@ -39,7 +39,8 @@ public:
/// \brief Asynchronous I/O callback method
///
- /// \param error Unused
+ /// TODO: explain why this method should never be called.
+ /// This should be unused.
void operator()(asio::error_code)
{
// TODO: log an error if this method ever gets called.
@@ -47,8 +48,8 @@ public:
/// \brief Asynchronous I/O callback method
///
- /// \param error Unused
- /// \param length Unused
+ /// TODO: explain why this method should never be called.
+ /// This should be unused.
void operator()(asio::error_code, size_t)
{
// TODO: log an error if this method ever gets called.
diff --git a/src/lib/asiolink/io_address.cc b/src/lib/asiolink/io_address.cc
index 51c0332..0fe1db4 100644
--- a/src/lib/asiolink/io_address.cc
+++ b/src/lib/asiolink/io_address.cc
@@ -15,6 +15,7 @@
#include <config.h>
#include <unistd.h> // for some IPC/network system calls
+#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
@@ -49,6 +50,11 @@ IOAddress::IOAddress(const ip::address& asio_address) :
asio_address_(asio_address)
{}
+IOAddress::IOAddress(uint32_t v4address):
+ asio_address_(asio::ip::address_v4(v4address)) {
+
+}
+
string
IOAddress::toText() const {
return (asio_address_.to_string());
@@ -84,5 +90,14 @@ IOAddress::getAddress() const {
return asio_address_;
}
+IOAddress::operator uint32_t() const {
+ if (getAddress().is_v4()) {
+ return (getAddress().to_v4().to_ulong());
+ } else {
+ isc_throw(BadValue, "Can't convert " << toText()
+ << " address to IPv4.");
+ }
+}
+
} // namespace asiolink
} // namespace isc
diff --git a/src/lib/asiolink/io_address.h b/src/lib/asiolink/io_address.h
index 9fac580..c40e5b9 100644
--- a/src/lib/asiolink/io_address.h
+++ b/src/lib/asiolink/io_address.h
@@ -19,6 +19,7 @@
// this file. In particular, asio.hpp should never be included here.
// See the description of the namespace below.
#include <unistd.h> // for some network system calls
+#include <stdint.h> // for uint32_t
#include <asio/ip/address.hpp>
#include <functional>
@@ -71,6 +72,15 @@ public:
IOAddress(const asio::ip::address& asio_address);
//@}
+ /// @brief Constructor for ip::address_v4 object.
+ ///
+ /// This constructor is intended to be used when constructing an
+ /// IPv4 address out of a uint32_t value. The passed value must be in
+ /// network byte order.
+ ///
+ /// @param v4address IPv4 address represented by a uint32_t
+ IOAddress(uint32_t v4address);
+
/// \brief Convert the address to a string.
///
/// This method is basically expected to be exception free, but
@@ -139,6 +149,14 @@ public:
return (nequals(other));
}
+ /// \brief Converts IPv4 address to uint32_t
+ ///
+ /// Will throw a BadValue exception if this is not an IPv4
+ /// address.
+ ///
+ /// \return uint32_t that represents IPv4 address in
+ /// network byte order
+ operator uint32_t () const;
private:
asio::ip::address asio_address_;
diff --git a/src/lib/asiolink/io_asio_socket.h b/src/lib/asiolink/io_asio_socket.h
index 864708c..aeac63d 100644
--- a/src/lib/asiolink/io_asio_socket.h
+++ b/src/lib/asiolink/io_asio_socket.h
@@ -82,8 +82,6 @@ class IOEndpoint;
/// derived class for testing purposes rather than providing factory methods
/// (i.e., getDummy variants below).
///
-/// TODO: Check if IOAsioSocket class is still needed
-///
/// \param C Template parameter identifying type of the callback object.
template <typename C>
@@ -328,10 +326,9 @@ public:
///
/// A call that is a no-op on UDP sockets, this opens a connection to the
/// system identified by the given endpoint.
+ /// The endpoint and callback are unused.
///
- /// \param endpoint Unused
- /// \param callback Unused.
- ///false indicating that the operation completed synchronously.
+ /// \return false indicating that the operation completed synchronously.
virtual bool open(const IOEndpoint*, C&) {
return (false);
}
@@ -339,23 +336,14 @@ public:
/// \brief Send Asynchronously
///
/// Must be supplied as it is abstract in the base class.
- ///
- /// \param data Unused
- /// \param length Unused
- /// \param endpoint Unused
- /// \param callback Unused
+ /// This is unused.
virtual void asyncSend(const void*, size_t, const IOEndpoint*, C&) {
}
/// \brief Receive Asynchronously
///
/// Must be supplied as it is abstract in the base class.
- ///
- /// \param data Unused
- /// \param length Unused
- /// \param offset Unused
- /// \param endpoint Unused
- /// \param callback Unused
+ /// The parameters are unused.
virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) {
}
diff --git a/src/lib/asiolink/tests/io_address_unittest.cc b/src/lib/asiolink/tests/io_address_unittest.cc
index eddb0e8..4322283 100644
--- a/src/lib/asiolink/tests/io_address_unittest.cc
+++ b/src/lib/asiolink/tests/io_address_unittest.cc
@@ -83,3 +83,19 @@ TEST(IOAddressTest, from_bytes) {
});
EXPECT_EQ(addr.toText(), IOAddress("192.0.2.3").toText());
}
+
+TEST(IOAddressTest, uint32) {
+ IOAddress addr1("192.0.2.5");
+
+ // operator uint32_t() is used here
+ uint32_t tmp = addr1;
+
+ uint32_t expected = (192U << 24) + (0U << 16) + (2U << 8) + 5U;
+
+ EXPECT_EQ(expected, tmp);
+
+ // now let's try opposite conversion
+ IOAddress addr3 = IOAddress(expected);
+
+ EXPECT_EQ(addr3.toText(), "192.0.2.5");
+}
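
The expected value used in the test above can be sanity-checked independently of the C++ build; the following standard-library snippet only illustrates the arithmetic and is not part of the patch:

    import socket
    import struct

    packed = socket.inet_aton("192.0.2.5")   # 4 bytes, most significant octet first
    value, = struct.unpack("!I", packed)     # network byte order -> integer
    assert value == (192 << 24) + (0 << 16) + (2 << 8) + 5
    print(hex(value))                        # 0xc0000205
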
diff --git a/src/lib/cache/logger.h b/src/lib/cache/logger.h
index 3bba413..52c9743 100644
--- a/src/lib/cache/logger.h
+++ b/src/lib/cache/logger.h
@@ -18,7 +18,7 @@
#include <log/macros.h>
#include <cache/cache_messages.h>
-/// \file logger.h
+/// \file cache/logger.h
/// \brief Cache library global logger
///
/// This holds the logger for the cache library. It is a private header
diff --git a/src/lib/cache/message_cache.h b/src/lib/cache/message_cache.h
index 44d7fd1..b418f23 100644
--- a/src/lib/cache/message_cache.h
+++ b/src/lib/cache/message_cache.h
@@ -52,6 +52,8 @@ public:
virtual ~MessageCache();
/// \brief Look up message in cache.
+ /// \param qname Name of the domain for which the message is being sought.
+ /// \param qtype Type of the RR for which the message is being sought.
/// \param message generated response message if the message entry
/// can be found.
///
diff --git a/src/lib/cache/resolver_cache.h b/src/lib/cache/resolver_cache.h
index 9ad4388..5630bd7 100644
--- a/src/lib/cache/resolver_cache.h
+++ b/src/lib/cache/resolver_cache.h
@@ -89,8 +89,8 @@ public:
ResolverClassCache(const isc::dns::RRClass& cache_class);
/// \brief Construct Function.
- /// \param caches_size cache size information for each
- /// messages/rrsets of different classes.
+ /// \param cache_info Cache size information for each message/rrsets of
+ /// different classes.
ResolverClassCache(const CacheSizeInfo& cache_info);
/// \name Lookup Interfaces
diff --git a/src/lib/cache/rrset_entry.h b/src/lib/cache/rrset_entry.h
index 5fa8f2c..09cf79c 100644
--- a/src/lib/cache/rrset_entry.h
+++ b/src/lib/cache/rrset_entry.h
@@ -27,9 +27,9 @@ using namespace isc::nsas;
namespace isc {
namespace cache {
-/// \enum RRset Trustworthiness
+/// \enum RRsetTrustLevel
/// For detail of RRset trustworthiness, please refer to
-/// RFC2181 section5.4.1.
+/// RFC 2181 section 5.4.1.
/// Bigger value is more trustworthy.
enum RRsetTrustLevel {
/// Default trust for RRset.
diff --git a/src/lib/cc/logger.h b/src/lib/cc/logger.h
index 34b5809..d6253d0 100644
--- a/src/lib/cc/logger.h
+++ b/src/lib/cc/logger.h
@@ -18,7 +18,7 @@
#include <cc/cc_messages.h>
#include <log/macros.h>
-/// \file logger.h
+/// \file cc/logger.h
/// \brief Command Channel library global logger
///
/// This holds the logger for the CC library. It is a private header
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index e0e24cf..0052aca 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -254,7 +254,8 @@ SessionImpl::internalRead(const asio::error_code& error,
}
}
-Session::Session(io_service& io_service) : impl_(new SessionImpl(io_service))
+Session::Session(asio::io_service& io_service) :
+ impl_(new SessionImpl(io_service))
{}
Session::~Session() {
diff --git a/src/lib/config/tests/testdata/spec32.spec b/src/lib/config/tests/testdata/spec32.spec
index 68e774e..0d8cf7c 100644
--- a/src/lib/config/tests/testdata/spec32.spec
+++ b/src/lib/config/tests/testdata/spec32.spec
@@ -12,6 +12,27 @@
"item_optional": false,
"item_default": 3
}
+ },
+ { "item_name": "named_set_item2",
+ "item_type": "named_set",
+ "item_optional": true,
+ "item_default": { },
+ "named_set_item_spec": {
+ "item_name": "named_set_element",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "first",
+ "item_type": "integer",
+ "item_optional": true
+ },
+ { "item_name": "second",
+ "item_type": "string",
+ "item_optional": true
+ }
+ ]
+ }
}
]
}
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index d35f6e8..3b079c6 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -704,30 +704,67 @@ namespace {
*/
class DatabaseIterator : public ZoneIterator {
public:
- DatabaseIterator(const DatabaseAccessor::IteratorContextPtr& context,
- const RRClass& rrclass) :
- context_(context),
+ DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
+ const Name& zone_name,
+ const RRClass& rrclass) :
+ accessor_(accessor),
class_(rrclass),
ready_(true)
{
+ // Get the zone
+ const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
+ if (!zone.first) {
+ // No such zone, can't continue
+ isc_throw(DataSourceError, "Zone " + zone_name.toText() +
+ " can not be iterated, because it doesn't exist "
+ "in this data source");
+ }
+
+ // Start a separate transaction.
+ accessor_->startTransaction();
+
+ // Find the SOA of the zone (may or may not succeed). Note that
+ // this must be done before starting the iteration context.
+ soa_ = DatabaseClient::Finder(accessor_, zone.second, zone_name).
+ find(zone_name, RRType::SOA(), NULL).rrset;
+
+ // Request the context
+ context_ = accessor_->getAllRecords(zone.second);
+ // It must not return NULL, that's a bug of the implementation
+ if (!context_) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ zone_name.toText());
+ }
+
// Prepare data for the next time
getData();
}
+ virtual ~DatabaseIterator() {
+ if (ready_) {
+ accessor_->commit();
+ }
+ }
+
+ virtual ConstRRsetPtr getSOA() const {
+ return (soa_);
+ }
+
virtual isc::dns::ConstRRsetPtr getNextRRset() {
if (!ready_) {
isc_throw(isc::Unexpected, "Iterating past the zone end");
}
if (!data_ready_) {
// At the end of zone
+ accessor_->commit();
ready_ = false;
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_ITERATE_END);
return (ConstRRsetPtr());
}
- string name_str(name_), rtype_str(rtype_), ttl(ttl_);
- Name name(name_str);
- RRType rtype(rtype_str);
+ const string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+ const Name name(name_str);
+ const RRType rtype(rtype_str);
RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
if (ttl_ != ttl) {
@@ -745,6 +782,7 @@ public:
arg(rrset->getName()).arg(rrset->getType());
return (rrset);
}
+
private:
// Load next row of data
void getData() {
@@ -756,10 +794,14 @@ private:
rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
}
+ // The dedicated accessor
+ shared_ptr<DatabaseAccessor> accessor_;
// The context
- const DatabaseAccessor::IteratorContextPtr context_;
+ DatabaseAccessor::IteratorContextPtr context_;
// Class of the zone
- RRClass class_;
+ const RRClass class_;
+ // SOA of the zone, if any (it should normally exist)
+ ConstRRsetPtr soa_;
// Status
bool ready_, data_ready_;
// Data of the next row
@@ -770,30 +812,13 @@ private:
ZoneIteratorPtr
DatabaseClient::getIterator(const isc::dns::Name& name) const {
- // Get the zone
- std::pair<bool, int> zone(accessor_->getZone(name.toText()));
- if (!zone.first) {
- // No such zone, can't continue
- isc_throw(DataSourceError, "Zone " + name.toText() +
- " can not be iterated, because it doesn't exist "
- "in this data source");
- }
- // Request the context
- DatabaseAccessor::IteratorContextPtr
- context(accessor_->getAllRecords(zone.second));
- // It must not return NULL, that's a bug of the implementation
- if (context == DatabaseAccessor::IteratorContextPtr()) {
- isc_throw(isc::Unexpected, "Iterator context null at " +
- name.toText());
- }
- // Create the iterator and return it
- // TODO: Once #1062 is merged with this, we need to get the
- // actual zone class from the connection, as the DatabaseClient
- // doesn't know it and the iterator needs it (so it wouldn't query
- // it each time)
+ ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
+ accessor_->clone(), name,
+ rrclass_));
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
arg(name);
- return (ZoneIteratorPtr(new DatabaseIterator(context, RRClass::IN())));
+
+ return (iterator);
}
//
@@ -815,13 +840,13 @@ public:
virtual ~DatabaseUpdater() {
if (!committed_) {
try {
- accessor_->rollbackUpdateZone();
+ accessor_->rollback();
logger.info(DATASRC_DATABASE_UPDATER_ROLLBACK)
.arg(zone_name_).arg(zone_class_).arg(db_name_);
} catch (const DataSourceError& e) {
// We generally expect that rollback always succeeds, and
// it should in fact succeed in a way we execute it. But
- // as the public API allows rollbackUpdateZone() to fail and
+ // as the public API allows rollback() to fail and
// throw, we should expect it. Obviously we cannot re-throw
// it. The best we can do is to log it as a critical error.
logger.error(DATASRC_DATABASE_UPDATER_ROLLBACKFAIL)
@@ -937,7 +962,7 @@ DatabaseUpdater::commit() {
<< zone_name_ << "/" << zone_class_ << " on "
<< db_name_);
}
- accessor_->commitUpdateZone();
+ accessor_->commit();
committed_ = true; // make sure the destructor won't trigger rollback
// We release the accessor immediately after commit is completed so that
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 8295779..b9379b7 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -85,7 +85,7 @@ public:
* Definitions of the fields to be passed to addRecordToZone().
*
* Each derived implementation of addRecordToZone() should expect
- * the "columns" vector to be filled with the values as described in this
+ * the "columns" array to be filled with the values as described in this
* enumeration, in this order.
*/
enum AddRecordColumns {
@@ -103,7 +103,7 @@ public:
* Definitions of the fields to be passed to deleteRecordInZone().
*
* Each derived implementation of deleteRecordInZone() should expect
- * the "params" vector to be filled with the values as described in this
+ * the "params" array to be filled with the values as described in this
* enumeration, in this order.
*/
enum DeleteRecordParams {
@@ -114,6 +114,31 @@ public:
};
/**
+ * Operation mode when adding a record diff.
+ *
+ * This is used as the "operation" parameter value of addRecordDiff().
+ */
+ enum DiffOperation {
+ DIFF_ADD = 0, ///< This diff is for adding an RR
+ DIFF_DELETE = 1 ///< This diff is for deleting an RR
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordDiff().
+ *
+ * Each derived implementation of addRecordDiff() should expect
+ * the "params" array to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DiffRecordParams {
+ DIFF_NAME = 0, ///< The owner name of the record (a domain name)
+ DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
+ DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+ DIFF_PARAM_COUNT = 4 ///< Number of parameters
+ };
+
+ /**
* \brief Destructor
*
* It is empty, but needs a virtual one, since we will use the derived
@@ -260,13 +285,14 @@ public:
/// \c commitUpdateZone()); if it's false, the existing records will be
/// intact unless explicitly deleted by \c deleteRecordInZone().
///
- /// A single \c DatabaseAccessor instance can perform at most one update
+ /// A single \c DatabaseAccessor instance can perform at most one
/// transaction; a duplicate call to this method before
- /// \c commitUpdateZone() or \c rollbackUpdateZone() will result in
- /// a \c DataSourceError exception. If multiple update attempts need
- /// to be performed concurrently (and if the underlying database allows
- /// such operation), separate \c DatabaseAccessor instance must be
- /// created.
+ /// \c commitUpdateZone() or \c rollbackUpdateZone(), or a call to this
+ /// method within another transaction started by \c startTransaction()
+ /// will result in a \c DataSourceError exception.
+ /// If multiple update attempts need to be performed concurrently (and
+ /// if the underlying database allows such operation), separate
+ /// \c DatabaseAccessor instance must be created.
///
/// \note The underlying database may not allow concurrent updates to
/// the same database instance even if different "connections" (or
@@ -295,8 +321,9 @@ public:
/// \c getZone(); for example, a specific implementation may use a
/// completely new zone ID when \c replace is true.
///
- /// \exception DataSourceError Duplicate call to this method, or some
- /// internal database related error.
+ /// \exception DataSourceError Duplicate call to this method, call to
+ /// this method within another transaction, or some internal database
+ /// related error.
///
/// \param zone_name A string representation of the zone name to be updated
/// \param replace Whether to replace the entire zone (see above)
@@ -382,12 +409,32 @@ public:
virtual void deleteRecordInZone(
const std::string (&params)[DEL_PARAM_COUNT]) = 0;
- /// Commit updates to the zone.
+ /// Start a general transaction.
///
- /// This method completes a transaction of making updates to the zone
- /// in the context started by startUpdateZone.
+ /// Each derived class version of this method starts a database
+ /// transaction in a way specific to the database details. Any subsequent
+ /// operations on the accessor are guaranteed not to be affected by
+ /// any update attempts made during the transaction. The transaction
+ /// must be terminated by either \c commit() or \c rollback().
///
- /// A successful call to \c startUpdateZone() must have preceded to
+ /// In practice, this transaction is intended to be used to perform
+ /// a set of atomic reads and work as a read-only lock. So, in many
+ /// cases \c commit() and \c rollback() will have the same effect.
+ ///
+ /// This transaction cannot coexist with an update transaction started
+ /// by \c startUpdateZone(). Such an attempt will result in
+ /// \c DataSourceError.
+ ///
+ /// \exception DataSourceError An attempt of nested transaction, or some
+ /// internal database related error.
+ virtual void startTransaction() = 0;
+
+ /// Commit a transaction.
+ ///
+ /// This method completes a transaction started by \c startTransaction
+ /// or \c startUpdateZone.
+ ///
+ /// A successful call to one of the "start" methods must have preceded to
/// this call; otherwise a \c DataSourceError exception will be thrown.
/// Once this method successfully completes, the transaction isn't
/// considered to exist any more. So a new transaction can now be
@@ -403,17 +450,16 @@ public:
///
/// \exception DataSourceError Call without a transaction, duplicate call
/// to the method or internal database error.
- virtual void commitUpdateZone() = 0;
+ virtual void commit() = 0;
- /// Rollback updates to the zone made so far.
+ /// Rollback any changes in a transaction made so far.
///
- /// This method rollbacks a transaction of making updates to the zone
- /// in the context started by startUpdateZone. When it succeeds
- /// (it normally should, but see below), the underlying database should
- /// be reverted to the point before performing the corresponding
- /// \c startUpdateZone().
+ /// This method rolls back a transaction started by \c startTransaction or
+ /// \c startUpdateZone. When it succeeds (it normally should, but see
+ /// below), the underlying database should be reverted to the point
+ /// before performing the corresponding "start" method.
///
- /// A successful call to \c startUpdateZone() must have preceded to
+ /// A successful call to one of the "start" methods must have preceded to
/// this call; otherwise a \c DataSourceError exception will be thrown.
/// Once this method successfully completes, the transaction isn't
/// considered to exist any more. So a new transaction can now be
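
A toy model of the transaction rules documented above (one transaction at a time; commit/rollback only valid inside one), written as a plain Python sketch rather than against the real DatabaseAccessor API:

    class ToyAccessor:
        """Illustrative only; mirrors the documented start/commit/rollback rules."""
        def __init__(self):
            self._in_transaction = False

        def start_transaction(self):
            if self._in_transaction:
                # corresponds to the DataSourceError on nested transactions
                raise RuntimeError("duplicate transaction")
            self._in_transaction = True

        def commit(self):
            if not self._in_transaction:
                raise RuntimeError("commit without a transaction")
            self._in_transaction = False

        def rollback(self):
            if not self._in_transaction:
                raise RuntimeError("rollback without a transaction")
            self._in_transaction = False
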
@@ -430,7 +476,83 @@ public:
///
/// \exception DataSourceError Call without a transaction, duplicate call
/// to the method or internal database error.
- virtual void rollbackUpdateZone() = 0;
+ virtual void rollback() = 0;
+
+ /// Install a single RR diff in difference sequences for zone update.
+ ///
+ /// This method inserts parameters of an update operation for a single RR
+ /// (either adding or deleting one) in the underlying database.
+ /// (These parameters would normally be a separate database table, but
+ /// actual realization can differ in specific implementations).
+ /// The information given via this method generally corresponds to either
+ /// a single call to \c addRecordToZone() or \c deleteRecordInZone(),
+ /// and this method is expected to be called immediately after (or before)
+ /// a call to either of those methods.
+ ///
+ /// Note, however, that this method passes more detailed information
+ /// than those update methods: it passes "serial", even if the diff
+ /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
+ /// while in \c deleteRecordInZone() it's omitted. This is because
+ /// the stored diffs are expected to be retrieved in the form that
+ /// \c getRecordDiffs() is expected to return. This means if the caller
+ /// wants to use this method with other update operations, it must
+ /// ensure the additional information is ready when this method is called.
+ ///
+ /// \note \c getRecordDiffs() is not yet implemented.
+ ///
+ /// The caller of this method must ensure that the added diffs via
+ /// this method in a single transaction form an IXFR-style difference
+ /// sequences: Each difference sequence is a sequence of RRs:
+ /// an older version of SOA (to be deleted), zero or more other deleted
+ /// RRs, the post-transaction SOA (to be added), and zero or more other
+ /// added RRs. So, for example, the first call to this method in a
+ /// transaction must always be deleting an SOA. Also, the \c serial
+ /// parameter must be equal to the value of the serial field of the
+ /// SOA that was last added or deleted (if the call is to add or delete
+ /// an SOA RR, \c serial must be identical to the serial of that SOA).
+ /// The underlying derived class implementation may or may not check
+ /// this condition, but if the caller doesn't meet the condition
+ /// a subsequent call to \c getRecordDiffs() will not work as expected.
+ ///
+ /// Any call to this method must be in a transaction, and, for now,
+ /// it must be a transaction triggered by \c startUpdateZone() (that is,
+ /// it cannot be a transaction started by \c startTransaction()).
+ /// All calls to this method are considered to be part of an atomic
+ /// transaction: Until \c commit() is performed, the added diffs are
+ /// not visible outside the transaction; if \c rollback() is performed,
+ /// all added diffs are canceled; and the added sequences are not
+ /// affected by any concurrent attempt of adding diffs (conflict resolution
+ /// is up to the database implementation).
+ ///
+ /// Also for now, all diffs are assumed to be for the zone that is
+ /// being updated in the context of \c startUpdateZone(). So the
+ /// \c zone_id parameter must be identical to the zone ID returned by
+ /// \c startUpdateZone().
+ ///
+ /// In a future version we may loosen this condition so that diffs can be
+ /// added in a generic transaction and may not even have to belong to
+ /// a single zone. For this possible extension \c zone_id parameter is
+ /// included even if it's redundant under the current restriction.
+ ///
+ /// The support for adding (or retrieving) diffs is optional; if it's
+ /// not supported in a specific data source, this method for the
+ /// corresponding derived class will throw a \c NotImplemented exception.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// zone ID doesn't match the zone being updated, or other internal
+ /// database error.
+ /// \exception NotImplemented Adding diffs is not supported in the
+ /// data source.
+ /// \exception Other The concrete derived method may throw other
+ /// data source specific exceptions.
+ ///
+ /// \param zone_id The zone for the diff to be added.
+ /// \param serial The SOA serial to which the diff belongs.
+ /// \param operation Either \c DIFF_ADD or \c DIFF_DELETE.
+ /// \param params An array of strings that defines a record for the diff.
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+ const std::string (&params)[DIFF_PARAM_COUNT]) = 0;
/// Clone the accessor with the same configuration.
///
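
The ordering constraint described for addRecordDiff() (each sequence starts by deleting the old SOA, and every diff carries the serial of the most recently added or deleted SOA) can be pictured with a small, purely illustrative checker; none of this code is part of the branch:

    DIFF_ADD, DIFF_DELETE = 0, 1   # same values as the DiffOperation enum above

    def check_diff_sequence(diffs):
        """diffs: list of (operation, rrtype, serial) tuples in call order."""
        current_serial = None
        for operation, rrtype, serial in diffs:
            if rrtype == "SOA":
                if current_serial is None and operation != DIFF_DELETE:
                    raise ValueError("sequence must start by deleting the old SOA")
                current_serial = serial
            else:
                if current_serial is None:
                    raise ValueError("non-SOA diff before any SOA diff")
                if serial != current_serial:
                    raise ValueError("serial must match the last added/deleted SOA")

    # A valid sequence taking the zone from serial 1230 to 1234:
    check_diff_sequence([
        (DIFF_DELETE, "SOA", 1230), (DIFF_DELETE, "A", 1230),
        (DIFF_ADD, "SOA", 1234), (DIFF_ADD, "A", 1234),
    ])
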
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
index 0102fcb..99d3331 100644
--- a/src/lib/datasrc/iterator.h
+++ b/src/lib/datasrc/iterator.h
@@ -12,10 +12,15 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_ZONE_ITERATOR_H
+#define __DATASRC_ZONE_ITERATOR_H 1
+
#include <dns/rrset.h>
#include <boost/noncopyable.hpp>
+#include <datasrc/zone.h>
+
namespace isc {
namespace datasrc {
@@ -55,7 +60,46 @@ public:
* gets to the end of the zone.
*/
virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+
+ /**
+ * \brief Return the SOA record of the zone in the iterator context.
+ *
+ * This method returns the zone's SOA record (if any, and a valid zone
+ * should have it) in the form of an RRset object. This SOA is identical
+ * to that (again, if any) contained in the sequence of RRsets returned
+ * by the iterator. In that sense this method is redundant, but is
+ * provided as a convenient utility for the application of the
+ * iterator; the application may need to know the SOA serial or the
+ * SOA RR itself for the purpose of protocol handling or skipping the
+ * expensive iteration processing.
+ *
+ * If the zone doesn't have an SOA (which is broken, but some data source
+ * may allow that situation), this method returns NULL. Also, in the
+ * normal and valid case, the SOA should have exactly one RDATA, but
+ * this API does not guarantee it as some data source may accept such an
+ * abnormal condition. It's up to the caller whether to check the number
+ * of RDATA and how to react to the unexpected case.
+ *
+ * Each concrete derived method must ensure that the SOA returned by this
+ * method is identical to the zone's SOA returned via the iteration.
+ * For example, even if another thread or process updates the SOA while
+ * the iterator is working, the result of this method must not be
+ * affected by the update. For database based data sources, this can
+ * be done by making the entire iterator operation as a single database
+ * transaction, but the actual implementation can differ.
+ *
+ * \exception None
+ *
+ * \return A shared pointer to an SOA RRset that would be returned
+ * from the iteration. It will be NULL if the zone doesn't have an SOA.
+ */
+ virtual isc::dns::ConstRRsetPtr getSOA() const = 0;
};
}
}
+#endif // __DATASRC_ZONE_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
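
A minimal illustration of the snapshot behaviour required of getSOA(): the SOA reported by the iterator must match what the iteration itself returns, even if the zone changes underneath. The toy class below is unrelated to the real ZoneIterator API and only demonstrates the idea:

    class ToyZoneIterator:
        """Takes a snapshot of the zone at construction time, so get_soa() and
        iteration stay consistent even if the source list is modified later."""
        def __init__(self, zone_rrsets):
            self._rrsets = list(zone_rrsets)  # snapshot
            self._soa = next((rr for rr in self._rrsets if rr[1] == "SOA"), None)
            self._pos = 0

        def get_soa(self):
            return self._soa

        def get_next_rrset(self):
            if self._pos >= len(self._rrsets):
                return None
            rrset = self._rrsets[self._pos]
            self._pos += 1
            return rrset

    zone = [("example.org.", "SOA", "ns1. admin. 1 3600 1800 2419200 7200"),
            ("example.org.", "NS", "ns1.example.org.")]
    it = ToyZoneIterator(zone)
    zone.append(("www.example.org.", "A", "192.0.2.1"))  # later update, ignored by the snapshot
    assert it.get_soa()[1] == "SOA"
    assert len(list(iter(it.get_next_rrset, None))) == 2
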
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index c360900..db4e5cb 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -18,7 +18,7 @@
#include <log/macros.h>
#include <datasrc/datasrc_messages.h>
-/// \file logger.h
+/// \file datasrc/logger.h
/// \brief Data Source library global logger
///
/// This holds the logger for the data source library. It is a private header
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 2b556ab..8da43d0 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -780,6 +780,10 @@ public:
return (result);
}
+
+ virtual ConstRRsetPtr getSOA() const {
+ isc_throw(NotImplemented, "Not implemented");
+ }
};
} // End of anonymous namespace
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index ccdfa48..b6c098a 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -209,7 +209,7 @@ public:
/// \exception isc::InvalidParameter Unsettable flag is specified
/// \exception None otherwise
/// \param flag The node flag to be changed.
- /// \on If \c true, set the flag to on; otherwise set it to off.
+ /// \param on If \c true, set the flag to on; otherwise set it to off.
void setFlag(Flags flag, bool on = true) {
if ((flag & ~SETTABLE_FLAGS) != 0) {
isc_throw(isc::InvalidParameter,
@@ -226,7 +226,8 @@ public:
private:
/// \name Callback related methods
///
- /// See the description of \c RBTree<T>::find() about callbacks.
+ /// See the description of \c RBTree<T>::find() at \ref callback
+ /// about callbacks.
///
/// These methods never throw an exception.
//@{
@@ -702,6 +703,7 @@ public:
}
/// \brief Find with callback and node chain.
+ /// \anchor callback
///
/// This version of \c find() is specifically designed for the backend
/// of the \c InMemoryZoneFinder class, and implements all necessary
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 6d6dbba..efa5717 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -52,7 +52,9 @@ enum StatementID {
DEL_RECORD = 8,
ITERATE = 9,
FIND_PREVIOUS = 10,
- NUM_STATEMENTS = 11
+ ADD_RECORD_DIFF = 11,
+ GET_RECORD_DIFF = 12, // This is temporary for testing "add diff"
+ NUM_STATEMENTS = 13
};
const char* const text_statements[NUM_STATEMENTS] = {
@@ -73,7 +75,7 @@ const char* const text_statements[NUM_STATEMENTS] = {
"DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
"AND rdtype=?3 AND rdata=?4",
"SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
- "WHERE zone_id = ?1 ORDER BY name, rdtype",
+ "WHERE zone_id = ?1 ORDER BY rname, rdtype",
/*
* This one looks for previous name with NSEC record. It is done by
* using the reversed name. The NSEC is checked because we need to
@@ -81,23 +83,63 @@ const char* const text_statements[NUM_STATEMENTS] = {
*/
"SELECT name FROM records " // FIND_PREVIOUS
"WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1"
+ "rname < $2 ORDER BY rname DESC LIMIT 1",
+ "INSERT INTO diffs " // ADD_RECORD_DIFF
+ "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
+ , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+ "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
};
struct SQLite3Parameters {
SQLite3Parameters() :
- db_(NULL), version_(-1), updating_zone(false), updated_zone_id(-1)
+ db_(NULL), version_(-1), in_transaction(false), updating_zone(false),
+ updated_zone_id(-1)
{
for (int i = 0; i < NUM_STATEMENTS; ++i) {
statements_[i] = NULL;
}
}
+ // This method returns the SQLite3 statement for the specified ID. If
+ // it's not yet prepared it internally creates a new one. This way we
+ // can avoid preparing unnecessary statements and minimize the overhead.
+ sqlite3_stmt*
+ getStatement(int id) {
+ assert(id < NUM_STATEMENTS);
+ if (statements_[id] == NULL) {
+ assert(db_ != NULL);
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db_, text_statements[id], -1, &prepared,
+ NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: "
+ << text_statements[id] <<
+ ": " << sqlite3_errmsg(db_));
+ }
+ statements_[id] = prepared;
+ }
+ return (statements_[id]);
+ }
+
+ void
+ finalizeStatements() {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ if (statements_[i] != NULL) {
+ sqlite3_finalize(statements_[i]);
+ statements_[i] = NULL;
+ }
+ }
+ }
+
sqlite3* db_;
int version_;
+ bool in_transaction; // whether or not a transaction has been started
+ bool updating_zone; // whether or not updating the zone
+ int updated_zone_id; // valid only when in_transaction is true
+private:
+ // statements_ are private and must be accessed via getStatement() outside
+ // of this structure.
sqlite3_stmt* statements_[NUM_STATEMENTS];
- bool updating_zone; // whether or not updating the zone
- int updated_zone_id; // valid only when updating_zone is true
};
// This is a helper class to encapsulate the code logic of executing
@@ -114,18 +156,19 @@ public:
// DataSourceError exception.
StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
const char* desc) :
- dbparameters_(dbparameters), stmt_id_(stmt_id), desc_(desc)
+ dbparameters_(dbparameters), stmt_(dbparameters.getStatement(stmt_id)),
+ desc_(desc)
{
- sqlite3_clear_bindings(dbparameters_.statements_[stmt_id_]);
+ sqlite3_clear_bindings(stmt_);
}
~StatementProcessor() {
- sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ sqlite3_reset(stmt_);
}
void exec() {
- if (sqlite3_step(dbparameters_.statements_[stmt_id_]) != SQLITE_DONE) {
- sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ if (sqlite3_step(stmt_) != SQLITE_DONE) {
+ sqlite3_reset(stmt_);
isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
sqlite3_errmsg(dbparameters_.db_));
}
@@ -133,7 +176,7 @@ public:
private:
SQLite3Parameters& dbparameters_;
- const StatementID stmt_id_;
+ sqlite3_stmt* stmt_;
const char* const desc_;
};
@@ -168,10 +211,6 @@ namespace {
class Initializer {
public:
~Initializer() {
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- sqlite3_finalize(params_.statements_[i]);
- }
-
if (params_.db_ != NULL) {
sqlite3_close(params_.db_);
}
@@ -204,6 +243,11 @@ const char* const SCHEMA_LIST[] = {
"ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
"rdata STRING NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
+ "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdata STRING NOT NULL)",
NULL
};
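
The diffs table introduced above can be exercised on its own with Python's sqlite3 module; this is just a quick illustration of the schema and of the column order used by ADD_RECORD_DIFF, not code from the branch:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    # Same column layout as the new "diffs" table
    conn.execute(
        "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
        "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
        "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
        "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
        "rdata STRING NOT NULL)")
    # The branch's ADD_RECORD_DIFF statement uses numbered placeholders (?1..?7);
    # plain "?" placeholders behave the same here.
    conn.execute(
        "INSERT INTO diffs (zone_id, version, operation, name, rrtype, ttl, rdata) "
        "VALUES (?, ?, ?, ?, ?, ?, ?)",
        (1, 1234, 1, "example.org.", "SOA", 3600,
         "ns.example.org. admin.example.org. 1234 3600 1800 2419200 7200"))
    print(conn.execute(
        "SELECT name, rrtype, ttl, rdata, version, operation "
        "FROM diffs ORDER BY id, operation").fetchall())
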
@@ -212,7 +256,7 @@ prepare(sqlite3* const db, const char* const statement) {
sqlite3_stmt* prepared = NULL;
if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
- statement);
+ statement << ": " << sqlite3_errmsg(db));
}
return (prepared);
}
@@ -302,10 +346,6 @@ checkAndSetupSchema(Initializer* initializer) {
schema_version = create_database(db);
}
initializer->params_.version_ = schema_version;
-
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- initializer->params_.statements_[i] = prepare(db, text_statements[i]);
- }
}
}
@@ -343,12 +383,7 @@ SQLite3Accessor::close(void) {
"SQLite data source is being closed before open");
}
- // XXX: sqlite3_finalize() could fail. What should we do in that case?
- for (int i = 0; i < NUM_STATEMENTS; ++i) {
- sqlite3_finalize(dbparameters_->statements_[i]);
- dbparameters_->statements_[i] = NULL;
- }
-
+ dbparameters_->finalizeStatements();
sqlite3_close(dbparameters_->db_);
dbparameters_->db_ = NULL;
}
@@ -356,7 +391,7 @@ SQLite3Accessor::close(void) {
std::pair<bool, int>
SQLite3Accessor::getZone(const std::string& name) const {
int rc;
- sqlite3_stmt* const stmt = dbparameters_->statements_[ZONE];
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ZONE);
// Take the statement (simple SELECT id FROM zones WHERE...)
// and prepare it (bind the parameters to it)
@@ -520,7 +555,7 @@ private:
const IteratorType iterator_type_;
boost::shared_ptr<const SQLite3Accessor> accessor_;
- sqlite3_stmt *statement_;
+ sqlite3_stmt* statement_;
const std::string name_;
};
@@ -543,6 +578,10 @@ SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
isc_throw(DataSourceError,
"duplicate zone update on SQLite3 data source");
}
+ if (dbparameters_->in_transaction) {
+ isc_throw(DataSourceError,
+ "zone update attempt in another SQLite3 transaction");
+ }
const pair<bool, int> zone_info(getZone(zone_name));
if (!zone_info.first) {
@@ -550,17 +589,16 @@ SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
}
StatementProcessor(*dbparameters_, BEGIN,
- "start an SQLite3 transaction").exec();
+ "start an SQLite3 update transaction").exec();
if (replace) {
try {
StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
"delete zone records");
- sqlite3_clear_bindings(
- dbparameters_->statements_[DEL_ZONE_RECORDS]);
- if (sqlite3_bind_int(dbparameters_->statements_[DEL_ZONE_RECORDS],
- 1, zone_info.second) != SQLITE_OK) {
+ sqlite3_stmt* stmt = dbparameters_->getStatement(DEL_ZONE_RECORDS);
+ sqlite3_clear_bindings(stmt);
+ if (sqlite3_bind_int(stmt, 1, zone_info.second) != SQLITE_OK) {
isc_throw(DataSourceError,
"failed to bind SQLite3 parameter: " <<
sqlite3_errmsg(dbparameters_->db_));
@@ -577,6 +615,7 @@ SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
}
}
+ dbparameters_->in_transaction = true;
dbparameters_->updating_zone = true;
dbparameters_->updated_zone_id = zone_info.second;
@@ -584,28 +623,40 @@ SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
}
void
-SQLite3Accessor::commitUpdateZone() {
- if (!dbparameters_->updating_zone) {
- isc_throw(DataSourceError, "committing zone update on SQLite3 "
+SQLite3Accessor::startTransaction() {
+ if (dbparameters_->in_transaction) {
+ isc_throw(DataSourceError,
+ "duplicate transaction on SQLite3 data source");
+ }
+
+ StatementProcessor(*dbparameters_, BEGIN,
+ "start an SQLite3 transaction").exec();
+ dbparameters_->in_transaction = true;
+}
+
+void
+SQLite3Accessor::commit() {
+ if (!dbparameters_->in_transaction) {
+ isc_throw(DataSourceError, "performing commit on SQLite3 "
"data source without transaction");
}
StatementProcessor(*dbparameters_, COMMIT,
"commit an SQLite3 transaction").exec();
- dbparameters_->updating_zone = false;
+ dbparameters_->in_transaction = false;
dbparameters_->updated_zone_id = -1;
}
void
-SQLite3Accessor::rollbackUpdateZone() {
- if (!dbparameters_->updating_zone) {
- isc_throw(DataSourceError, "rolling back zone update on SQLite3 "
+SQLite3Accessor::rollback() {
+ if (!dbparameters_->in_transaction) {
+ isc_throw(DataSourceError, "performing rollback on SQLite3 "
"data source without transaction");
}
StatementProcessor(*dbparameters_, ROLLBACK,
"rollback an SQLite3 transaction").exec();
- dbparameters_->updating_zone = false;
+ dbparameters_->in_transaction = false;
dbparameters_->updated_zone_id = -1;
}
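
With commitUpdateZone()/rollbackUpdateZone() generalized into commit()/rollback() and the new startTransaction(), every BEGIN is paired with exactly one COMMIT or ROLLBACK and tracked through the in_transaction flag. A hedged caller-side sketch of that pairing, using a stand-in interface instead of the real SQLite3Accessor:

    // Stand-in interface mirroring the methods introduced above.
    class TransactionalAccessor {
    public:
        virtual ~TransactionalAccessor() {}
        virtual void startTransaction() = 0;
        virtual void commit() = 0;
        virtual void rollback() = 0;
    };

    // Pair every startTransaction() with exactly one commit() or rollback().
    void withReadTransaction(TransactionalAccessor& accessor) {
        accessor.startTransaction();
        try {
            // ... read-only work under a stable snapshot ...
            accessor.commit();
        } catch (...) {
            accessor.rollback();
            throw;
        }
    }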
@@ -616,7 +667,7 @@ void
doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
COLUMNS_TYPE update_params, const char* exec_desc)
{
- sqlite3_stmt* const stmt = dbparams.statements_[stmt_id];
+ sqlite3_stmt* const stmt = dbparams.getStatement(stmt_id);
StatementProcessor executer(dbparams, stmt_id, exec_desc);
int param_id = 0;
@@ -662,34 +713,98 @@ SQLite3Accessor::deleteRecordInZone(const string (&params)[DEL_PARAM_COUNT]) {
*dbparameters_, DEL_RECORD, params, "delete record from zone");
}
+void
+SQLite3Accessor::addRecordDiff(int zone_id, uint32_t serial,
+ DiffOperation operation,
+                               const std::string (&params)[DIFF_PARAM_COUNT])
+{
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "adding record diff without update "
+ "transaction on " << getDBName());
+ }
+ if (zone_id != dbparameters_->updated_zone_id) {
+ isc_throw(DataSourceError, "bad zone ID for adding record diff on "
+ << getDBName() << ": " << zone_id << ", must be "
+ << dbparameters_->updated_zone_id);
+ }
+
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ADD_RECORD_DIFF);
+ StatementProcessor executer(*dbparameters_, ADD_RECORD_DIFF,
+ "add record diff");
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int64(stmt, ++param_id, serial)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int(stmt, ++param_id, operation)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ for (int i = 0; i < DIFF_PARAM_COUNT; ++i) {
+ if (sqlite3_bind_text(stmt, ++param_id, params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ }
+ executer.exec();
+}
+
+vector<vector<string> >
+SQLite3Accessor::getRecordDiff(int zone_id) {
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(GET_RECORD_DIFF);
+ sqlite3_bind_int(stmt, 1, zone_id);
+
+ vector<vector<string> > result;
+ while (sqlite3_step(stmt) == SQLITE_ROW) {
+ vector<string> row_result;
+ for (int i = 0; i < 6; ++i) {
+ row_result.push_back(convertToPlainChar(sqlite3_column_text(stmt,
+ i),
+ dbparameters_->db_));
+ }
+ result.push_back(row_result);
+ }
+ sqlite3_reset(stmt);
+
+ return (result);
+}
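+
Both routines above follow the accessor's usual prepared-statement discipline: reset, clear old bindings, bind the new parameters, step, and reset again before the statement is reused. A self-contained sketch of that discipline, with placeholder error handling (std::runtime_error instead of isc_throw):

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    // Fetch the first text column for a given zone ID from an already
    // prepared statement; the SQL behind 'stmt' is irrelevant here.
    std::string firstColumn(sqlite3* db, sqlite3_stmt* stmt, int zone_id) {
        sqlite3_reset(stmt);
        sqlite3_clear_bindings(stmt);
        if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
            throw std::runtime_error(sqlite3_errmsg(db));
        }
        std::string result;
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            const unsigned char* text = sqlite3_column_text(stmt, 0);
            result = (text != NULL) ? reinterpret_cast<const char*>(text) : "";
        }
        sqlite3_reset(stmt);    // always reset so the statement can be reused
        return (result);
    }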
+
std::string
SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
const
{
- sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
- sqlite3_clear_bindings(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(FIND_PREVIOUS);
+ sqlite3_reset(stmt);
+ sqlite3_clear_bindings(stmt);
- if (sqlite3_bind_int(dbparameters_->statements_[FIND_PREVIOUS], 1,
- zone_id) != SQLITE_OK) {
+ if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
" to SQL statement (find previous): " <<
sqlite3_errmsg(dbparameters_->db_));
}
- if (sqlite3_bind_text(dbparameters_->statements_[FIND_PREVIOUS], 2,
- rname.c_str(), -1, SQLITE_STATIC) != SQLITE_OK) {
+ if (sqlite3_bind_text(stmt, 2, rname.c_str(), -1, SQLITE_STATIC) !=
+ SQLITE_OK) {
isc_throw(SQLite3Error, "Could not bind name " << rname <<
" to SQL statement (find previous): " <<
sqlite3_errmsg(dbparameters_->db_));
}
std::string result;
- const int rc = sqlite3_step(dbparameters_->statements_[FIND_PREVIOUS]);
+ const int rc = sqlite3_step(stmt);
if (rc == SQLITE_ROW) {
// We found it
- result = convertToPlainChar(sqlite3_column_text(dbparameters_->
- statements_[FIND_PREVIOUS], 0), dbparameters_->db_);
+ result = convertToPlainChar(sqlite3_column_text(stmt, 0),
+ dbparameters_->db_);
}
- sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_reset(stmt);
if (rc == SQLITE_DONE) {
// No NSEC records here, this DB doesn't support DNSSEC or
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 8b74309..6b5369c 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -131,6 +131,8 @@ public:
virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
bool replace);
+ virtual void startTransaction();
+
/// \note we are quite impatient here: it's quite possible that the COMMIT
/// fails due to other process performing SELECT on the same database
/// (consider the case where COMMIT is done by xfrin or dynamic update
@@ -139,7 +141,7 @@ public:
/// attempt and/or increase timeout before giving up the COMMIT, even
/// if it still doesn't guarantee 100% success. Right now this
/// implementation throws a \c DataSourceError exception in such a case.
- virtual void commitUpdateZone();
+ virtual void commit();
/// \note In SQLite3, rollback can fail if another unfinished
/// statement is being performed for the same database structure.
@@ -147,7 +149,7 @@ public:
/// guaranteed to be prevented at the API level. If it ever happens, this
/// method throws a \c DataSourceError exception. It should be
/// considered a bug of the higher level application program.
- virtual void rollbackUpdateZone();
+ virtual void rollback();
virtual void addRecordToZone(
const std::string (&columns)[ADD_COLUMN_COUNT]);
@@ -155,6 +157,23 @@ public:
virtual void deleteRecordInZone(
const std::string (&params)[DEL_PARAM_COUNT]);
+    /// This derived version of the method prepares an SQLite3 statement
+    /// for adding the diff the first time it's called, and if that fails
+    /// throws an \c SQLite3Error exception.
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+        const std::string (&params)[DIFF_PARAM_COUNT]);
+
+ // A short term method for tests until we implement more complete
+ // API to retrieve diffs (#1330). It returns all records of the diffs
+ // table whose zone_id column is identical to the given value.
+ // Since this is a short term workaround, it ignores some corner cases
+ // (such as an SQLite3 execution failure) and is not very efficient,
+ // in favor of brevity. Once #1330 is completed, this method must be
+ // removed, and the tests using this method must be rewritten using the
+ // official API.
+ std::vector<std::vector<std::string> > getRecordDiff(int zone_id);
+
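
A hedged caller-side sketch for the addRecordDiff() interface declared above. The four textual parameters (owner name, type, TTL, RDATA) and the numeric operation codes are inferred from the test data later in this patch, so treat that layout as an assumption rather than a documented contract:

    #include <stdint.h>
    #include <string>

    // Operation codes as the tests use them (0 = add, 1 = delete);
    // the real type is DatabaseAccessor::DiffOperation.
    enum DiffOperation { DIFF_ADD = 0, DIFF_DELETE = 1 };

    // Record the removal of the old SOA as one diff row. The accessor call
    // itself is commented out because DatabaseAccessor is defined elsewhere.
    void recordOldSOA(int zone_id) {
        const uint32_t serial = 1234;
        const std::string params[4] = {
            "example.com.", "SOA", "3600",
            "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200"
        };
        // accessor.addRecordDiff(zone_id, serial, DIFF_DELETE, params);
        (void)zone_id; (void)serial; (void)params;
    }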
/// The SQLite3 implementation of this method returns a string starting
/// with a fixed prefix of "sqlite3_" followed by the DB file name
/// removing any path name. For example, for the DB file
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 3d2ba6d..e5cca0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -76,4 +76,5 @@ EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
EXTRA_DIST += testdata/rwtest.sqlite3
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index de6b5fa..1514fc3 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,7 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <boost/foreach.hpp>
+#include <boost/shared_ptr.hpp>
#include <gtest/gtest.h>
@@ -33,7 +33,10 @@
using namespace isc::datasrc;
using namespace std;
-using namespace boost;
+// Don't import the entire boost namespace. It can unexpectedly hide uint32_t
+// on some systems.
+using boost::shared_ptr;
+using boost::dynamic_pointer_cast;
using namespace isc::dns;
namespace {
@@ -154,9 +157,13 @@ const char* const TEST_RECORDS[][5] = {
// Put some data into apex (including NS) so we can check our NS
// doesn't break anything
+ {"example.org.", "SOA", "3600", "", "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200" },
{"example.org.", "NS", "3600", "", "ns.example.com."},
{"example.org.", "A", "3600", "", "192.0.2.1"},
{"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ {"example.org.", "RRSIG", "3600", "", "SOA 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
"20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
@@ -216,17 +223,21 @@ public:
}
virtual shared_ptr<DatabaseAccessor> clone() {
- return (shared_ptr<DatabaseAccessor>()); // bogus data, but unused
+ // This accessor is stateless, so we can simply return a new instance.
+ return (shared_ptr<DatabaseAccessor>(new NopAccessor));
}
virtual std::pair<bool, int> startUpdateZone(const std::string&, bool) {
// return dummy value. unused anyway.
return (pair<bool, int>(true, 0));
}
- virtual void commitUpdateZone() {}
- virtual void rollbackUpdateZone() {}
+ virtual void startTransaction() {}
+ virtual void commit() {}
+ virtual void rollback() {}
virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+ virtual void addRecordDiff(int, uint32_t, DiffOperation,
+ const std::string (&)[DIFF_PARAM_COUNT]) {}
virtual const std::string& getDBName() const {
return (database_name_);
@@ -273,7 +284,7 @@ class MockAccessor : public NopAccessor {
NameCompare > Domains;
public:
- MockAccessor() : rollbacked_(false) {
+ MockAccessor() : rollbacked_(false), did_transaction_(false) {
readonly_records_ = &readonly_records_master_;
update_records_ = &update_records_master_;
empty_records_ = &empty_records_master_;
@@ -289,6 +300,24 @@ public:
return (cloned_accessor);
}
+ virtual void startTransaction() {
+ // Currently we only use this transaction for simple read-only
+ // operations. So we just make a local copy of the data (we don't
+ // care about what happens after commit() or rollback()).
+ // Obviously as a consequence, if a test case tries to make multiple
+ // transactions on a single mock accessor it will fail.
+
+ // Check any attempt of multiple transactions
+ if (did_transaction_) {
+ isc_throw(isc::Unexpected, "MockAccessor::startTransaction() "
+ "called multiple times - likely a bug in the test");
+ }
+
+ readonly_records_copy_ = *readonly_records_;
+ readonly_records_ = &readonly_records_copy_;
+ did_transaction_ = true;
+ }
+
private:
class MockNameIteratorContext : public IteratorContext {
public:
@@ -360,38 +389,52 @@ private:
class MockIteratorContext : public IteratorContext {
private:
int step;
+ const Domains& domains_;
public:
- MockIteratorContext() :
- step(0)
+ MockIteratorContext(const Domains& domains) :
+ step(0), domains_(domains)
{ }
virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ // A special case: if the given set of domains is already empty,
+ // we always return false.
+ if (domains_.empty()) {
+ return (false);
+ }
+
+ // Return faked data for tests
switch (step ++) {
case 0:
data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "3600";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::TTL_COLUMN] = "3600";
data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
"1234 3600 1800 2419200 7200";
return (true);
- case 1:
+ case 2:
data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
data[DatabaseAccessor::TYPE_COLUMN] = "A";
data[DatabaseAccessor::TTL_COLUMN] = "300";
data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
return (true);
- case 2:
+ case 3:
data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
data[DatabaseAccessor::TYPE_COLUMN] = "A";
data[DatabaseAccessor::TTL_COLUMN] = "300";
data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
return (true);
- case 3:
+ case 4:
data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
data[DatabaseAccessor::TTL_COLUMN] = "300";
data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
return (true);
- case 4:
+ case 5:
data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
data[DatabaseAccessor::TTL_COLUMN] = "300";
@@ -400,7 +443,7 @@ private:
default:
ADD_FAILURE() <<
"Request past the end of iterator context";
- case 5:
+ case 6:
return (false);
}
}
@@ -443,7 +486,8 @@ private:
public:
virtual IteratorContextPtr getAllRecords(int id) const {
if (id == READONLY_ZONE_ID) {
- return (IteratorContextPtr(new MockIteratorContext()));
+ return (IteratorContextPtr(new MockIteratorContext(
+ *readonly_records_)));
} else if (id == 13) {
return (IteratorContextPtr());
} else if (id == 0) {
@@ -463,7 +507,11 @@ public:
new MockNameIteratorContext(*this, id, name,
subdomains)));
} else {
- isc_throw(isc::Unexpected, "Unknown zone ID");
+ // This iterator is bogus, but for the cases tested below that's
+ // sufficient.
+ return (IteratorContextPtr(
+ new MockNameIteratorContext(*this, READONLY_ZONE_ID,
+ name, subdomains)));
}
}
@@ -486,10 +534,10 @@ public:
return (pair<bool, int>(true, WRITABLE_ZONE_ID));
}
- virtual void commitUpdateZone() {
+ virtual void commit() {
*readonly_records_ = *update_records_;
}
- virtual void rollbackUpdateZone() {
+ virtual void rollback() {
// Special hook: if something with a name of "throw.example.org"
// has been added, trigger an imaginary unexpected event with an
// exception.
@@ -603,20 +651,20 @@ private:
// The following member variables are storage and/or update work space
// of the test zone. The "master"s are the real objects that contain
// the data, and they are shared among all accessors cloned from
- // an initially created one. The pointer members allow the sharing.
+ // an initially created one. The "copy" data will be used for read-only
+ // transaction. The pointer members allow the sharing.
// "readonly" is for normal lookups. "update" is the workspace for
// updates. When update starts it will be initialized either as an
// empty set (when replacing the entire zone) or as a copy of the
// "readonly" one. "empty" is a sentinel to produce negative results.
Domains readonly_records_master_;
+ Domains readonly_records_copy_;
Domains* readonly_records_;
Domains update_records_master_;
Domains* update_records_;
const Domains empty_records_master_;
const Domains* empty_records_;
- // used as temporary storage during the building of the fake data
-
// used as temporary storage after searchForRecord() and during
// getNextRecord() calls, as well as during the building of the
// fake data
@@ -632,6 +680,9 @@ private:
// Remember the mock accessor that was last cloned
boost::shared_ptr<MockAccessor> latest_clone_;
+ // Internal flag for duplicate check
+ bool did_transaction_;
+
const Domains& getMockRecords(int zone_id) const {
if (zone_id == READONLY_ZONE_ID) {
return (*readonly_records_);
@@ -860,7 +911,7 @@ public:
addRecordToZone(columns);
}
- commitUpdateZone();
+ commit();
}
};
@@ -951,56 +1002,64 @@ TEST_F(MockDatabaseClientTest, emptyIterator) {
EXPECT_THROW(it->getNextRRset(), isc::Unexpected);
}
+// checks if the given rrset matches the
+// given name, class, type and rdatas
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& rrclass,
+ const isc::dns::RRType& rrtype,
+ const isc::dns::RRTTL& rrttl,
+ const std::vector<std::string>& rdatas) {
+ isc::dns::RRsetPtr expected_rrset(
+ new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+ for (unsigned int i = 0; i < rdatas.size(); ++i) {
+ expected_rrset->addRdata(
+ isc::dns::rdata::createRdata(rrtype, rrclass,
+ rdatas[i]));
+ }
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+}
+
// Iterate through a zone
TYPED_TEST(DatabaseClientTest, iterator) {
ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
ConstRRsetPtr rrset(it->getNextRRset());
ASSERT_NE(ConstRRsetPtr(), rrset);
+ // The first name should be the zone origin.
+ EXPECT_EQ(this->zname_, rrset->getName());
+
// The rest of the checks work only for the mock accessor.
if (!this->is_mock_) {
return;
}
- EXPECT_EQ(Name("example.org"), rrset->getName());
- EXPECT_EQ(RRClass::IN(), rrset->getClass());
- EXPECT_EQ(RRType::SOA(), rrset->getType());
- EXPECT_EQ(RRTTL(300), rrset->getTTL());
- RdataIteratorPtr rit(rrset->getRdataIterator());
- ASSERT_FALSE(rit->isLast());
- rit->next();
- EXPECT_TRUE(rit->isLast());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ checkRRset(rrset, Name("example.org"), this->qclass_, RRType::A(),
+ this->rrttl_, this->expected_rdatas_);
rrset = it->getNextRRset();
- ASSERT_NE(ConstRRsetPtr(), rrset);
- EXPECT_EQ(Name("x.example.org"), rrset->getName());
- EXPECT_EQ(RRClass::IN(), rrset->getClass());
- EXPECT_EQ(RRType::A(), rrset->getType());
- EXPECT_EQ(RRTTL(300), rrset->getTTL());
- rit = rrset->getRdataIterator();
- ASSERT_FALSE(rit->isLast());
- EXPECT_EQ("192.0.2.1", rit->getCurrent().toText());
- rit->next();
- ASSERT_FALSE(rit->isLast());
- EXPECT_EQ("192.0.2.2", rit->getCurrent().toText());
- rit->next();
- EXPECT_TRUE(rit->isLast());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200");
+ checkRRset(rrset, Name("example.org"), this->qclass_, RRType::SOA(),
+ this->rrttl_, this->expected_rdatas_);
rrset = it->getNextRRset();
- ASSERT_NE(ConstRRsetPtr(), rrset);
- EXPECT_EQ(Name("x.example.org"), rrset->getName());
- EXPECT_EQ(RRClass::IN(), rrset->getClass());
- EXPECT_EQ(RRType::AAAA(), rrset->getType());
- EXPECT_EQ(RRTTL(300), rrset->getTTL());
- EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
- rit = rrset->getRdataIterator();
- ASSERT_FALSE(rit->isLast());
- EXPECT_EQ("2001:db8::1", rit->getCurrent().toText());
- rit->next();
- ASSERT_FALSE(rit->isLast());
- EXPECT_EQ("2001:db8::2", rit->getCurrent().toText());
- rit->next();
- EXPECT_TRUE(rit->isLast());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::A(),
+ RRTTL(300), this->expected_rdatas_);
+
+ rrset = it->getNextRRset();
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::AAAA(),
+ RRTTL(300), this->expected_rdatas_);
}
// This has inconsistent TTL in the set (the rest, like nonsense in
@@ -1011,23 +1070,96 @@ TEST_F(MockDatabaseClientTest, badIterator) {
EXPECT_EQ(it->getNextRRset()->getTTL(), isc::dns::RRTTL(300));
}
-// checks if the given rrset matches the
-// given name, class, type and rdatas
-void
-checkRRset(isc::dns::ConstRRsetPtr rrset,
- const isc::dns::Name& name,
- const isc::dns::RRClass& rrclass,
- const isc::dns::RRType& rrtype,
- const isc::dns::RRTTL& rrttl,
- const std::vector<std::string>& rdatas) {
- isc::dns::RRsetPtr expected_rrset(
- new isc::dns::RRset(name, rrclass, rrtype, rrttl));
- for (unsigned int i = 0; i < rdatas.size(); ++i) {
- expected_rrset->addRdata(
- isc::dns::rdata::createRdata(rrtype, rrclass,
- rdatas[i]));
+TYPED_TEST(DatabaseClientTest, getSOAFromIterator) {
+ vector<string> soa_data;
+ soa_data.push_back("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200");
+
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ checkRRset(it->getSOA(), this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_, soa_data);
+
+ // Iterate over the zone until we find an SOA. Although there's a broken
+ // RDATA that would trigger an exception in getNextRRset(), we should
+ // reach the SOA as the sequence should be sorted and the SOA is at
+ // the origin name (which has no bogus data).
+ ConstRRsetPtr rrset;
+ while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+ rrset->getType() != RRType::SOA()) {
+ ;
}
- isc::testutils::rrsetCheck(expected_rrset, rrset);
+ ASSERT_TRUE(rrset);
+ // It should be identical to the result of getSOA().
+ isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+TYPED_TEST(DatabaseClientTest, noSOAFromIterator) {
+ // First, empty the zone.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Then getSOA() should return NULL.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_FALSE(it->getSOA());
+}
+
+TYPED_TEST(DatabaseClientTest, iterateThenUpdate) {
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+
+ // Try to empty the zone after getting the iterator. Depending on the
+ // underlying data source, it may result in an exception due to the
+ // transaction for the iterator. In either case the integrity of the
+    // iterator result should be preserved.
+ try {
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Confirm at least it doesn't contain any SOA
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->getFinder()->find(this->zname_, RRType::SOA()).code);
+ } catch (const DataSourceError&) {}
+
+ ConstRRsetPtr rrset;
+ while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+ rrset->getType() != RRType::SOA()) {
+ ;
+ }
+ ASSERT_TRUE(rrset);
+ // It should be identical to the result of getSOA().
+ isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+TYPED_TEST(DatabaseClientTest, updateThenIterateThenUpdate) {
+ // First clear the zone.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Then iterate over it. It should immediately reach the end, at which
+ // point the transaction should be committed.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_FALSE(it->getNextRRset());
+
+ // So another update attempt should succeed, too.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+}
+
+TYPED_TEST(DatabaseClientTest, updateAfterDeleteIterator) {
+ // Similar to the previous case, but we delete the iterator in the
+ // middle of zone. The transaction should be canceled (actually no
+ // different from commit though) at that point.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_TRUE(it->getNextRRset());
+ it.reset();
+
+ // So another update attempt should succeed.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
}
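
The iterateThenUpdate and updateThenIterateThenUpdate cases above rely on the iterator keeping a consistent view while the zone is emptied underneath it; for the mock accessor this is what the copy made in startTransaction() provides. A minimal sketch of that snapshot idea, with a plain std::map standing in for the test's Domains typedef:

    #include <map>
    #include <string>
    #include <vector>

    typedef std::map<std::string, std::vector<std::string> > Records;

    struct SnapshotReader {
        const Records* view_;    // where reads currently go
        Records copy_;           // private snapshot once a transaction starts

        explicit SnapshotReader(const Records& shared) : view_(&shared) {}

        void startTransaction(const Records& shared) {
            copy_ = shared;      // deep copy of the current contents
            view_ = &copy_;      // later changes to 'shared' are not seen
        }
    };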
void
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 62fa3c3..90b2ac1 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -22,6 +22,7 @@
#include <dns/rrclass.h>
#include <gtest/gtest.h>
+#include <boost/lexical_cast.hpp>
#include <boost/scoped_ptr.hpp>
#include <fstream>
#include <sqlite3.h>
@@ -29,6 +30,7 @@
using namespace std;
using namespace isc::datasrc;
using boost::shared_ptr;
+using boost::lexical_cast;
using isc::data::ConstElementPtr;
using isc::data::Element;
using isc::dns::RRClass;
@@ -130,18 +132,6 @@ TEST_F(SQLite3AccessorTest, iterator) {
std::string data[DatabaseAccessor::COLUMN_COUNT];
// Get and check the first and only record
EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
-
- EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
- EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
-
- EXPECT_TRUE(context->getNext(data));
EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
@@ -174,16 +164,22 @@ TEST_F(SQLite3AccessorTest, iterator) {
EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
EXPECT_TRUE(context->getNext(data));
- EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
EXPECT_TRUE(context->getNext(data));
EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
- EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
- EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+ EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
EXPECT_TRUE(context->getNext(data));
EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
@@ -194,6 +190,12 @@ TEST_F(SQLite3AccessorTest, iterator) {
EXPECT_TRUE(context->getNext(data));
EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
@@ -214,8 +216,7 @@ TEST(SQLite3Open, getDBNameExampleROOT) {
EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
}
-// Simple function to cound the number of records for
-// any name
+// Simple function to match records
void
checkRecordRow(const std::string columns[],
const std::string& field0,
@@ -518,6 +519,7 @@ protected:
std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+ std::string diff_params[DatabaseAccessor::DIFF_PARAM_COUNT];
vector<const char* const*> expected_stored; // placeholder for checkRecords
vector<const char* const*> empty_stored; // indicate no corresponding data
@@ -550,7 +552,7 @@ TEST_F(SQLite3Update, emptyUpdate) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
zone_id = accessor->startUpdateZone("example.com.", false).second;
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
- accessor->commitUpdateZone();
+ accessor->commit();
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
}
@@ -561,7 +563,7 @@ TEST_F(SQLite3Update, flushZone) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
zone_id = accessor->startUpdateZone("example.com.", true).second;
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
- accessor->commitUpdateZone();
+ accessor->commit();
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
}
@@ -575,7 +577,7 @@ TEST_F(SQLite3Update, readWhileUpdate) {
// Once the changes are committed, the other accessor will see the new
// data.
- accessor->commitUpdateZone();
+ accessor->commit();
checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
empty_stored);
}
@@ -585,7 +587,7 @@ TEST_F(SQLite3Update, rollback) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
// Rollback will revert the change made by startUpdateZone(, true).
- accessor->rollbackUpdateZone();
+ accessor->rollback();
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
}
@@ -599,7 +601,7 @@ TEST_F(SQLite3Update, rollbackFailure) {
EXPECT_TRUE(iterator->getNext(columns));
accessor->startUpdateZone("example.com.", true);
- EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+ EXPECT_THROW(accessor->rollback(), DataSourceError);
}
TEST_F(SQLite3Update, commitConflict) {
@@ -612,8 +614,8 @@ TEST_F(SQLite3Update, commitConflict) {
// which will prevent commit.
zone_id = accessor->startUpdateZone("example.com.", true).second;
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
- EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
- accessor->rollbackUpdateZone(); // rollback should still succeed
+ EXPECT_THROW(accessor->commit(), DataSourceError);
+ accessor->rollback(); // rollback should still succeed
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
}
@@ -631,9 +633,9 @@ TEST_F(SQLite3Update, updateConflict) {
// Once we rollback the other attempt of change, we should be able to
// start and commit the transaction using the main accessor.
- another_accessor->rollbackUpdateZone();
+ another_accessor->rollback();
accessor->startUpdateZone("example.com.", true);
- accessor->commitUpdateZone();
+ accessor->commit();
}
TEST_F(SQLite3Update, duplicateUpdate) {
@@ -643,11 +645,11 @@ TEST_F(SQLite3Update, duplicateUpdate) {
}
TEST_F(SQLite3Update, commitWithoutTransaction) {
- EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
+ EXPECT_THROW(accessor->commit(), DataSourceError);
}
TEST_F(SQLite3Update, rollbackWithoutTransaction) {
- EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+ EXPECT_THROW(accessor->rollback(), DataSourceError);
}
TEST_F(SQLite3Update, addRecord) {
@@ -664,7 +666,7 @@ TEST_F(SQLite3Update, addRecord) {
checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
// Commit the change, and confirm the new data is still there.
- accessor->commitUpdateZone();
+ accessor->commit();
checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
}
@@ -678,7 +680,7 @@ TEST_F(SQLite3Update, addThenRollback) {
expected_stored.push_back(new_data);
checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
- accessor->rollbackUpdateZone();
+ accessor->rollback();
checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
}
@@ -717,7 +719,7 @@ TEST_F(SQLite3Update, deleteRecord) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
// Commit the change, and confirm the deleted data still isn't there.
- accessor->commitUpdateZone();
+ accessor->commit();
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
}
@@ -730,7 +732,7 @@ TEST_F(SQLite3Update, deleteThenRollback) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
// Rollback the change, and confirm the data still exists.
- accessor->rollbackUpdateZone();
+ accessor->rollback();
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
}
@@ -768,4 +770,346 @@ TEST_F(SQLite3Update, invalidDelete) {
// An attempt of delete before an explicit start of transaction
EXPECT_THROW(accessor->deleteRecordInZone(del_params), DataSourceError);
}
+
+TEST_F(SQLite3Update, emptyTransaction) {
+ // A generic transaction without doing anything inside it. Just check
+ // it doesn't throw or break the database.
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->startTransaction();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, duplicateTransaction) {
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, transactionInUpdate) {
+ accessor->startUpdateZone("example.com.", true);
+ EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateInTransaction) {
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateWithTransaction) {
+ // Start a read-only transaction, wherein we execute two reads.
+ // Meanwhile we start a write (update) transaction. The commit attempt
+    // for the write transaction will fail due to the lock held by the read
+ // transaction. The database should be intact.
+ another_accessor->startTransaction();
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+ EXPECT_THROW(accessor->commit(), DataSourceError);
+
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+ another_accessor->commit(); // this shouldn't throw
+}
+
+TEST_F(SQLite3Update, updateWithoutTransaction) {
+ // Similar to the previous test, but reads are not protected in a
+ // transaction. So the write transaction will succeed and flush the DB,
+ // and the result of the second read is different from the first.
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+ accessor->commit();
+
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ empty_stored);
+}
+
+TEST_F(SQLite3Update, concurrentTransactions) {
+    // Two read-only transactions can coexist (unlike the read vs. write case).
+ // Start one transaction.
+ accessor->startTransaction();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Start a new one.
+ another_accessor->startTransaction();
+
+ // The second transaction doesn't affect the first or vice versa.
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ // Commit should be successful for both transactions.
+ accessor->commit();
+ another_accessor->commit();
+}
+
+//
+// Commonly used data for diff related tests. The last two entries are
+// a textual representation of "version" and a textual representation of
+// diff operation (either DIFF_ADD_TEXT or DIFF_DELETE_TEXT). We use this
+// format for the convenience of generating test data and checking the results.
+//
+const char* const DIFF_ADD_TEXT = "0";
+const char* const DIFF_DELETE_TEXT = "1";
+const char* const diff_begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_del_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.1", "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_end_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ "1300", DIFF_ADD_TEXT
+};
+const char* const diff_add_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.10", "1234", DIFF_ADD_TEXT
+};
+
+// The following two are helper functions to convert textual test data
+// to integral zone ID and diff operation.
+int
+getVersion(const char* const diff_data[]) {
+ return (lexical_cast<int>(diff_data[DatabaseAccessor::DIFF_PARAM_COUNT]));
+}
+
+DatabaseAccessor::DiffOperation
+getOperation(const char* const diff_data[]) {
+ return (static_cast<DatabaseAccessor::DiffOperation>(
+ lexical_cast<int>(
+ diff_data[DatabaseAccessor::DIFF_PARAM_COUNT + 1])));
+}
+
+// Common checker function that compares expected and actual sequence of
+// diffs.
+void
+checkDiffs(const vector<const char* const*>& expected,
+ const vector<vector<string> >& actual)
+{
+ EXPECT_EQ(expected.size(), actual.size());
+ const size_t n_diffs = std::min(expected.size(), actual.size());
+ for (size_t i = 0; i < n_diffs; ++i) {
+ for (int j = 0; j < actual[i].size(); ++j) {
+ EXPECT_EQ(expected[i][j], actual[i][j]);
+ }
+ }
+}
+
+TEST_F(SQLite3Update, addRecordDiff) {
+ // A simple case of adding diffs: just changing the SOA, and confirm
+ // the diffs are stored as expected.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Until the diffs are committed, they are not visible to other accessors.
+ EXPECT_TRUE(another_accessor->getRecordDiff(zone_id).empty());
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ // Now it should be visible to others, too.
+ checkDiffs(expected_stored, another_accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addRecordOfLargeSerial) {
+ // This is essentially the same as the previous test, but using a
+ // very large "version" (SOA serial), which is actually the possible
+ // largest value to confirm the internal code doesn't have an overflow bug
+ // or other failure due to the larger value.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ const char* const begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 4294967295 3600 1800 2419200 7200",
+ "4294967295", DIFF_DELETE_TEXT
+ };
+
+ copy(begin_data, begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ // For "serial" parameter, we intentionally hardcode the value rather
+ // than converting it from the data.
+ accessor->addRecordDiff(zone_id, 0xffffffff, getOperation(diff_begin_data),
+ diff_params);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithoutUpdate) {
+ // Right now we require startUpdateZone() prior to performing
+ // addRecordDiff.
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+
+ // For now, we don't allow adding diffs in a general transaction either.
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffWithBadZoneID) {
+ // For now, we require zone ID passed to addRecordDiff be equal to
+ // that for the zone being updated.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id + 1,
+ getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffRollback) {
+ // Rollback tentatively added diffs. This is no different from the
+ // update case, but we test it explicitly just in case.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+ accessor->rollback();
+
+ EXPECT_TRUE(accessor->getRecordDiff(zone_id).empty());
+}
+
+TEST_F(SQLite3Update, addDiffInBadOrder) {
+ // At this level, the API is naive, and doesn't care if the diff sequence
+ // is a valid IXFR order.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Add diff of 'end', then 'begin'
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_begin_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithUpdate) {
+    // A more realistic example: add corresponding diffs while updating the zone.
+    // Implementation-wise, there should be no reason this could fail if
+ // the basic tests so far pass. But we check it in case we miss something.
+
+ const char* const old_a_record[] = {
+ "dns01.example.com.", "A", "192.0.2.1"
+ };
+ const char* const new_a_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "192.0.2.10"
+ };
+ const char* const old_soa_record[] = {
+ "example.com.", "SOA",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ };
+ const char* const new_soa_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ };
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Delete SOA (and add that diff)
+ copy(old_soa_record, old_soa_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ // Delete A
+ copy(old_a_record, old_a_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_del_a_data, diff_del_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_del_a_data),
+ getOperation(diff_del_a_data), diff_params);
+
+ // Add SOA
+ copy(new_soa_record, new_soa_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Add A
+ copy(new_a_record, new_a_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_add_a_data, diff_add_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_add_a_data),
+ getOperation(diff_add_a_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_del_a_data);
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_add_a_data);
+
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithNoTable) {
+    // An attempt to add diffs to an old version of the database that doesn't
+    // have a diffs table. This will fail in preparing the statement.
+ initAccessor(SQLITE_DBFILE_EXAMPLE + ".nodiffs", "IN");
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ SQLite3Error);
+}
} // end anonymous namespace
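
The diff tests above keep the SOA serial and the operation code as trailing strings in each test row and convert them with boost::lexical_cast (see getVersion() and getOperation()). A small stand-alone illustration of that conversion over a made-up row:

    #include <boost/lexical_cast.hpp>
    #include <stdint.h>
    #include <iostream>

    int main() {
        // Four stored columns, then the textual serial and operation code
        // (0 = add, 1 = delete), mirroring the arrays in the tests above.
        const char* const row[] = {
            "example.com.", "SOA", "3600",
            "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
            "1234", "1"
        };
        const uint32_t serial = boost::lexical_cast<uint32_t>(row[4]);
        const int operation = boost::lexical_cast<int>(row[5]);
        std::cout << "serial=" << serial << " op=" << operation << std::endl;
        return (0);
    }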
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3 b/src/lib/datasrc/tests/testdata/test.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3 and b/src/lib/datasrc/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
index 8e6314e..b95a427 100644
--- a/src/lib/dhcp/libdhcp.cc
+++ b/src/lib/dhcp/libdhcp.cc
@@ -14,16 +14,17 @@
#include <boost/shared_array.hpp>
#include <boost/shared_ptr.hpp>
-#include "dhcp/libdhcp.h"
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
#include "config.h"
-#include "dhcp6.h"
-
-#include "option.h"
-#include "option6_ia.h"
-#include "option6_iaaddr.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
using namespace std;
using namespace isc::dhcp;
+using namespace isc::util;
// static array with factories for options
std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
@@ -32,7 +33,7 @@ unsigned int
LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
unsigned int buf_len,
unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options) {
+ isc::dhcp::Option::OptionCollection& options) {
if (offset + parse_len > buf_len) {
isc_throw(OutOfRange, "Option parse failed. Tried to parse "
<< parse_len << " bytes at offset " << offset
@@ -83,13 +84,41 @@ LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
return (offset);
}
+void
+LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options) {
+ size_t offset = 0;
+
+    // 2 - size of the DHCPv4 option header (type and length bytes)
+ while (offset + 2 <= buf.size()) {
+ uint8_t opt_type = buf[offset++];
+ uint8_t opt_len = buf[offset++];
+ if (offset + opt_len > buf.size() ) {
+ isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+ << offset + opt_len << " bytes from " << buf.size()
+ << "-byte long buffer.");
+ }
+
+ boost::shared_ptr<Option> opt;
+ switch(opt_type) {
+ default:
+ opt = boost::shared_ptr<Option>(new Option(Option::V4, opt_type,
+ buf.begin()+offset,
+ buf.begin()+offset+opt_len));
+ }
+
+ options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+ offset += opt_len;
+ }
+}
+
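
unpackOptions4() above walks the classic DHCPv4 TLV layout: one byte of option type, one byte of length, then that many bytes of data. A stand-alone sketch of the same walk over a made-up buffer (the option code 53 below is just an example value):

    #include <stdint.h>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<uint8_t> buf;
        buf.push_back(53);   // option type
        buf.push_back(1);    // option length
        buf.push_back(1);    // one byte of option data

        size_t offset = 0;
        while (offset + 2 <= buf.size()) {
            const uint8_t type = buf[offset++];
            const uint8_t len = buf[offset++];
            if (offset + len > buf.size()) {
                break;       // truncated option; the real code throws OutOfRange
            }
            std::printf("option %u, %u byte(s) of data\n",
                        static_cast<unsigned>(type), static_cast<unsigned>(len));
            offset += len;
        }
        return (0);
    }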
unsigned int
LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
unsigned int data_len,
unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options) {
+ const isc::dhcp::Option::OptionCollection& options) {
try {
- for (isc::dhcp::Option::Option6Collection::const_iterator it = options.begin();
+ for (Option::OptionCollection::const_iterator it = options.begin();
it != options.end();
++it) {
unsigned short opt_len = (*it).second->len();
@@ -97,7 +126,7 @@ LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
isc_throw(OutOfRange, "Failed to build option " <<
(*it).first << ": out of buffer");
}
- offset = (*it).second->pack(data, data_len, offset);
+ offset = it->second->pack(data, data_len, offset);
}
}
catch (const Exception& e) {
@@ -107,6 +136,17 @@ LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
return (offset);
}
+void
+LibDHCP::packOptions(isc::util::OutputBuffer& buf,
+ const Option::OptionCollection& options) {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end();
+ ++it) {
+ it->second->pack4(buf);
+ }
+}
+
+
bool
LibDHCP::OptionFactoryRegister(Option::Universe u,
unsigned short opt_type,
diff --git a/src/lib/dhcp/libdhcp.h b/src/lib/dhcp/libdhcp.h
index c2ac949..468e6bb 100644
--- a/src/lib/dhcp/libdhcp.h
+++ b/src/lib/dhcp/libdhcp.h
@@ -16,7 +16,8 @@
#define LIBDHCP_H_
#include <iostream>
-#include "dhcp/pkt6.h"
+#include <util/buffer.h>
+#include <dhcp/pkt6.h>
namespace isc {
namespace dhcp {
@@ -39,8 +40,27 @@ public:
static unsigned int
packOptions6(boost::shared_array<uint8_t> buf, unsigned int buf_len,
unsigned int offset,
- const isc::dhcp::Option::Option6Collection& options);
+ const isc::dhcp::Option::OptionCollection& options);
+
+ /// @brief Stores options in a buffer.
+ ///
+    /// Stores all options defined in the options container in on-wire
+    /// format in the output buffer specified by buf.
+ ///
+ /// May throw different exceptions if option assembly fails. There
+ /// may be different reasons (option too large, option malformed,
+ /// too many options etc.)
+ ///
+ /// @param buf
+ /// @param options
+ static void
+ packOptions(isc::util::OutputBuffer& buf,
+ const isc::dhcp::Option::OptionCollection& options);
+
+ static void
+ unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options);
///
/// Parses provided buffer and creates Option objects.
///
@@ -57,7 +77,7 @@ public:
static unsigned int
unpackOptions6(const boost::shared_array<uint8_t> buf, unsigned int buf_len,
unsigned int offset, unsigned int parse_len,
- isc::dhcp::Option::Option6Collection& options_);
+ isc::dhcp::Option::OptionCollection& options_);
///
/// Registers factory method that produces options of specific option types.
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index dd45c34..daef288 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -29,50 +29,117 @@ using namespace isc::dhcp;
using namespace isc::util;
Option::Option(Universe u, unsigned short type)
- :universe_(u), type_(type), data_len_(0) {
-
+ :universe_(u), type_(type) {
+ if ((u == V4) && (type > 255)) {
+ isc_throw(BadValue, "Can't create V4 option of type "
+ << type << ", V4 options are in range 0..255");
+ }
}
Option::Option(Universe u, unsigned short type,
const boost::shared_array<uint8_t>& buf,
unsigned int offset, unsigned int len)
- :universe_(u), type_(type), data_(buf),
- data_len_(len), offset_(offset)
- {
+ :universe_(u), type_(type),
+ offset_(offset)
+{
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + len);
+
+ check();
+}
+
+Option::Option(Universe u, unsigned short type, std::vector<uint8_t>& data)
+ :universe_(u), type_(type), data_(data) {
+ check();
+}
- // sanity checks
- // TODO: universe must be in V4 and V6
+Option::Option(Universe u, uint16_t type, vector<uint8_t>::const_iterator first,
+ vector<uint8_t>::const_iterator last)
+ :universe_(u), type_(type), data_(std::vector<uint8_t>(first,last)) {
+ check();
+}
+
+void
+Option::check() {
+ if ( (universe_ != V4) && (universe_ != V6) ) {
+        isc_throw(BadValue, "Invalid universe type specified. "
+                  << "Only V4 and V6 are allowed.");
+ }
+
+ if (universe_ == V4) {
+
+ if (type_ > 255) {
+            isc_throw(OutOfRange, "DHCPv4 Option type " << type_ << " is too big. "
+                      << "For DHCPv4 the allowed type range is 0..255");
+ } else if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+ }
+
+ // no need to check anything for DHCPv6. It allows full range (0-64k) of
+ // both types and data size.
}
unsigned int
Option::pack(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
+ if (universe_ != V6) {
+ isc_throw(BadValue, "Failed to pack " << type_ << " option. Do not "
+ << "use this method for options other than DHCPv6.");
+ }
+ return pack6(buf, buf_len, offset);
+}
+
+void
+Option::pack4(isc::util::OutputBuffer& buf) {
switch (universe_) {
- case V4:
- return pack4(buf, buf_len, offset);
+ case V4: {
+ if (data_.size() > 255) {
+            isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big. "
+                      << "At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
+
+ buf.writeData(&data_[0], data_.size());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
+ }
case V6:
- return pack6(buf, buf_len, offset);
+ /// TODO: Do we need a sanity check for option size here?
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - getHeaderLen());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
default:
- isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+        isc_throw(OutOfRange, "Invalid universe type " << universe_);
}
}
-
unsigned int
Option::pack4(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
- if ( offset+len() > buf_len ) {
+ if (offset + len() > buf_len) {
isc_throw(OutOfRange, "Failed to pack v4 option=" <<
- type_ << ",len=" << data_len_ << ": too small buffer.");
+ type_ << ",len=" << len() << ": too small buffer.");
}
uint8_t *ptr = &buf[offset];
ptr[0] = type_;
- ptr[1] = data_len_;
+ ptr[1] = len() - getHeaderLen();
ptr += 2;
- memcpy(ptr, &data_[0], data_len_);
+ memcpy(ptr, &data_[0], data_.size());
return offset + len();
}
@@ -81,22 +148,22 @@ unsigned int
Option::pack6(boost::shared_array<uint8_t>& buf,
unsigned int buf_len,
unsigned int offset) {
- if ( offset+len() > buf_len ) {
+ if (offset+len() > buf_len) {
isc_throw(OutOfRange, "Failed to pack v6 option=" <<
type_ << ",len=" << len() << ": too small buffer.");
}
- uint8_t * ptr = &buf[offset];
+ uint8_t* ptr = &buf[offset];
ptr = writeUint16(type_, ptr);
ptr = writeUint16(len() - getHeaderLen(), ptr);
- if (data_len_)
- memcpy(ptr, &data_[offset_], data_len_);
+ if (! data_.empty())
+ memcpy(ptr, &data_[0], data_.size());
// end of fixed part of this option
- offset += OPTION6_HDR_LEN + data_len_;
+ offset += OPTION6_HDR_LEN + data_.size();
return LibDHCP::packOptions6(buf, buf_len, offset, options_);
}
@@ -140,22 +207,27 @@ Option::unpack6(const boost::shared_array<uint8_t>& buf,
<< "): too small buffer.");
}
- data_ = buf;
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + parse_len);
+
offset_ = offset;
- data_len_ = buf_len;
- return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
- options_);
+ return (offset+parse_len);
+
+ //return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
+ // options_);
}
+/// Returns length of the complete option (data length + DHCPv4/DHCPv6
+/// option header)
unsigned short
Option::len() {
// length of the whole option is header and data stored in this option...
- int length = getHeaderLen() + data_len_;
+ int length = getHeaderLen() + data_.size();
// ... and sum of lengths of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -177,16 +249,9 @@ Option::valid() {
return (true);
}
-void
-isc::dhcp::Option::addOption(boost::shared_ptr<isc::dhcp::Option> opt) {
- options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(),
- opt));
-
-}
-
boost::shared_ptr<isc::dhcp::Option>
Option::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x =
+ isc::dhcp::Option::OptionCollection::const_iterator x =
options_.find(opt_type);
if ( x != options_.end() ) {
return (*x).second;
@@ -196,7 +261,7 @@ Option::getOption(unsigned short opt_type) {
bool
Option::delOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(opt_type);
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(opt_type);
if ( x != options_.end() ) {
options_.erase(x);
return true; // delete successful
@@ -208,22 +273,22 @@ Option::delOption(unsigned short opt_type) {
std::string Option::toText(int indent /* =0 */ ) {
std::stringstream tmp;
- for (int i=0; i<indent; i++)
+ for (int i = 0; i < indent; i++)
tmp << " ";
- tmp << "type=" << type_ << ", len=" << data_len_ << ": ";
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ": ";
- for (unsigned int i=0; i<data_len_; i++) {
+ for (unsigned int i = 0; i < data_.size(); i++) {
if (i) {
tmp << ":";
}
tmp << setfill('0') << setw(2) << hex
- << static_cast<unsigned short>(data_[offset_+i]);
+ << static_cast<unsigned short>(data_[i]);
}
// print suboptions
- for (Option6Collection::const_iterator opt=options_.begin();
- opt!=options_.end();
+ for (OptionCollection::const_iterator opt = options_.begin();
+ opt != options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
}
@@ -235,13 +300,9 @@ Option::getType() {
return type_;
}
-uint8_t*
+const std::vector<uint8_t>&
Option::getData() {
- if (data_len_) {
- return (&data_[offset_]);
- } else {
- return (NULL);
- }
+ return (data_);
}
unsigned short
@@ -255,6 +316,18 @@ Option::getHeaderLen() {
return 0; // should not happen
}
+void
+Option::addOption(boost::shared_ptr<Option> opt) {
+ if (universe_ == V4) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
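
A minimal sketch of the uniqueness rule enforced above; the parent type 43, the suboption type 1, and the function name are arbitrary:

    #include <dhcp/option.h>
    #include <boost/shared_ptr.hpp>

    using namespace isc::dhcp;

    void exampleSubOptions() {
        boost::shared_ptr<Option> parent(new Option(Option::V4, 43));
        boost::shared_ptr<Option> sub(new Option(Option::V4, 1));

        parent->addOption(sub);      // fine: first suboption of type 1
        // parent->addOption(sub);   // would throw BadValue: type 1 already present (V4 only)
        // For a V6 parent the second call would succeed, as repeated types are allowed.
    }
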
Option::~Option() {
}
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 5be1be3..3822cf0 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -17,8 +17,10 @@
#include <string>
#include <map>
+#include <vector>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
+#include <util/buffer.h>
namespace isc {
namespace dhcp {
@@ -34,13 +36,9 @@ public:
/// defines option universe DHCPv4 or DHCPv6
enum Universe { V4, V6 };
- /// a collection of DHCPv4 options
- typedef std::map<unsigned int, boost::shared_ptr<Option> >
- Option4Collection;
-
/// a collection of DHCPv6 options
typedef std::multimap<unsigned int, boost::shared_ptr<Option> >
- Option6Collection;
+ OptionCollection;
/// @brief a factory function prototype
///
@@ -80,11 +78,55 @@ public:
const boost::shared_array<uint8_t>& buf, unsigned int offset,
unsigned int len);
- /// @brief writes option in wire-format to buf
+ /// @brief Constructor, used for received options.
+ ///
+    /// This constructor takes a vector<uint8_t>&; the content of the option
+    /// is copied and stored within the option object. V4 options already
+    /// follow that approach.
+    /// TODO: Migrate V6 options to that approach.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param data content of the option
+ Option(Universe u, unsigned short type, std::vector<uint8_t>& data);
+
+ /// @brief Constructor, used for received options.
+ ///
+    /// This constructor is similar to the previous one, but it does not take
+    /// the whole vector<uint8_t>, only a subset of it.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+    /// Example usage: given vector<uint8_t> myData,
+    /// new Option(V4, 123, myData.begin()+1, myData.end()-1)
+ /// This will create DHCPv4 option of type 123 that contains data from
+ /// trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option(Universe u, uint16_t type,
+ std::vector<uint8_t>::const_iterator first,
+ std::vector<uint8_t>::const_iterator last);
+
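
A minimal usage sketch of this iterator-range constructor, along the lines of the example above; the option type 123, the byte values, and the wrapper function name are arbitrary:

    #include <dhcp/option.h>
    #include <vector>

    using isc::dhcp::Option;

    void exampleSubRange() {
        uint8_t raw[] = { 0x56, 1, 2, 3, 4, 0x43 };  // garbage, 4-byte payload, garbage
        std::vector<uint8_t> wire(raw, raw + sizeof(raw));

        // Copy only the 4 payload bytes; the first and last byte are skipped.
        Option opt(Option::V4, 123, wire.begin() + 1, wire.end() - 1);
        // opt.getData() now holds {1, 2, 3, 4}; opt.len() == 6 (2-byte header + data).
    }
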
+ /// @brief returns option universe (V4 or V6)
+ ///
+ /// @return universe type
+ Universe
+ getUniverse() { return universe_; };
+
+ /// @brief Writes option in wire-format to a buffer.
///
/// Writes option in wire-format to buffer, returns pointer to first unused
/// byte after stored option (that is useful for writing options one after
- /// another)
+ /// another). Used in DHCPv6 options.
+ ///
+ /// TODO: Migrate DHCPv6 code to pack(OutputBuffer& buf) version
///
/// @param buf pointer to a buffer
/// @param buf_len length of the buffer
@@ -93,10 +135,21 @@ public:
/// @return offset to first unused byte after stored option
///
virtual unsigned int
- pack(boost::shared_array<uint8_t>& buf,
- unsigned int buf_len,
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
unsigned int offset);
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
+ ///
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
+ ///
+ /// @param buf output buffer (option will be stored there)
+ virtual void
+ pack4(isc::util::OutputBuffer& buf);
+
+
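
A minimal usage sketch of pack4() together with isc::util::OutputBuffer, assuming the interface above; the option type 123, the payload bytes, and the wrapper function name are arbitrary (this mirrors the v4_data1 unit test further down):

    #include <dhcp/option.h>
    #include <util/buffer.h>
    #include <vector>

    using namespace isc::dhcp;
    using namespace isc::util;

    void examplePack4() {
        uint8_t payload[] = { 1, 2, 3, 4 };
        std::vector<uint8_t> data(payload, payload + sizeof(payload));
        Option opt(Option::V4, 123, data);

        OutputBuffer buf(100);
        opt.pack4(buf);               // wire format: 123, 4, 1, 2, 3, 4
        // buf.getLength() == 6; buf.getData() points at the packed bytes
    }
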
/// @brief Parses buffer.
///
/// Parses received buffer, returns offset to the first unused byte after
@@ -150,7 +203,7 @@ public:
/// Returns pointer to actual data.
///
/// @return pointer to actual data (or NULL if there is no data)
- virtual uint8_t*
+ virtual const std::vector<uint8_t>&
getData();
/// Adds a sub-option.
@@ -242,26 +295,31 @@ protected:
unsigned int offset,
unsigned int parse_len);
+    /// @brief A private method used to check option correctness.
+    ///
+    /// It is used in constructors. If any problems are detected
+    /// (like specifying type > 255 for a DHCPv4 option), it will throw
+    /// BadValue or OutOfRange exceptions.
+ void check();
+
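
A small sketch of the kind of misuse check() is meant to reject (it mirrors the v4_basic unit test further down; the wrapper function name is arbitrary):

    #include <dhcp/option.h>
    #include <exceptions/exceptions.h>

    using namespace isc::dhcp;

    void exampleCheck() {
        try {
            Option bad(Option::V4, 256);   // V4 types are limited to 0..255
        } catch (const isc::BadValue&) {
            // expected: check() rejects the out-of-range type
        }
    }
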
/// option universe (V4 or V6)
Universe universe_;
/// option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
unsigned short type_;
- /// shared pointer to a buffer (usually a part of packet)
- boost::shared_array<uint8_t> data_;
-
- /// length of data only. Use len() if you want to
- /// know proper length with option header overhead
- unsigned int data_len_;
+    /// contains the content of this option
+ std::vector<uint8_t> data_;
+ /// TODO: Remove this field. vector<uint8_t> should be used
+ /// instead.
/// data is a shared_pointer that points out to the
/// whole packet. offset_ specifies where data for
/// this option begins.
unsigned int offset_;
/// collection for storing suboptions
- Option6Collection options_;
+ OptionCollection options_;
/// TODO: probably 2 different containers have to be used for v4 (unique
/// options) and v6 (options with the same type can repeat)
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index ee314db..46daee1 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -113,7 +113,7 @@ std::string Option6IA::toText(int indent /* = 0*/) {
tmp << " iaid=" << iaid_ << ", t1=" << t1_ << ", t2=" << t2_
<< " " << options_.size() << " sub-options:" << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -127,7 +127,7 @@ unsigned short Option6IA::len() {
OPTION6_IA_LEN /* option content (12) */;
// length of all suboptions
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index d5b57dd..4177714 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -108,7 +108,7 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
<< ", preferred-lft=" << preferred_ << ", valid-lft="
<< valid_ << endl;
- for (Option6Collection::const_iterator opt=options_.begin();
+ for (OptionCollection::const_iterator opt=options_.begin();
opt!=options_.end();
++opt) {
tmp << (*opt).second->toText(indent+2);
@@ -123,7 +123,7 @@ unsigned short Option6IAAddr::len() {
// length of all suboptions
// TODO implement:
// protected: unsigned short Option::lenHelper(int header_size);
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index d8e05d9..ba07a10 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -47,7 +47,7 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(0), // not used, this is TX packet
+ bufferIn_(NULL, 0), // not used, this is TX packet
bufferOut_(DHCPV4_PKT_HDR_LEN),
msg_type_(msg_type)
{
@@ -73,36 +73,82 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
yiaddr_(DEFAULT_ADDRESS),
siaddr_(DEFAULT_ADDRESS),
giaddr_(DEFAULT_ADDRESS),
- bufferIn_(0), // not used, this is TX packet
- bufferOut_(DHCPV4_PKT_HDR_LEN),
+ bufferIn_(data, len),
+ bufferOut_(0), // not used, this is RX packet
msg_type_(DHCPDISCOVER)
{
if (len < DHCPV4_PKT_HDR_LEN) {
isc_throw(OutOfRange, "Truncated DHCPv4 packet (len=" << len
- << " received, at least 236 bytes expected.");
+ << " received, at least " << DHCPV4_PKT_HDR_LEN
+                  << " is expected");
}
- bufferIn_.writeData(data, len);
}
size_t
Pkt4::len() {
size_t length = DHCPV4_PKT_HDR_LEN; // DHCPv4 header
- /// TODO: Include options here (ticket #1228)
+ // ... and sum of lengths of all options
+ for (Option::OptionCollection::const_iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
return (length);
}
bool
Pkt4::pack() {
- /// TODO: Implement this (ticket #1227)
-
- return (false);
+ bufferOut_.writeUint8(op_);
+ bufferOut_.writeUint8(htype_);
+ bufferOut_.writeUint8(hlen_);
+ bufferOut_.writeUint8(hops_);
+ bufferOut_.writeUint32(transid_);
+ bufferOut_.writeUint16(secs_);
+ bufferOut_.writeUint16(flags_);
+ bufferOut_.writeUint32(ciaddr_);
+ bufferOut_.writeUint32(yiaddr_);
+ bufferOut_.writeUint32(siaddr_);
+ bufferOut_.writeUint32(giaddr_);
+ bufferOut_.writeData(chaddr_, MAX_CHADDR_LEN);
+ bufferOut_.writeData(sname_, MAX_SNAME_LEN);
+ bufferOut_.writeData(file_, MAX_FILE_LEN);
+
+ LibDHCP::packOptions(bufferOut_, options_);
+
+ return (true);
}
bool
Pkt4::unpack() {
- /// TODO: Implement this (ticket #1226)
+    if (bufferIn_.getLength() < DHCPV4_PKT_HDR_LEN) {
+        isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
+                  << bufferIn_.getLength() << " received, at least "
+                  << DHCPV4_PKT_HDR_LEN << " is expected");
+ }
- return (false);
+ op_ = bufferIn_.readUint8();
+ htype_ = bufferIn_.readUint8();
+ hlen_ = bufferIn_.readUint8();
+ hops_ = bufferIn_.readUint8();
+ transid_ = bufferIn_.readUint32();
+ secs_ = bufferIn_.readUint16();
+ flags_ = bufferIn_.readUint16();
+ ciaddr_ = IOAddress(bufferIn_.readUint32());
+ yiaddr_ = IOAddress(bufferIn_.readUint32());
+ siaddr_ = IOAddress(bufferIn_.readUint32());
+ giaddr_ = IOAddress(bufferIn_.readUint32());
+ bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
+ bufferIn_.readData(sname_, MAX_SNAME_LEN);
+ bufferIn_.readData(file_, MAX_FILE_LEN);
+
+ size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+ vector<uint8_t> optsBuffer;
+    // first use of readVector
+ bufferIn_.readVector(optsBuffer, opts_len);
+ LibDHCP::unpackOptions4(optsBuffer, options_);
+
+ return (true);
}
std::string
@@ -184,6 +230,26 @@ Pkt4::DHCPTypeToBootpType(uint8_t dhcpType) {
}
}
+void
+Pkt4::addOption(boost::shared_ptr<Option> opt) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt4::getOption(uint8_t type) {
+ Option::OptionCollection::const_iterator x = options_.find(type);
+ if (x!=options_.end()) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+
} // end of namespace isc::dhcp
} // end of namespace isc
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 7ac0ca9..8517091 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -105,7 +105,7 @@ public:
///
/// @return hops field
uint8_t
- getHops() { return (hops_); };
+ getHops() const { return (hops_); };
// Note: There's no need to manipulate OP field directly,
// thus no setOp() method. See op_ comment.
@@ -114,7 +114,7 @@ public:
///
/// @return op field
uint8_t
- getOp() { return (op_); };
+ getOp() const { return (op_); };
/// Sets secs field
///
@@ -126,7 +126,7 @@ public:
///
/// @return secs field
uint16_t
- getSecs() { return (secs_); };
+ getSecs() const { return (secs_); };
/// Sets flags field
///
@@ -138,14 +138,14 @@ public:
///
/// @return flags field
uint16_t
- getFlags() { return (flags_); };
+ getFlags() const { return (flags_); };
/// Returns ciaddr field
///
/// @return ciaddr field
- isc::asiolink::IOAddress&
- getCiaddr() { return (ciaddr_); };
+ const isc::asiolink::IOAddress&
+ getCiaddr() const { return (ciaddr_); };
/// Sets ciaddr field
///
@@ -157,8 +157,8 @@ public:
/// Returns siaddr field
///
/// @return siaddr field
- isc::asiolink::IOAddress&
- getSiaddr() { return (siaddr_); };
+ const isc::asiolink::IOAddress&
+ getSiaddr() const { return (siaddr_); };
/// Sets siaddr field
///
@@ -170,8 +170,8 @@ public:
/// Returns yiaddr field
///
/// @return yiaddr field
- isc::asiolink::IOAddress&
- getYiaddr() { return (yiaddr_); };
+ const isc::asiolink::IOAddress&
+ getYiaddr() const { return (yiaddr_); };
/// Sets yiaddr field
///
@@ -183,8 +183,8 @@ public:
/// Returns giaddr field
///
/// @return giaddr field
- isc::asiolink::IOAddress&
- getGiaddr() { return (giaddr_); };
+ const isc::asiolink::IOAddress&
+ getGiaddr() const { return (giaddr_); };
/// Sets giaddr field
///
@@ -195,13 +195,13 @@ public:
/// Returns value of transaction-id field
///
/// @return transaction-id
- uint32_t getTransid() { return (transid_); };
+ uint32_t getTransid() const { return (transid_); };
/// Returns message type (e.g. 1 = DHCPDISCOVER)
///
/// @return message type
uint8_t
- getType() { return (msg_type_); }
+ getType() const { return (msg_type_); }
/// Sets message type (e.g. 1 = DHCPDISCOVER)
///
@@ -215,7 +215,7 @@ public:
///
/// @return sname field
const std::vector<uint8_t>
- getSname() { return (std::vector<uint8_t>(sname_, &sname_[MAX_SNAME_LEN])); };
+ getSname() const { return (std::vector<uint8_t>(sname_, &sname_[MAX_SNAME_LEN])); };
/// Sets sname field
///
@@ -230,7 +230,7 @@ public:
///
/// @return pointer to file field
const std::vector<uint8_t>
- getFile() { return (std::vector<uint8_t>(file_, &file_[MAX_FILE_LEN])); };
+ getFile() const { return (std::vector<uint8_t>(file_, &file_[MAX_FILE_LEN])); };
/// Sets file field
///
@@ -256,24 +256,53 @@ public:
///
/// @return hardware type
uint8_t
- getHtype() { return (htype_); };
+ getHtype() const { return (htype_); };
/// Returns hlen field
///
/// @return hardware address length
uint8_t
- getHlen() { return (hlen_); };
+ getHlen() const { return (hlen_); };
- /// @brief Returns chaddr field
+ /// @brief Returns chaddr field.
///
    /// Note: This is a 16-byte long field. It doesn't have to be
    /// null-terminated. Do not use strlen() or similar on it.
///
/// @return pointer to hardware address
const uint8_t*
- getChaddr() { return (chaddr_); };
+ getChaddr() const { return (chaddr_); };
+ /// @brief Returns reference to output buffer.
+ ///
+    /// The returned buffer contains reasonable data only for an
+    /// output (TX) packet and only after pack() was called. The buffer
+    /// is valid only as long as the Pkt4 object itself is valid.
+    ///
+    /// For an RX packet, or for a TX packet before pack() is called,
+    /// a buffer of zero length is returned.
+ ///
+ /// @return reference to output buffer
+ const isc::util::OutputBuffer&
+ getBuffer() const { return (bufferOut_); };
+
+ /// @brief Add an option.
+ ///
+ /// Throws BadValue if option with that type is already present.
+ ///
+ /// @param opt option to be added
+ void
+ addOption(boost::shared_ptr<Option> opt);
+
+ /// @brief Returns an option of specified type.
+ ///
+    /// @return option of the requested type, or NULL
+    /// if no such option is present
+ boost::shared_ptr<Option>
+ getOption(uint8_t opt_type);
+
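
A short usage sketch for the addOption()/getOption() methods declared above; option code 54 and the payload bytes are arbitrary examples:

    #include <dhcp/pkt4.h>
    #include <dhcp/dhcp4.h>
    #include <dhcp/option.h>
    #include <vector>

    using namespace isc::dhcp;

    void exampleOptions() {
        Pkt4 offer(DHCPOFFER, 1234);

        uint8_t addr[] = { 192, 0, 2, 1 };
        std::vector<uint8_t> payload(addr, addr + sizeof(addr));
        boost::shared_ptr<Option> opt(new Option(Option::V4, 54, payload));

        offer.addOption(opt);        // stored under its type (54)
        // offer.addOption(opt);     // would throw BadValue: duplicate type
        if (offer.getOption(54)) {
            // the option is present; inspect it via getData()/len()
        }
    }
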
protected:
/// converts DHCP message type to BOOTP op type
@@ -359,7 +388,7 @@ protected:
/// input buffer (used during message reception)
/// Note that it must be modifiable as hooks can modify incoming buffer),
/// thus OutputBuffer, not InputBuffer
- isc::util::OutputBuffer bufferIn_;
+ isc::util::InputBuffer bufferIn_;
/// output buffer (used during message
isc::util::OutputBuffer bufferOut_;
@@ -370,7 +399,7 @@ protected:
uint8_t msg_type_;
/// collection of options present in this message
- isc::dhcp::Option::Option4Collection options_;
+ isc::dhcp::Option::OptionCollection options_;
}; // Pkt4 class
} // isc::dhcp namespace
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
index b00652a..84c5729 100644
--- a/src/lib/dhcp/pkt6.cc
+++ b/src/lib/dhcp/pkt6.cc
@@ -63,7 +63,7 @@ unsigned short
Pkt6::len() {
unsigned int length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
- for (Option::Option6Collection::iterator it = options_.begin();
+ for (Option::OptionCollection::iterator it = options_.begin();
it != options_.end();
++it) {
length += (*it).second->len();
@@ -88,6 +88,13 @@ Pkt6::pack() {
bool
Pkt6::packUDP() {
+
+    // TODO: Once OutputBuffer is used here, something like this
+ // will be used. Yikes! That's ugly.
+ // bufferOut_.writeData(ciaddr_.getAddress().to_v6().to_bytes().data(), 16);
+ // It is better to implement a method in IOAddress that extracts
+ // vector<uint8_t>
+
unsigned short length = len();
if (data_len_ < length) {
cout << "Previous len=" << data_len_ << ", allocating new buffer: len="
@@ -190,7 +197,7 @@ Pkt6::toText() {
<< "]:" << remote_port_ << endl;
tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
<< dec << endl;
- for (isc::dhcp::Option::Option6Collection::iterator opt=options_.begin();
+ for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
opt != options_.end();
++opt) {
tmp << opt->second->toText() << std::endl;
@@ -200,7 +207,7 @@ Pkt6::toText() {
boost::shared_ptr<isc::dhcp::Option>
Pkt6::getOption(unsigned short opt_type) {
- isc::dhcp::Option::Option6Collection::const_iterator x = options_.find(opt_type);
+ isc::dhcp::Option::OptionCollection::const_iterator x = options_.find(opt_type);
if (x!=options_.end()) {
return (*x).second;
}
@@ -214,7 +221,7 @@ Pkt6::addOption(boost::shared_ptr<Option> opt) {
bool
Pkt6::delOption(unsigned short type) {
- isc::dhcp::Option::Option6Collection::iterator x = options_.find(type);
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(type);
if (x!=options_.end()) {
options_.erase(x);
return (true); // delete successful
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
index d089444..019eeb2 100644
--- a/src/lib/dhcp/pkt6.h
+++ b/src/lib/dhcp/pkt6.h
@@ -180,7 +180,7 @@ public:
/// TODO Need to implement getOptions() as well
/// collection of options present in this message
- isc::dhcp::Option::Option6Collection options_;
+ isc::dhcp::Option::OptionCollection options_;
protected:
/// Builds on wire packet for TCP transmission.
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 41cabba..01799da 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -1,8 +1,6 @@
SUBDIRS = .
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/asiolink
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
diff --git a/src/lib/dhcp/tests/libdhcp_unittest.cc b/src/lib/dhcp/tests/libdhcp_unittest.cc
index d9d7c47..11b618c 100644
--- a/src/lib/dhcp/tests/libdhcp_unittest.cc
+++ b/src/lib/dhcp/tests/libdhcp_unittest.cc
@@ -15,16 +15,16 @@
#include <config.h>
#include <iostream>
#include <sstream>
-
#include <arpa/inet.h>
#include <gtest/gtest.h>
-
-#include "dhcp/libdhcp.h"
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
#include "config.h"
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class LibDhcpTest : public ::testing::Test {
@@ -41,9 +41,9 @@ static const uint8_t packed[] = {
1, 1, 0, 1, 114 // opt5 (5 bytes)
};
-TEST_F(LibDhcpTest, packOptions6) {
+TEST(LibDhcpTest, packOptions6) {
boost::shared_array<uint8_t> buf(new uint8_t[512]);
- isc::dhcp::Option::Option6Collection opts; // list of options
+ isc::dhcp::Option::OptionCollection opts; // list of options
// generate content for options
for (int i = 0; i < 64; i++) {
@@ -70,13 +70,13 @@ TEST_F(LibDhcpTest, packOptions6) {
EXPECT_EQ(0, memcmp(&buf[100], packed, 35) );
}
-TEST_F(LibDhcpTest, unpackOptions6) {
+TEST(LibDhcpTest, unpackOptions6) {
// just couple of random options
// Option is used as a simple option implementation
// More advanced uses are validated in tests dedicated for
// specific derived classes.
- isc::dhcp::Option::Option6Collection options; // list of options
+ isc::dhcp::Option::OptionCollection options; // list of options
// we can't use packed directly, as shared_array would try to
// free it eventually
@@ -91,35 +91,35 @@ TEST_F(LibDhcpTest, unpackOptions6) {
EXPECT_EQ(35, offset); // parsed first 35 bytes (offset 0..34)
EXPECT_EQ(options.size(), 5); // there should be 5 options
- isc::dhcp::Option::Option6Collection::const_iterator x = options.find(12);
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
ASSERT_FALSE(x == options.end()); // option 1 should exist
EXPECT_EQ(12, x->second->getType()); // this should be option 12
ASSERT_EQ(9, x->second->len()); // it should be of length 9
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+4, 5)); // data len=5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+4, 5)); // data len=5
x = options.find(13);
ASSERT_FALSE(x == options.end()); // option 13 should exist
EXPECT_EQ(13, x->second->getType()); // this should be option 13
ASSERT_EQ(7, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+13, 3)); // data len=3
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+13, 3)); // data len=3
x = options.find(14);
ASSERT_FALSE(x == options.end()); // option 3 should exist
EXPECT_EQ(14, x->second->getType()); // this should be option 14
ASSERT_EQ(6, x->second->len()); // it should be of length 6
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+20, 2)); // data len=2
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+20, 2)); // data len=2
x = options.find(256);
ASSERT_FALSE(x == options.end()); // option 256 should exist
EXPECT_EQ(256, x->second->getType()); // this should be option 256
ASSERT_EQ(8, x->second->len()); // it should be of length 7
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+26, 4)); // data len=4
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+26, 4)); // data len=4
x = options.find(257);
ASSERT_FALSE(x == options.end()); // option 257 should exist
EXPECT_EQ(257, x->second->getType()); // this should be option 257
ASSERT_EQ(5, x->second->len()); // it should be of length 5
- EXPECT_EQ(0, memcmp(x->second->getData(), packed+34, 1)); // data len=1
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+34, 1)); // data len=1
x = options.find(0);
EXPECT_TRUE(x == options.end()); // option 0 not found
@@ -134,4 +134,101 @@ TEST_F(LibDhcpTest, unpackOptions6) {
EXPECT_TRUE(x == options.end()); // option 32000 not found
}
+
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 254, 3, 30, 31, 32,
+ 128, 3, 40, 41, 42
+};
+
+TEST(LibDhcpTest, packOptions4) {
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].resize(3);
+ payload[i][0] = i*10;
+ payload[i][1] = i*10+1;
+ payload[i][2] = i*10+2;
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+    boost::shared_ptr<Option> opt4(new Option(Option::V4, 254, payload[3]));
+    boost::shared_ptr<Option> opt5(new Option(Option::V4, 128, payload[4]));
+
+ isc::dhcp::Option::OptionCollection opts; // list of options
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+ vector<uint8_t> expVect(v4Opts, v4Opts + sizeof(v4Opts));
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW (
+ LibDHCP::packOptions(buf, opts);
+ );
+ ASSERT_EQ(buf.getLength(), sizeof(v4Opts));
+ EXPECT_EQ(0, memcmp(v4Opts, buf.getData(), sizeof(v4Opts)));
+
+}
+
+TEST(LibDhcpTest, unpackOptions4) {
+
+ vector<uint8_t> packed(v4Opts, v4Opts + sizeof(v4Opts));
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ ASSERT_NO_THROW(
+ LibDHCP::unpackOptions4(packed, options);
+ );
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+    ASSERT_FALSE(x == options.end()); // option 12 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = options.find(13);
+    ASSERT_FALSE(x == options.end()); // option 13 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = options.find(14);
+    ASSERT_FALSE(x == options.end()); // option 14 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = options.find(254);
+    ASSERT_FALSE(x == options.end()); // option 254 should exist
+ EXPECT_EQ(254, x->second->getType()); // this should be option 254
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = options.find(128);
+    ASSERT_FALSE(x == options.end()); // option 128 should exist
+    EXPECT_EQ(128, x->second->getType()); // this should be option 128
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+22, 3)); // data len=3
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1);
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+}
+
}
diff --git a/src/lib/dhcp/tests/option6_addrlst_unittest.cc b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
index 2a2fc1a..60b618b 100644
--- a/src/lib/dhcp/tests/option6_addrlst_unittest.cc
+++ b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
@@ -15,14 +15,12 @@
#include <config.h>
#include <iostream>
#include <sstream>
-
#include <arpa/inet.h>
#include <gtest/gtest.h>
-
-#include "io_address.h"
-#include "dhcp/dhcp6.h"
-#include "dhcp/option.h"
-#include "dhcp/option6_addrlst.h"
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_addrlst.h>
using namespace std;
using namespace isc;
@@ -38,10 +36,10 @@ public:
TEST_F(Option6AddrLstTest, basic) {
- // limiting tests to just a 2001:db8::/32 as is *wrong*.
+    // Limiting tests to just the 2001:db8::/32 prefix is *wrong*.
// Good tests check corner cases as well.
// ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff checks
- // for integer overflow
+ // for integer overflow.
// ff02::face:b00c checks if multicast addresses
// can be represented properly.
@@ -111,6 +109,8 @@ TEST_F(Option6AddrLstTest, basic) {
opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf, 128, 0, 16);
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+
EXPECT_EQ(D6O_NAME_SERVERS, opt1->getType());
EXPECT_EQ(20, opt1->len());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
@@ -178,6 +178,7 @@ TEST_F(Option6AddrLstTest, constructors) {
EXPECT_NO_THROW(
opt1 = new Option6AddrLst(1234, IOAddress("::1"));
);
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
EXPECT_EQ(1234, opt1->getType());
Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
diff --git a/src/lib/dhcp/tests/option6_ia_unittest.cc b/src/lib/dhcp/tests/option6_ia_unittest.cc
index 91aaba4..3fd52f5 100644
--- a/src/lib/dhcp/tests/option6_ia_unittest.cc
+++ b/src/lib/dhcp/tests/option6_ia_unittest.cc
@@ -67,6 +67,7 @@ TEST_F(Option6IATest, basic) {
0,
12);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(D6O_IA_NA, opt->getType());
EXPECT_EQ(0xa1a2a3a4, opt->getIAID());
EXPECT_EQ(0x81020304, opt->getT1());
@@ -121,6 +122,7 @@ TEST_F(Option6IATest, simple) {
ia->setT1(2345);
ia->setT2(3456);
+ EXPECT_EQ(Option::V6, ia->getUniverse());
EXPECT_EQ(D6O_IA_NA, ia->getType());
EXPECT_EQ(1234, ia->getIAID());
EXPECT_EQ(2345, ia->getT1());
@@ -251,7 +253,7 @@ TEST_F(Option6IATest, suboptions_unpack) {
EXPECT_EQ(0xcafe, subopt->getType());
EXPECT_EQ(4, subopt->len());
// there should be no data at all
- EXPECT_EQ(static_cast<void*>(NULL), subopt->getData());
+ EXPECT_EQ(0, subopt->getData().size());
subopt = ia->getOption(1); // get option 1
ASSERT_FALSE(subopt); // should be NULL
diff --git a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
index d1f7628..81c3eb3 100644
--- a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
+++ b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
@@ -75,6 +75,8 @@ TEST_F(Option6IAAddrTest, basic) {
EXPECT_EQ(78, offset);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+
// 4 bytes header + 4 bytes content
EXPECT_EQ("2001:db8:1::dead:beef", opt->getAddress().toText());
EXPECT_EQ(1000, opt->getPreferred());
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index 49426ae..db3ee3b 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -19,6 +19,8 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
#include <boost/shared_ptr.hpp>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
#include "dhcp/dhcp6.h"
#include "dhcp/option.h"
@@ -26,6 +28,7 @@
using namespace std;
using namespace isc;
using namespace isc::dhcp;
+using namespace isc::util;
namespace {
class OptionTest : public ::testing::Test {
@@ -35,26 +38,162 @@ public:
};
// v4 is not really implemented yet. A simple test will do for now
-TEST_F(OptionTest, basic4) {
+TEST_F(OptionTest, v4_basic) {
- Option* opt = new Option(Option::V4, 17);
+ Option* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option(Option::V4, 17);
+ );
+ EXPECT_EQ(Option::V4, opt->getUniverse());
EXPECT_EQ(17, opt->getType());
- EXPECT_EQ(static_cast<uint8_t*>(NULL), opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(2, opt->len()); // just v4 header
EXPECT_NO_THROW(
delete opt;
);
+ opt = 0;
+
+ // V4 options have type 0...255
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 256),
+ BadValue
+ );
+ if (opt) {
+ delete opt;
+ opt = 0;
+ }
+}
+
+const uint8_t dummyPayload[] =
+{ 1, 2, 3, 4};
+
+TEST_F(OptionTest, v4_data1) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ Option* opt = 0;
+
+ // create DHCPv4 option of type 123
+ // that contains 4 bytes of data
+ ASSERT_NO_THROW(
+        opt = new Option(Option::V4,
+ 123, // type
+ data);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), data.size());
+ EXPECT_TRUE(optData == data);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+    /// TODO: use vector<uint8_t> getData() when it is implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// this is almost the same test as v4_data1, but it uses
+// different constructor
+TEST_F(OptionTest, v4_data2) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ vector<uint8_t> expData = data;
+
+    // Add fake data at the front and at the end. The main purpose of this test
+    // is to check that only a subset of the whole vector can be used for
+    // creating the option.
+ data.insert(data.begin(), 56);
+ data.push_back(67);
+
+    // Data contains extra garbage at the beginning and at the end. It should be
+    // ignored, as we pass iterators to the proper data. Only the subset (limited
+    // by iterators) of the vector should be used.
+ // expData contains expected content (just valid data, without garbage).
+
+ Option* opt = 0;
+
+ // Create DHCPv4 option of type 123 that contains
+    // 4 bytes (sizeof(dummyPayload)) of data.
+ ASSERT_NO_THROW(
+        opt = new Option(Option::V4,
+ 123, // type
+ data.begin() + 1,
+ data.end() - 1);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), expData.size());
+ EXPECT_TRUE(optData == expData);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+    /// TODO: use vector<uint8_t> getData() when it is implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(OptionTest, v4_toText) {
+
+ vector<uint8_t> buf(3);
+ buf[0] = 0;
+ buf[1] = 0xf;
+ buf[2] = 0xff;
+
+ Option opt(Option::V4, 253, buf);
+
+ EXPECT_EQ("type=253, len=3: 00:0f:ff", opt.toText());
}
// tests simple constructor
-TEST_F(OptionTest, basic6) {
+TEST_F(OptionTest, v6_basic) {
Option* opt = new Option(Option::V6, 1);
+ EXPECT_EQ(Option::V6, opt->getUniverse());
EXPECT_EQ(1, opt->getType());
- EXPECT_EQ(static_cast<uint8_t*>(NULL), opt->getData());
+ EXPECT_EQ(0, opt->getData().size());
EXPECT_EQ(4, opt->len()); // just v6 header
EXPECT_NO_THROW(
@@ -64,7 +203,7 @@ TEST_F(OptionTest, basic6) {
// tests contructor used in pkt reception
// option contains actual data
-TEST_F(OptionTest, data1) {
+TEST_F(OptionTest, v6_data1) {
boost::shared_array<uint8_t> buf(new uint8_t[32]);
for (int i = 0; i < 32; i++)
buf[i] = 100+i;
@@ -73,9 +212,10 @@ TEST_F(OptionTest, data1) {
3, // offset
7); // 7 bytes of data
EXPECT_EQ(333, opt->getType());
- ASSERT_EQ(&buf[3], opt->getData());
+
ASSERT_EQ(11, opt->len());
- EXPECT_EQ(0, memcmp(&buf[3], opt->getData(), 7) );
+ ASSERT_EQ(7, opt->getData().size());
+ EXPECT_EQ(0, memcmp(&buf[3], &opt->getData()[0], 7) );
int offset = opt->pack(buf, 32, 20);
EXPECT_EQ(31, offset);
@@ -96,7 +236,7 @@ TEST_F(OptionTest, data1) {
// another text that tests the same thing, just
// with different input parameters
-TEST_F(OptionTest, data2) {
+TEST_F(OptionTest, v6_data2) {
boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
for (int i = 0; i < 128; i++)
@@ -144,7 +284,7 @@ TEST_F(OptionTest, data2) {
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions1) {
+TEST_F(OptionTest, v6_suboptions1) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -184,13 +324,13 @@ TEST_F(OptionTest, suboptions1) {
);
}
-// check that an option can contain 2 suboptions:
+// check that an option can contain nested suboptions:
// opt1
// +----opt2
// |
// +----opt3
//
-TEST_F(OptionTest, suboptions2) {
+TEST_F(OptionTest, v6_suboptions2) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -226,7 +366,7 @@ TEST_F(OptionTest, suboptions2) {
);
}
-TEST_F(OptionTest, addgetdel) {
+TEST_F(OptionTest, v6_addgetdel) {
boost::shared_array<uint8_t> buf(new uint8_t[128]);
for (int i=0; i<128; i++)
buf[i] = 100+i;
@@ -266,7 +406,7 @@ TEST_F(OptionTest, addgetdel) {
}
-TEST_F(OptionTest, toText) {
+TEST_F(OptionTest, v6_toText) {
boost::shared_array<uint8_t> buf(new uint8_t[3]);
buf[0] = 0;
buf[1] = 0xf;
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index 3988fb0..c89743f 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -20,55 +20,51 @@
#include <boost/static_assert.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
-
-#include "io_address.h"
-#include "dhcp/pkt4.h"
-#include "dhcp/dhcp4.h"
-#include "exceptions/exceptions.h"
+#include <util/buffer.h>
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
using namespace std;
using namespace isc;
using namespace isc::asiolink;
using namespace isc::dhcp;
+using namespace isc::util;
using namespace boost;
-// can't compare const to value directly, as it gives strange
-// linker errors in gtest.h
-
-static size_t DHCPV4_PKT_HDR_LEN = Pkt4::DHCPV4_PKT_HDR_LEN;
-
namespace {
TEST(Pkt4Test, constructor) {
- ASSERT_EQ(236U, DHCPV4_PKT_HDR_LEN);
+ ASSERT_EQ(236U, static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) );
Pkt4* pkt = 0;
- // minimal
+ // Just some dummy payload.
uint8_t testData[250];
for (int i = 0; i < 250; i++) {
testData[i]=i;
}
- // positive case1. Normal received packet
+ // Positive case1. Normal received packet.
EXPECT_NO_THROW(
- pkt = new Pkt4(testData, 236);
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN);
);
- EXPECT_EQ(236, pkt->len());
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
EXPECT_NO_THROW(
delete pkt;
pkt = 0;
);
- // positive case2. Normal outgoing packet
+ // Positive case2. Normal outgoing packet.
EXPECT_NO_THROW(
pkt = new Pkt4(DHCPDISCOVER, 0xffffffff);
);
// DHCPv4 packet must be at least 236 bytes long
- EXPECT_EQ(DHCPV4_PKT_HDR_LEN, pkt->len());
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
EXPECT_EQ(DHCPDISCOVER, pkt->getType());
EXPECT_EQ(0xffffffff, pkt->getTransid());
EXPECT_NO_THROW(
@@ -76,20 +72,32 @@ TEST(Pkt4Test, constructor) {
pkt = 0;
);
- // negative case. Should drop truncated messages
+ // Negative case. Should drop truncated messages.
EXPECT_THROW(
- pkt = new Pkt4(testData, 235),
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN-1),
OutOfRange
);
if (pkt) {
- // test failed. Exception should have been thrown, but
- // object was created instead. Let's clean this up
+ // Test failed. Exception should have been thrown, but
+ // object was created instead. Let's clean this up.
delete pkt;
+ pkt = 0;
}
}
-// a sample transaction-id
-const static uint32_t dummyTransid = 0x12345678;
+// a sample data
+const uint8_t dummyOp = BOOTREQUEST;
+const uint8_t dummyHtype = 6;
+const uint8_t dummyHlen = 6;
+const uint8_t dummyHops = 13;
+const uint32_t dummyTransid = 0x12345678;
+const uint16_t dummySecs = 42;
+const uint16_t dummyFlags = BOOTP_BROADCAST;
+
+const IOAddress dummyCiaddr("192.0.2.1");
+const IOAddress dummyYiaddr("1.2.3.4");
+const IOAddress dummySiaddr("192.0.2.255");
+const IOAddress dummyGiaddr("255.255.255.255");
// a dummy MAC address
const uint8_t dummyMacAddr[] = {0, 1, 2, 3, 4, 5};
@@ -110,7 +118,7 @@ const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
BOOST_STATIC_ASSERT(sizeof(dummyFile) == Pkt4::MAX_FILE_LEN + 1);
BOOST_STATIC_ASSERT(sizeof(dummySname) == Pkt4::MAX_SNAME_LEN + 1);
-/// Generates test packet
+/// @brief Generates test packet.
///
/// Allocates and generates test packet, with all fixed
/// fields set to non-zero values. Content is not always
@@ -129,23 +137,23 @@ generateTestPacket1() {
+sizeof(dummyMacAddr));
// hwType = 6(ETHERNET), hlen = 6(MAC address len)
- pkt->setHWAddr(6, 6, vectorMacAddr);
- pkt->setHops(13); // 13 relays. Wow!
- // transaction-id is already set
- pkt->setSecs(42);
- pkt->setFlags(0xffffU); // all flags set
- pkt->setCiaddr(IOAddress("192.0.2.1"));
- pkt->setYiaddr(IOAddress("1.2.3.4"));
- pkt->setSiaddr(IOAddress("192.0.2.255"));
- pkt->setGiaddr(IOAddress("255.255.255.255"));
- // chaddr already set with setHWAddr()
+ pkt->setHWAddr(dummyHtype, dummyHlen, vectorMacAddr);
+ pkt->setHops(dummyHops); // 13 relays. Wow!
+ // Transaction-id is already set.
+ pkt->setSecs(dummySecs);
+ pkt->setFlags(dummyFlags); // all flags set
+ pkt->setCiaddr(dummyCiaddr);
+ pkt->setYiaddr(dummyYiaddr);
+ pkt->setSiaddr(dummySiaddr);
+ pkt->setGiaddr(dummyGiaddr);
+ // Chaddr already set with setHWAddr().
pkt->setSname(dummySname, 64);
pkt->setFile(dummyFile, 128);
return (pkt);
}
-/// Generates test packet
+/// @brief Generates test packet.
///
/// Allocates and generates on-wire buffer that represents
/// test packet, with all fixed fields set to non-zero values.
@@ -156,7 +164,6 @@ generateTestPacket1() {
///
/// @return pointer to allocated Pkt4 object
// Returns a vector containing a DHCPv4 packet header.
-#if 0
vector<uint8_t>
generateTestPacket2() {
@@ -165,7 +172,7 @@ generateTestPacket2() {
uint8_t hdr[] = {
1, 6, 6, 13, // op, htype, hlen, hops,
0x12, 0x34, 0x56, 0x78, // transaction-id
- 0, 42, 0xff, 0xff, // 42 secs, 0xffff flags
+ 0, 42, 0x80, 0x00, // 42 secs, BROADCAST flags
192, 0, 2, 1, // ciaddr
1, 2, 3, 4, // yiaddr
192, 0, 2, 255, // siaddr
@@ -176,7 +183,7 @@ generateTestPacket2() {
vector<uint8_t> buf(hdr, hdr + sizeof(hdr));
// Append the large header fields.
- copy(dummyMacAddr, dummyMacAddr + Pkt4::MAX_CHADDR_LEN, back_inserter(buf));
+ copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN, back_inserter(buf));
copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN, back_inserter(buf));
copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN, back_inserter(buf));
@@ -187,25 +194,24 @@ generateTestPacket2() {
return (buf);
}
-#endif
TEST(Pkt4Test, fixedFields) {
shared_ptr<Pkt4> pkt = generateTestPacket1();
// ok, let's check packet values
- EXPECT_EQ(1, pkt->getOp());
- EXPECT_EQ(6, pkt->getHtype());
- EXPECT_EQ(6, pkt->getHlen());
- EXPECT_EQ(13, pkt->getHops());
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
EXPECT_EQ(dummyTransid, pkt->getTransid());
- EXPECT_EQ(42, pkt->getSecs());
- EXPECT_EQ(0xffff, pkt->getFlags());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
- EXPECT_EQ(string("192.0.2.1"), pkt->getCiaddr().toText());
- EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
- EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
- EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(dummyYiaddr.toText(), pkt->getYiaddr().toText());
+ EXPECT_EQ(dummySiaddr.toText(), pkt->getSiaddr().toText());
+ EXPECT_EQ(dummyGiaddr.toText(), pkt->getGiaddr().toText());
// chaddr is always 16 bytes long and contains link-layer addr (MAC)
EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), 16));
@@ -217,52 +223,59 @@ TEST(Pkt4Test, fixedFields) {
EXPECT_EQ(DHCPDISCOVER, pkt->getType());
}
-#if 0
-/// TODO Uncomment when ticket #1227 is implemented
TEST(Pkt4Test, fixedFieldsPack) {
shared_ptr<Pkt4> pkt = generateTestPacket1();
- shared_array<uint8_t> expectedFormat = generateTestPacket2();
+ vector<uint8_t> expectedFormat = generateTestPacket2();
EXPECT_NO_THROW(
pkt->pack();
);
- ASSERT_EQ(Pkt4::DHCPV4_PKT_HDR_LEN, pkt->len());
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+    // redundant but MUCH easier to debug in gdb
+ const uint8_t* exp = &expectedFormat[0];
+ const uint8_t* got = static_cast<const uint8_t*>(pkt->getBuffer().getData());
- EXPECT_EQ(0, memcmp(&expectedFormat[0], pkt->getData(), pkt->len()));
+ EXPECT_EQ(0, memcmp(exp, got, Pkt4::DHCPV4_PKT_HDR_LEN));
}
/// TODO Uncomment when ticket #1226 is implemented
TEST(Pkt4Test, fixedFieldsUnpack) {
- shared_array<uint8_t> expectedFormat = generateTestPkt2();
+ vector<uint8_t> expectedFormat = generateTestPacket2();
shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
Pkt4::DHCPV4_PKT_HDR_LEN));
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
// ok, let's check packet values
- EXPECT_EQ(1, pkt->getOp());
- EXPECT_EQ(6, pkt->getHtype());
- EXPECT_EQ(6, pkt->getHlen());
- EXPECT_EQ(13, pkt->getHops());
- EXPECT_EQ(transid, pkt->getTransid());
- EXPECT_EQ(42, pkt->getSecs());
- EXPECT_EQ(0xffff, pkt->getFlags());
-
- EXPECT_EQ(string("192.0.2.1"), pkt->getCiaddr.toText());
- EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr.toText());
- EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr.toText());
- EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr.toText());
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
+ EXPECT_EQ(dummyTransid, pkt->getTransid());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
+ EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
+ EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
// chaddr is always 16 bytes long and contains link-layer addr (MAC)
- EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(), 16));
+ EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), Pkt4::MAX_CHADDR_LEN));
- EXPECT_EQ(0, memcmp(expectedSname, pkt->getSname(), 64));
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_SNAME_LEN), pkt->getSname().size());
+ EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
- EXPECT_EQ(0, memcmp(expectedFile, pkt->getFile(), 128));
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_FILE_LEN), pkt->getFile().size());
+ EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
- EXPECT_EQ(DHCPSOLICIT, pkt->getType());
+ EXPECT_EQ(DHCPDISCOVER, pkt->getType());
}
-#endif
// this test is for hardware addresses (htype, hlen and chaddr fields)
TEST(Pkt4Test, hwAddr) {
@@ -270,18 +283,21 @@ TEST(Pkt4Test, hwAddr) {
vector<uint8_t> mac;
uint8_t expectedChaddr[Pkt4::MAX_CHADDR_LEN];
+    // We resize the vector to the specified length. For a fixed-length field
+    // this is more natural than clearing it (shrinking its size to 0) and
+    // push_back()ing each element (growing it back to MAX_CHADDR_LEN).
mac.resize(Pkt4::MAX_CHADDR_LEN);
Pkt4* pkt = 0;
// let's test each hlen, from 0 till 16
- for (int macLen=0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
- for (int i=0; i < Pkt4::MAX_CHADDR_LEN; i++) {
+ for (int macLen = 0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
+ for (int i = 0; i < Pkt4::MAX_CHADDR_LEN; i++) {
mac[i] = 0;
expectedChaddr[i] = 0;
}
- for (int i=0; i < macLen; i++) {
- mac[i] = 128+i;
- expectedChaddr[i] = 128+i;
+ for (int i = 0; i < macLen; i++) {
+ mac[i] = 128 + i;
+ expectedChaddr[i] = 128 + i;
}
// type and transaction doesn't matter in this test
@@ -292,16 +308,15 @@ TEST(Pkt4Test, hwAddr) {
EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(),
Pkt4::MAX_CHADDR_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
EXPECT_NO_THROW(
pkt->pack();
);
// CHADDR starts at offset 28 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+28, expectedChaddr,
- Pkt4::MAX_CHADDR_LEN));
-#endif
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+28;
+
+ EXPECT_EQ(0, memcmp(ptr, expectedChaddr, Pkt4::MAX_CHADDR_LEN));
delete pkt;
}
@@ -333,7 +348,7 @@ TEST(Pkt4Test, msgTypes) {
};
Pkt4* pkt = 0;
- for (int i=0; i < sizeof(types)/sizeof(msgType); i++) {
+ for (int i = 0; i < sizeof(types) / sizeof(msgType); i++) {
pkt = new Pkt4(types[i].dhcp, 0);
EXPECT_EQ(types[i].dhcp, pkt->getType());
@@ -357,35 +372,31 @@ TEST(Pkt4Test, msgTypes) {
TEST(Pkt4Test, sname) {
uint8_t sname[Pkt4::MAX_SNAME_LEN];
- uint8_t expectedSname[Pkt4::MAX_SNAME_LEN];
Pkt4* pkt = 0;
// let's test each sname length, from 0 till 64
for (int snameLen=0; snameLen < Pkt4::MAX_SNAME_LEN; snameLen++) {
- for (int i=0; i < Pkt4::MAX_SNAME_LEN; i++) {
+ for (int i = 0; i < Pkt4::MAX_SNAME_LEN; i++) {
sname[i] = 0;
- expectedSname[i] = 0;
}
- for (int i=0; i < snameLen; i++) {
+ for (int i = 0; i < snameLen; i++) {
sname[i] = i;
- expectedSname[i] = i;
}
// type and transaction doesn't matter in this test
pkt = new Pkt4(DHCPOFFER, 1234);
pkt->setSname(sname, snameLen);
- EXPECT_EQ(0, memcmp(expectedSname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
+ EXPECT_EQ(0, memcmp(sname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
EXPECT_NO_THROW(
pkt->pack();
);
// SNAME starts at offset 44 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+44, expectedChaddr, Pkt4::MAX_SNAME_LEN));
-#endif
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+44;
+ EXPECT_EQ(0, memcmp(ptr, sname, Pkt4::MAX_SNAME_LEN));
delete pkt;
}
@@ -394,39 +405,158 @@ TEST(Pkt4Test, sname) {
TEST(Pkt4Test, file) {
uint8_t file[Pkt4::MAX_FILE_LEN];
- uint8_t expectedFile[Pkt4::MAX_FILE_LEN];
Pkt4* pkt = 0;
- // let's test each file length, from 0 till 64
- for (int fileLen=0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
- for (int i=0; i < Pkt4::MAX_FILE_LEN; i++) {
+    // Let's test each file length, from 0 up to MAX_FILE_LEN (128).
+ for (int fileLen = 0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
+ for (int i = 0; i < Pkt4::MAX_FILE_LEN; i++) {
file[i] = 0;
- expectedFile[i] = 0;
}
- for (int i=0; i < fileLen; i++) {
+ for (int i = 0; i < fileLen; i++) {
file[i] = i;
- expectedFile[i] = i;
}
- // type and transaction doesn't matter in this test
+ // Type and transaction doesn't matter in this test.
pkt = new Pkt4(DHCPOFFER, 1234);
pkt->setFile(file, fileLen);
- EXPECT_EQ(0, memcmp(expectedFile, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
+ EXPECT_EQ(0, memcmp(file, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
-#if 0
- /// TODO Uncomment when ticket #1227 is implemented)
+ //
EXPECT_NO_THROW(
pkt->pack();
);
- // FILE starts at offset 44 in DHCP packet
- EXPECT_EQ(0, memcmp(pkt->getData()+44, expectedChaddr, Pkt4::MAX_FILE_LEN));
-#endif
+ // FILE starts at offset 108 in DHCP packet.
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+108;
+ EXPECT_EQ(0, memcmp(ptr, file, Pkt4::MAX_FILE_LEN));
delete pkt;
}
}
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 128, 3, 30, 31, 32,
+ 254, 3, 40, 41, 42
+};
+
+TEST(Pkt4Test, options) {
+ Pkt4* pkt = new Pkt4(DHCPOFFER, 0);
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].push_back(i*10);
+ payload[i].push_back(i*10+1);
+ payload[i].push_back(i*10+2);
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+    boost::shared_ptr<Option> opt5(new Option(Option::V4, 128, payload[3]));
+    boost::shared_ptr<Option> opt4(new Option(Option::V4, 254, payload[4]));
+
+ pkt->addOption(opt1);
+ pkt->addOption(opt2);
+ pkt->addOption(opt3);
+ pkt->addOption(opt4);
+ pkt->addOption(opt5);
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+ EXPECT_FALSE(pkt->getOption(127)); // no such option
+
+ // options are unique in DHCPv4. It should not be possible
+ // to add more than one option of the same type.
+ EXPECT_THROW(
+ pkt->addOption(opt1),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ const OutputBuffer& buf = pkt->getBuffer();
+ // check that all options are stored, they should take sizeof(v4Opts)
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+ buf.getLength());
+
+    // check that this extra data actually contains our options
+ const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
+ ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
+ EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+
+ EXPECT_NO_THROW(
+ delete pkt;
+ );
+}
+
+TEST(Pkt4Test, unpackOptions) {
+
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+    for (size_t i = 0; i < sizeof(v4Opts); i++) {
+ expectedFormat.push_back(v4Opts[i]);
+ }
+
+ // now expectedFormat contains fixed format and 5 options
+
+ shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ expectedFormat.size()));
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+
+ shared_ptr<Option> x = pkt->getOption(12);
+    ASSERT_TRUE(x); // option 12 should exist
+ EXPECT_EQ(12, x->getType()); // this should be option 12
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = pkt->getOption(13);
+ ASSERT_TRUE(x); // option 13 should exist
+ EXPECT_EQ(13, x->getType()); // this should be option 13
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = pkt->getOption(14);
+ ASSERT_TRUE(x); // option 14 should exist
+ EXPECT_EQ(14, x->getType()); // this should be option 14
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = pkt->getOption(128);
+    ASSERT_TRUE(x); // option 128 should exist
+    EXPECT_EQ(128, x->getType()); // this should be option 128
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = pkt->getOption(254);
+    ASSERT_TRUE(x); // option 254 should exist
+ EXPECT_EQ(254, x->getType()); // this should be option 254
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
+}
+
} // end of anonymous namespace
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
index 0f110ba..968b24c 100644
--- a/src/lib/dhcp/tests/pkt6_unittest.cc
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -18,10 +18,10 @@
#include <arpa/inet.h>
#include <gtest/gtest.h>
-#include "io_address.h"
-#include "dhcp/option.h"
-#include "dhcp/pkt6.h"
-#include "dhcp/dhcp6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/dhcp6.h>
using namespace std;
using namespace isc;
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index f286c67..47632cb 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -526,7 +526,7 @@ public:
/// source message to the same section of this message
///
/// \param section the section to append
- /// \param target The source Message
+ /// \param source The source Message
void appendSection(const Section section, const Message& source);
/// \brief Prepare for making a response from a request.
@@ -668,7 +668,7 @@ typedef boost::shared_ptr<const Message> ConstMessagePtr;
///
/// \param os A \c std::ostream object on which the insertion operation is
/// performed.
-/// \param record A \c Message object output by the operation.
+/// \param message A \c Message object output by the operation.
/// \return A reference to the same \c std::ostream object referenced by
/// parameter \c os after the insertion operation.
std::ostream& operator<<(std::ostream& os, const Message& message);
diff --git a/src/lib/dns/messagerenderer.cc b/src/lib/dns/messagerenderer.cc
index 767aca9..02f5519 100644
--- a/src/lib/dns/messagerenderer.cc
+++ b/src/lib/dns/messagerenderer.cc
@@ -150,8 +150,6 @@ private:
struct MessageRenderer::MessageRendererImpl {
/// \brief Constructor from an output buffer.
///
- /// \param buffer An \c OutputBuffer object to which wire format data is
- /// written.
MessageRendererImpl() :
nbuffer_(Name::MAX_WIRE), msglength_limit_(512),
truncated_(false), compress_mode_(MessageRenderer::CASE_INSENSITIVE)
diff --git a/src/lib/dns/name.cc b/src/lib/dns/name.cc
index 4cd0b2b..772417f 100644
--- a/src/lib/dns/name.cc
+++ b/src/lib/dns/name.cc
@@ -700,7 +700,7 @@ Name::split(const unsigned int first, const unsigned int n) const {
}
Name
-Name::split(const unsigned level) const {
+Name::split(const unsigned int level) const {
if (level >= getLabelCount()) {
isc_throw(OutOfRange, "invalid level for name split (" << level
<< ") for name " << *this);
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index 4043445..ce556df 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -25,6 +25,8 @@
#include "messagerenderer_python.h"
#include "name_python.h"
+#include <iostream>
+
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
@@ -97,7 +99,7 @@ int Name_init(s_Name* self, PyObject* args);
void Name_destroy(s_Name* self);
PyObject* Name_toWire(s_Name* self, PyObject* args);
-PyObject* Name_toText(s_Name* self);
+PyObject* Name_toText(s_Name* self, PyObject* args);
PyObject* Name_str(PyObject* self);
PyObject* Name_getLabelCount(s_Name* self);
PyObject* Name_at(s_Name* self, PyObject* args);
@@ -120,8 +122,9 @@ PyMethodDef Name_methods[] = {
"Returns the length" },
{ "get_labelcount", reinterpret_cast<PyCFunction>(Name_getLabelCount), METH_NOARGS,
"Returns the number of labels" },
- { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_NOARGS,
- "Returns the string representation" },
+ { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_VARARGS,
+ "Returns the string representation. The optional argument must be either"
+ "True of False. If True, the final dot will be omitted." },
{ "to_wire", reinterpret_cast<PyCFunction>(Name_toWire), METH_VARARGS,
"Converts the Name object to wire format.\n"
"The argument can be either a MessageRenderer or an object that "
@@ -278,8 +281,24 @@ Name_getLabelCount(s_Name* self) {
}
PyObject*
-Name_toText(s_Name* self) {
- return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+Name_toText(s_Name* self, PyObject* args) {
+ PyObject* omit_final_dot_obj = NULL;
+ if (PyArg_ParseTuple(args, "|O", &omit_final_dot_obj)) {
+ bool omit_final_dot = false;
+ if (omit_final_dot_obj != NULL) {
+ if (PyBool_Check(omit_final_dot_obj) != 0) {
+ omit_final_dot = (omit_final_dot_obj == Py_True);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Optional argument 1 of to_text() should be True of False");
+ return (NULL);
+ }
+ }
+ return (Py_BuildValue("s",
+ self->cppobj->toText(omit_final_dot).c_str()));
+ } else {
+ return (NULL);
+ }
}
PyObject*
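For reference, the optional argument documented above maps directly onto the
C++ Name::toText(bool omit_final_dot) overload that the binding calls. A
minimal C++ sketch (not part of the patch; the expected output in the
comments is an assumption based on the documentation):

    #include <dns/name.h>
    #include <iostream>

    int main() {
        const isc::dns::Name name("example.com");
        std::cout << name.toText() << std::endl;      // "example.com." (default)
        std::cout << name.toText(true) << std::endl;  // "example.com", final dot omitted
        return (0);
    }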
diff --git a/src/lib/dns/python/tests/name_python_test.py b/src/lib/dns/python/tests/name_python_test.py
index b8e625a..5263412 100644
--- a/src/lib/dns/python/tests/name_python_test.py
+++ b/src/lib/dns/python/tests/name_python_test.py
@@ -121,6 +121,15 @@ class NameTest(unittest.TestCase):
self.assertEqual(".", str(self.name2))
self.assertEqual("something.completely.different.", self.name3.to_text())
+ self.assertEqual("example.com.", self.name1.to_text(False))
+ self.assertEqual("example.com", self.name1.to_text(True))
+
+ # make sure it does not behave unexpectedly on wrong arguments
+ self.assertRaises(TypeError, self.name1.to_text, True, 1)
+ self.assertRaises(TypeError, self.name1.to_text, 1)
+ self.assertRaises(TypeError, self.name1.to_text, [])
+ self.assertRaises(TypeError, self.name1.to_text, "foo")
+
def test_to_wire(self):
b1 = bytearray()
self.name1.to_wire(b1)
diff --git a/src/lib/dns/rdatafields.h b/src/lib/dns/rdatafields.h
index e33bcd7..16880f0 100644
--- a/src/lib/dns/rdatafields.h
+++ b/src/lib/dns/rdatafields.h
@@ -296,7 +296,7 @@ public:
/// as long as the \c RdataFields object is used.
///
/// \param fields An array of \c FieldSpec entries. This can be \c NULL.
- /// \param nfields The number of entries of \c fields.
+ /// \param fields_length The total length of the \c fields.
/// \param data A pointer to memory region for the entire RDATA. This can
/// be NULL.
/// \param data_length The length of \c data in bytes.
diff --git a/src/lib/dns/rrset.h b/src/lib/dns/rrset.h
index 6c15b53..1586465 100644
--- a/src/lib/dns/rrset.h
+++ b/src/lib/dns/rrset.h
@@ -478,7 +478,7 @@ public:
/// \brief Return the current \c Rdata corresponding to the rdata cursor.
///
- /// \return A reference to an \c rdata::::Rdata object corresponding
+ /// \return A reference to an \c rdata::Rdata object corresponding
/// to the rdata cursor.
virtual const rdata::Rdata& getCurrent() const = 0;
diff --git a/src/lib/dns/tsigkey.h b/src/lib/dns/tsigkey.h
index 31211d1..6081dd3 100644
--- a/src/lib/dns/tsigkey.h
+++ b/src/lib/dns/tsigkey.h
@@ -113,10 +113,10 @@ public:
/// \brief Constructor from an input string
///
/// The string must be of the form:
- /// <name>:<secret>[:<algorithm>]
- /// Where <name> is a domain name for the key, <secret> is a
+ /// name:secret[:algorithm]
+ /// Where "name" is a domain name for the key, "secret" is a
/// base64 representation of the key secret, and the optional
- /// algorithm is an algorithm identifier as specified in RFC4635.
+ /// "algorithm" is an algorithm identifier as specified in RFC 4635.
/// The default algorithm is hmac-md5.sig-alg.reg.int.
///
/// The same restriction about the algorithm name (and secret) as that
@@ -188,11 +188,10 @@ public:
///
/// The resulting string will be of the form
/// name:secret:algorithm
- /// Where <name> is a domain name for the key, <secret> is a
- /// base64 representation of the key secret, and algorithm is
- /// an algorithm identifier as specified in RFC4635
+ /// Where "name" is a domain name for the key, "secret" is a
+ /// base64 representation of the key secret, and "algorithm" is
+ /// an algorithm identifier as specified in RFC 4635.
///
- /// \param key the TSIG key to convert
/// \return The string representation of the given TSIGKey.
std::string toText() const;
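To illustrate the name:secret[:algorithm] form described above, here is a
minimal sketch (not part of the patch); the key name and the base64 secret
are made up, and the algorithm is left out so the documented default
(hmac-md5.sig-alg.reg.int) applies:

    #include <dns/tsigkey.h>
    #include <iostream>

    int main() {
        // "name:secret[:algorithm]"; the secret must be valid base64.
        const isc::dns::TSIGKey key("tsig-key.example.com:SGVsbG8gd29ybGQ=");
        // toText() returns the name:secret:algorithm form described above.
        std::cout << key.toText() << std::endl;
        return (0);
    }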
diff --git a/src/lib/log/log_formatter.h b/src/lib/log/log_formatter.h
index ca23844..7a9e5fa 100644
--- a/src/lib/log/log_formatter.h
+++ b/src/lib/log/log_formatter.h
@@ -169,7 +169,7 @@ public:
/// Deactivates the current formatter. In case the formatter is not active,
/// only produces another inactive formatter.
///
- /// \param arg The argument to place into the placeholder.
+ /// \param value The argument to place into the placeholder.
template<class Arg> Formatter& arg(const Arg& value) {
if (logger_) {
try {
diff --git a/src/lib/log/logger_level_impl.h b/src/lib/log/logger_level_impl.h
index 9289a1d..c990796 100644
--- a/src/lib/log/logger_level_impl.h
+++ b/src/lib/log/logger_level_impl.h
@@ -83,7 +83,7 @@ public:
/// The log4cplus log level may be non-standard in which case it is
/// encoding a BIND 10 debug level as well.
///
- /// \param level log4cplus log level
+ /// \param loglevel log4cplus log level
///
/// \return Equivalent BIND 10 severity and debug level
static
diff --git a/src/lib/log/logger_manager_impl.h b/src/lib/log/logger_manager_impl.h
index aa596a0..f99f832 100644
--- a/src/lib/log/logger_manager_impl.h
+++ b/src/lib/log/logger_manager_impl.h
@@ -59,8 +59,6 @@ public:
/// This resets the hierachy of loggers back to their defaults. This means
/// that all non-root loggers (if they exist) are set to NOT_SET, and the
/// root logger reset to logging informational messages.
- ///
- /// \param root_name BIND 10 name of the root logger
static void processInit();
/// \brief Process Specification
diff --git a/src/lib/log/logger_specification.h b/src/lib/log/logger_specification.h
index 35c879c..6805fdd 100644
--- a/src/lib/log/logger_specification.h
+++ b/src/lib/log/logger_specification.h
@@ -103,7 +103,7 @@ public:
/// \brief Add output option.
///
- /// \param Option to add to the list.
+ /// \param option Option to add to the list.
void addOutputOption(const OutputOption& option) {
options_.push_back(option);
}
diff --git a/src/lib/log/message_dictionary.h b/src/lib/log/message_dictionary.h
index 23f76d7..519986d 100644
--- a/src/lib/log/message_dictionary.h
+++ b/src/lib/log/message_dictionary.h
@@ -79,7 +79,7 @@ public:
///
/// \return true if the message was added to the dictionary, false if the
/// message existed and it was not added.
- virtual bool add (const std::string& ident, const std::string& test);
+ virtual bool add (const std::string& ident, const std::string& text);
/// \brief Replace Message
diff --git a/src/lib/nsas/nameserver_address_store.h b/src/lib/nsas/nameserver_address_store.h
index 87845c9..1af535a 100644
--- a/src/lib/nsas/nameserver_address_store.h
+++ b/src/lib/nsas/nameserver_address_store.h
@@ -92,7 +92,10 @@ public:
/// \brief cancel the given lookup action
///
- /// \param callback Callback object that would be called
+ /// \param zone Name of zone.
+ /// \param class_code Class of the zone.
+ /// \param callback Callback object that would be called.
+ /// \param family Address family for which lookup is being cancelled.
void cancel(const std::string& zone, const dns::RRClass& class_code,
const boost::shared_ptr<AddressRequestCallback>& callback,
AddressFamily family = ANY_OK);
diff --git a/src/lib/nsas/zone_entry.h b/src/lib/nsas/zone_entry.h
index f772784..482b89f 100644
--- a/src/lib/nsas/zone_entry.h
+++ b/src/lib/nsas/zone_entry.h
@@ -66,7 +66,7 @@ public:
* different objects.
* \param nameserver_table Hashtable of NameServerEntry objects for
* this zone
- * \param namesever_lru LRU for the nameserver entries
+ * \param nameserver_lru LRU for the nameserver entries
* \todo Move to cc file, include the lookup (if NSAS uses resolver for
* everything)
*/
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 9a5758a..c681d07 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -18,6 +18,7 @@ import struct
import os
import copy
import subprocess
+import copy
from isc.log_messages.bind10_messages import *
from libutil_io_python import recv_fd
@@ -201,6 +202,9 @@ class WrappedSocket:
class Creator(Parser):
"""
This starts the socket creator and allows asking for the sockets.
+
+ Note: __process shouldn't be reset once created. See the note
+ in the SockCreator class for details.
"""
def __init__(self, path):
(local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -233,4 +237,3 @@ class Creator(Parser):
logger.warn(BIND10_SOCKCREATOR_KILL)
if self.__process is not None:
self.__process.kill()
- self.__process = None
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index a366fd8..bac51ff 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -17,11 +17,21 @@ from isc.bind10.component import Component, BaseComponent
import isc.bind10.sockcreator
from bind10_config import LIBEXECDIR
import os
+import posix
+import isc.log
+from isc.log_messages.bind10_messages import *
+
+logger = isc.log.Logger("boss")
class SockCreator(BaseComponent):
"""
The socket creator component. Will start and stop the socket creator
accordingly.
+
+ Note: __creator shouldn't be reset explicitly once created. The
+ underlying Popen object would then wait() the child process internally,
+ which breaks the assumption of the boss, who is expecting to see
+ the process die in waitpid().
"""
def __init__(self, process, boss, kind, address=None, params=None):
BaseComponent.__init__(self, boss, kind)
@@ -32,10 +42,10 @@ class SockCreator(BaseComponent):
self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
os.environ['PATH'])
self._boss.register_process(self.pid(), self)
+ self._boss.log_started(self.pid())
def _stop_internal(self):
self.__creator.terminate()
- self.__creator = None
def name(self):
return "Socket creator"
@@ -103,6 +113,31 @@ class XfrIn(Component):
Component.__init__(self, process, boss, kind, 'Xfrin', None,
boss.start_xfrin)
+class SetUID(BaseComponent):
+ """
+ This is a pseudo-component which drops root privileges when started
+ and sets the uid stored in boss.
+
+ This component does nothing when stopped.
+ """
+ def __init__(self, process, boss, kind, address=None, params=None):
+ BaseComponent.__init__(self, boss, kind)
+ self.uid = boss.uid
+
+ def _start_internal(self):
+ if self.uid is not None:
+ logger.info(BIND10_SETUID, self.uid)
+ posix.setuid(self.uid)
+
+ def _stop_internal(self): pass
+ def kill(self, forcefull=False): pass
+
+ def name(self):
+ return "Set UID"
+
+ def pid(self):
+ return None
+
def get_specials():
"""
List of specially started components. Each one should be the class than can
@@ -118,5 +153,7 @@ def get_specials():
'resolver': Resolver,
'cmdctl': CmdCtl,
# FIXME: Temporary workaround before #1292 is done
- 'xfrin': XfrIn
+ 'xfrin': XfrIn,
+ # TODO: Remove when not needed, workaround before sockcreator works
+ 'setuid': SetUID
}
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 734cb1e..15fa470 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -106,6 +106,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
self.__registered_processes = {}
self.__stop_process_params = None
self.__start_simple_params = None
+ # Pretending to be boss
+ self.uid = None
+ self.__uid_set = None
def __start(self):
"""
@@ -418,7 +421,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
def test_pid_not_running(self):
"""
Test that a componet that is not yet started doesn't have a PID.
- But it won't failed if asked for and returns None.
+ But it won't fail if asked for and will return None.
"""
for component_type in [Component,
isc.bind10.special_component.SockCreator,
@@ -427,7 +430,8 @@ class ComponentTests(BossUtils, unittest.TestCase):
isc.bind10.special_component.Auth,
isc.bind10.special_component.Resolver,
isc.bind10.special_component.CmdCtl,
- isc.bind10.special_component.XfrIn]:
+ isc.bind10.special_component.XfrIn,
+ isc.bind10.special_component.SetUID]:
component = component_type('none', self, 'needed')
self.assertIsNone(component.pid())
@@ -527,6 +531,31 @@ class ComponentTests(BossUtils, unittest.TestCase):
self.assertTrue(process.killed)
self.assertFalse(process.terminated)
+ def setuid(self, uid):
+ self.__uid_set = uid
+
+ def test_setuid(self):
+ """
+ Some tests around the SetUID pseudo-component.
+ """
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ orig_setuid = isc.bind10.special_component.posix.setuid
+ isc.bind10.special_component.posix.setuid = self.setuid
+ component.start()
+ # No uid set in boss, nothing called.
+ self.assertIsNone(self.__uid_set)
+ # Doesn't do anything, but doesn't crash
+ component.stop()
+ component.kill()
+ component.kill(True)
+ self.uid = 42
+ component = isc.bind10.special_component.SetUID(None, self, 'needed',
+ None)
+ component.start()
+ # This time, it gets called
+ self.assertEqual(42, self.__uid_set)
+
class TestComponent(BaseComponent):
"""
A test component. It does not start any processes or so, it just logs
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index c7eb8ad..2d998ce 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -543,6 +543,7 @@ class UIModuleCCSession(MultiConfigData):
cur_map = {}
if value in cur_map:
del cur_map[value]
+ self.set_value(identifier, cur_map)
else:
raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 88c0ee8..4d568be 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -117,12 +117,13 @@ class ConfigManagerData:
if file:
file.close();
return config
-
+
def write_to_file(self, output_file_name = None):
"""Writes the current configuration data to a file. If
output_file_name is not specified, the file used in
read_from_file is used."""
filename = None
+
try:
file = tempfile.NamedTemporaryFile(mode='w',
prefix="b10-config.db.",
@@ -291,7 +292,7 @@ class ConfigManager:
# ok, just start with an empty config
self.config = ConfigManagerData(self.data_path,
self.database_filename)
-
+
def write_config(self):
"""Write the current configuration to the file specificied at init()"""
self.config.write_to_file()
@@ -445,7 +446,7 @@ class ConfigManager:
answer = ccsession.create_answer(1, "Wrong number of arguments")
if not answer:
answer = ccsession.create_answer(1, "No answer message from " + cmd[0])
-
+
return answer
def _handle_module_spec(self, spec):
@@ -455,7 +456,7 @@ class ConfigManager:
# todo: error checking (like keyerrors)
answer = {}
self.set_module_spec(spec)
-
+
# We should make one general 'spec update for module' that
# passes both specification and commands at once
spec_update = ccsession.create_command(ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
@@ -491,7 +492,7 @@ class ConfigManager:
else:
answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
return answer
-
+
def run(self):
"""Runs the configuration manager."""
self.running = True
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index fabd37d..b2cf048 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -515,7 +515,7 @@ class MultiConfigData:
return value, self.CURRENT
if default:
value = self.get_default_value(identifier)
- if value != None:
+ if value is not None:
return value, self.DEFAULT
return None, self.NONE
@@ -649,7 +649,11 @@ class MultiConfigData:
id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
cur_value, status = self.get_value(cur_id_part + id)
# Check if the value was there in the first place
- if status == MultiConfigData.NONE and cur_id_part != "/":
+ # If we are at the final element, we do not care whether we found
+ # it, since if we have reached this point and it did not exist,
+ # it was apparently an optional value without a default.
+ if status == MultiConfigData.NONE and cur_id_part != "/" and\
+ cur_id_part + id != identifier:
raise isc.cc.data.DataNotFoundError(id_part +
" not found in " +
cur_id_part)
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index 1c63957..8d616e2 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -756,6 +756,17 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs = self.create_uccs_named_set(fake_conn)
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'a': 1, 'b': 2}, value)
+
+ # make sure that removing from default actually removes it
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+ self.assertEqual(uccs.LOCAL, status)
+
+ # ok, put it back now
+ uccs.add_value("/Spec32/named_set_item", "a")
+ uccs.set_value("/Spec32/named_set_item/a", 1)
+
uccs.add_value("/Spec32/named_set_item", "foo")
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
@@ -765,14 +776,50 @@ class TestUIModuleCCSession(unittest.TestCase):
value, status = uccs.get_value("/Spec32/named_set_item")
self.assertEqual({'b': 2}, value)
+ uccs.set_value("/Spec32/named_set_item/c", 5)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({"b": 2, "c": 5}, value)
+
self.assertRaises(isc.cc.data.DataNotFoundError,
uccs.set_value,
- "/Spec32/named_set_item/no_such_item",
+ "/Spec32/named_set_item/no_such_item/a",
4)
self.assertRaises(isc.cc.data.DataNotFoundError,
uccs.remove_value, "/Spec32/named_set_item",
"no_such_item")
+ def test_set_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({}, value)
+ self.assertEqual(status, uccs.DEFAULT)
+
+ # Try setting a value that is optional but has no default
+ uccs.add_value("/Spec32/named_set_item2", "new1")
+ uccs.set_value("/Spec32/named_set_item2/new1/first", 3)
+ # Different method to add a new element
+ uccs.set_value("/Spec32/named_set_item2/new2", { "second": 4 })
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3 }, "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ uccs.set_value("/Spec32/named_set_item2/new1/second", "foo")
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3, "second": "foo" },
+ "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ # make sure using a bad name still fails
+ self.assertRaises(isc.cc.data.DataNotFoundError, uccs.set_value,
+ "/Spec32/named_set_item2/doesnotexist/first", 3)
+
+
+
def test_commit(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index c8df3b6..589a398 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -37,7 +37,7 @@ class TestConfigManagerData(unittest.TestCase):
It shouldn't append the data path to it.
"""
abs_path = self.data_path + os.sep + "b10-config-imaginary.db"
- data = ConfigManagerData(os.getcwd(), abs_path)
+ data = ConfigManagerData(self.data_path, abs_path)
self.assertEqual(abs_path, data.db_filename)
self.assertEqual(self.data_path, data.data_path)
@@ -88,7 +88,7 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(cfd1, cfd2)
cfd2.data['test'] = { 'a': [ 1, 2, 3]}
self.assertNotEqual(cfd1, cfd2)
-
+
class TestConfigManager(unittest.TestCase):
@@ -198,8 +198,8 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
config_spec = self.cm.get_config_spec('Spec2')
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
-
-
+
+
def test_get_commands_spec(self):
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, {})
@@ -250,7 +250,7 @@ class TestConfigManager(unittest.TestCase):
def test_write_config(self):
# tested in ConfigManagerData tests
pass
-
+
def _handle_msg_helper(self, msg, expected_answer):
answer = self.cm.handle_msg(msg)
self.assertEqual(expected_answer, answer)
@@ -338,7 +338,7 @@ class TestConfigManager(unittest.TestCase):
# self.fake_session.get_message(self.name, None))
#self.assertEqual({'version': 1, 'TestModule': {'test': 124}}, self.cm.config.data)
#
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["module_spec", self.spec.get_full_spec()]
},
{'result': [0]})
@@ -359,7 +359,7 @@ class TestConfigManager(unittest.TestCase):
#self.assertEqual({'commands_update': [ self.name, self.commands ] },
# self.fake_session.get_message("Cmdctl", None))
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["shutdown"]
},
{'result': [0]})
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index 0dd441d..bede625 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -627,7 +627,7 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list(None, False)
self.assertEqual(['Spec32'], config_items)
config_items = self.mcd.get_config_item_list(None, True)
- self.assertEqual(['Spec32/named_set_item'], config_items)
+ self.assertEqual(['Spec32/named_set_item', 'Spec32/named_set_item2'], config_items)
self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
self.assertEqual(['Spec32/named_set_item/aaaa',
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 60282d9..a5b4ca3 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -8,6 +8,7 @@ python_PYTHON = __init__.py master.py sqlite3_ds.py
# new data
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += $(SQLITE_CFLAGS)
python_LTLIBRARIES = datasrc.la
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
index b1d9d25..087200a 100644
--- a/src/lib/python/isc/datasrc/iterator_inc.cc
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -31,4 +31,37 @@ the end of the zone.\n\
Raises an isc.datasrc.Error exception if it is called again after returning\n\
None\n\
";
+
+// Modifications:
+// - ConstRRset->RRset
+// - NULL->None
+// - removed notes about derived classes (which doesn't apply for python)
+const char* const ZoneIterator_getSOA_doc = "\
+get_soa() -> isc.dns.RRset\n\
+\n\
+Return the SOA record of the zone in the iterator context.\n\
+\n\
+This method returns the zone's SOA record (if any, and a valid zone\n\
+should have it) in the form of an RRset object. This SOA is identical\n\
+to that (again, if any) contained in the sequence of RRsets returned\n\
+by the iterator. In that sense this method is redundant, but is\n\
+provided as a convenient utility for the application of the iterator;\n\
+the application may need to know the SOA serial or the SOA RR itself\n\
+for the purpose of protocol handling or skipping the expensive\n\
+iteration processing.\n\
+\n\
+If the zone doesn't have an SOA (which is broken, but some data source\n\
+may allow that situation), this method returns None. Also, in the\n\
+normal and valid case, the SOA should have exactly one RDATA, but this\n\
+API does not guarantee it as some data source may accept such an\n\
+abnormal condition. It's up to the caller whether to check the number\n\
+of RDATA and how to react to the unexpected case.\n\
+\n\
+Exceptions:\n\
+ None\n\
+\n\
+Return Value(s): An SOA RRset object that would be\n\
+returned from the iteration. It will be None if the zone doesn't have\n\
+an SOA.\n\
+";
} // unnamed namespace
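The get_soa() binding added in iterator_python.cc below is a thin wrapper
around the C++ ZoneIterator::getSOA(), which returns a ConstRRsetPtr (a null
pointer when the zone has no SOA). A minimal sketch of that C++ pattern, with
the header path being an assumption:

    #include <datasrc/iterator.h>   // assumed location of ZoneIterator
    #include <dns/rrset.h>
    #include <iostream>

    void printZoneSOA(isc::datasrc::ZoneIterator& it) {
        const isc::dns::ConstRRsetPtr soa = it.getSOA();
        if (!soa) {
            // broken but possible: the data source has no SOA for this zone
            std::cout << "zone has no SOA" << std::endl;
            return;
        }
        std::cout << soa->toText();   // the SOA RRset in master-file text form
    }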
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
index eb368ba..9e6900c 100644
--- a/src/lib/python/isc/datasrc/iterator_python.cc
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -132,10 +132,35 @@ ZoneIterator_next(PyObject* self) {
}
}
+PyObject*
+ZoneIterator_getSOA(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getSOA();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown when we call getNextRRset() when we are
+ // already done iterating ('iterating past end')
+ // We could also simply return None again
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
PyMethodDef ZoneIterator_methods[] = {
- { "get_next_rrset",
- reinterpret_cast<PyCFunction>(ZoneIterator_getNextRRset), METH_NOARGS,
+ { "get_next_rrset", ZoneIterator_getNextRRset, METH_NOARGS,
ZoneIterator_getNextRRset_doc },
+ { "get_soa", ZoneIterator_getSOA, METH_NOARGS, ZoneIterator_getSOA_doc },
{ NULL, NULL, 0, NULL }
};
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index dcb8904..c649f6e 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -189,6 +189,20 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+ def test_iterator_soa(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+ expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
+ isc.dns.RRClass.IN(),
+ isc.dns.RRType.SOA(),
+ isc.dns.RRTTL(3600))
+ expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
+ isc.dns.RRClass.IN(),
+ "master.example.com. " +
+ "admin.example.com. 678 " +
+ "3600 1800 2419200 7200"))
+ self.assertTrue(rrsets_equal(expected_soa, iterator.get_soa()))
+
def test_construct(self):
# can't construct directly
self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
@@ -512,6 +526,17 @@ class DataSrcUpdater(unittest.TestCase):
dsc.get_updater(isc.dns.Name("example.com"), True)
self.assertEqual(orig_ref, sys.getrefcount(dsc))
+ def test_iterate_over_empty_zone(self):
+ # empty the test zone first
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.commit()
+
+ # Check the iterator behavior for the empty zone.
+ iterator = dsc.get_iterator(isc.dns.Name("example.com."))
+ self.assertEqual(None, iterator.get_soa())
+ self.assertEqual(None, iterator.get_next_rrset())
+
if __name__ == "__main__":
isc.log.init("bind10")
unittest.main()
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index d692dc1..0d3fb4c 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -84,6 +84,7 @@ questionText(const isc::dns::Question& question) {
/// It is not public function, therefore it's not in header. But it's not
/// in anonymous namespace, so we can call it from unittests.
/// \param name The name we want to delegate to.
+/// \param rrclass The class.
/// \param cache The place too look for known delegations.
std::string
deepestDelegation(Name name, RRClass rrclass,
diff --git a/src/lib/resolve/recursive_query.h b/src/lib/resolve/recursive_query.h
index b9fb80d..9af2d72 100644
--- a/src/lib/resolve/recursive_query.h
+++ b/src/lib/resolve/recursive_query.h
@@ -38,7 +38,7 @@ public:
///
/// Adds a round-trip time to the internal vector of times.
///
- /// \param RTT to record.
+ /// \param rtt RTT to record.
void addRtt(uint32_t rtt) {
rtt_.push_back(rtt);
}
@@ -73,6 +73,10 @@ public:
///
/// \param dns_service The DNS Service to perform the recursive
/// query on.
+ /// \param nsas Nameserver address store, used to hold information about zone
+ /// nameservers.
+ /// \param cache Resolver cache object, used to hold information about retrieved
+ /// records.
/// \param upstream Addresses and ports of the upstream servers
/// to forward queries to.
/// \param upstream_root Addresses and ports of the root servers
@@ -133,8 +137,10 @@ public:
/// object.
///
/// \param question The question being answered <qname/qclass/qtype>
- /// \param answer_message An output Message into which the final response will be copied
- /// \param buffer An output buffer into which the intermediate responses will be copied
+ /// \param answer_message An output Message into which the final response will
+ /// be copied.
+ /// \param buffer An output buffer into which the intermediate responses will
+ /// be copied.
/// \param server A pointer to the \c DNSServer object handling the client
void resolve(const isc::dns::Question& question,
isc::dns::MessagePtr answer_message,
@@ -147,6 +153,10 @@ public:
/// function resolve().
///
/// \param query_message the full query got from client.
+ /// \param answer_message the full answer received from other server.
+ /// \param buffer Output buffer into which the responses will be copied.
+ /// \param server Server object that handles receipt and processing of the
+ /// received messages.
/// \param callback callback object
void forward(isc::dns::ConstMessagePtr query_message,
isc::dns::MessagePtr answer_message,
diff --git a/src/lib/resolve/resolve.h b/src/lib/resolve/resolve.h
index 550b620..0a588e2 100644
--- a/src/lib/resolve/resolve.h
+++ b/src/lib/resolve/resolve.h
@@ -37,7 +37,6 @@ namespace resolve {
/// section), you can simply use this to create an error response.
///
/// \param answer_message The message to clear and place the error in
-/// \param question The question to add to the
/// \param error_code The error Rcode
void makeErrorMessage(isc::dns::MessagePtr answer_message,
const isc::dns::Rcode& error_code);
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
index 1c5928a..8cafb1e 100644
--- a/src/lib/server_common/client.h
+++ b/src/lib/server_common/client.h
@@ -140,7 +140,7 @@ private:
///
/// \param os A \c std::ostream object on which the insertion operation is
/// performed.
-/// \param edns A reference to an \c Client object output by the operation.
+/// \param client A reference to a \c Client object output by the operation.
/// \return A reference to the same \c std::ostream object referenced by
/// parameter \c os after the insertion operation.
std::ostream& operator<<(std::ostream& os, const Client& client);
diff --git a/src/lib/server_common/logger.h b/src/lib/server_common/logger.h
index ae07865..80bc81d 100644
--- a/src/lib/server_common/logger.h
+++ b/src/lib/server_common/logger.h
@@ -18,7 +18,7 @@
#include <log/macros.h>
#include <server_common/server_common_messages.h>
-/// \file logger.h
+/// \file server_common/logger.h
/// \brief Server Common library global logger
///
/// This holds the logger for the server common library. It is a private header
diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h
index b7a8e28..eb90d64 100644
--- a/src/lib/util/buffer.h
+++ b/src/lib/util/buffer.h
@@ -207,6 +207,24 @@ public:
}
//@}
+ /// @brief Read specified number of bytes as a vector.
+ ///
+ /// If the specified buffer is too short, it will be expanded
+ /// using the vector::resize() method.
+ ///
+ /// @param data Reference to the buffer into which the data will be read.
+ /// @param len Number of bytes to read.
+ ///
+ void readVector(std::vector<uint8_t>& data, size_t len)
+ {
+ if (position_ + len > len_) {
+ isc_throw(InvalidBufferPosition, "read beyond end of buffer");
+ }
+
+ data.resize(len);
+ readData(&data[0], len);
+ }
+
private:
size_t position_;
@@ -519,6 +537,6 @@ typedef boost::shared_ptr<OutputBuffer> OutputBufferPtr;
} // namespace isc
#endif // __BUFFER_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
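A minimal usage sketch for the new readVector() (not part of the patch),
assuming the reader class is isc::util::InputBuffer as used by the unit tests
that follow:

    #include <util/buffer.h>
    #include <stdint.h>
    #include <vector>

    int main() {
        const uint8_t wire[] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
        isc::util::InputBuffer buffer(wire, sizeof(wire));

        std::vector<uint8_t> head;
        buffer.readVector(head, 3);   // head is resized to 3 and filled with 01 02 03
        std::vector<uint8_t> tail;
        buffer.readVector(tail, 2);   // continues from the current read position
        // asking for one more byte now would throw isc::util::InvalidBufferPosition
        return (0);
    }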
diff --git a/src/lib/util/tests/buffer_unittest.cc b/src/lib/util/tests/buffer_unittest.cc
index 0cd1823..666924e 100644
--- a/src/lib/util/tests/buffer_unittest.cc
+++ b/src/lib/util/tests/buffer_unittest.cc
@@ -239,4 +239,36 @@ TEST_F(BufferTest, outputBufferZeroSize) {
});
}
+TEST_F(BufferTest, readVectorAll) {
+ std::vector<uint8_t> vec;
+
+ // check that vector can read the whole buffer
+ ibuffer.readVector(vec, 5);
+
+ ASSERT_EQ(5, vec.size());
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 5));
+
+ // ibuffer is 5 bytes long. Can't read past it.
+ EXPECT_THROW(
+ ibuffer.readVector(vec, 1),
+ isc::util::InvalidBufferPosition
+ );
+}
+
+TEST_F(BufferTest, readVectorChunks) {
+ std::vector<uint8_t> vec;
+
+ // check that vector can read the whole buffer
+ ibuffer.readVector(vec, 3);
+ EXPECT_EQ(3, vec.size());
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 3));
+
+ EXPECT_NO_THROW(
+ ibuffer.readVector(vec, 2)
+ );
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata+3, 2));
+}
+
}
diff --git a/tests/lettuce/README b/tests/lettuce/README
new file mode 100644
index 0000000..21a57c7
--- /dev/null
+++ b/tests/lettuce/README
@@ -0,0 +1,127 @@
+BIND10 system testing with Lettuce
+or: to BDD or not to BDD
+
+In this directory, we define a set of behavioral tests for BIND 10. Currently,
+these tests are specific to BIND 10, but we are keeping in mind that RFC-related
+tests could be separated, so that we can test other systems as well.
+
+Prerequisites:
+- Installed version of BIND 10 (but see below how to run it from source tree)
+- dig
+- lettuce (http://lettuce.it)
+
+To install lettuce, if you have the python pip installation tool, simply do
+pip install lettuce
+See http://lettuce.it/intro/install.html
+
+Most systems have the pip tool in a separate package; on Debian-based systems
+it is called python-pip. On FreeBSD the port is devel/py-pip.
+
+Running the tests
+-----------------
+
+At this moment, we have a fixed port for local tests in our setups, port 47806.
+This port must be free. (TODO: can we make this run-time discovered?).
+Port 47805 is used for cmdctl, and must also be available.
+(note, we will need to extend this to a range, or if possible, we will need to
+do some on-the-fly available port finding)
+
+The bind10 main program, bindctl, and dig must all be in the default search
+path of your environment, and BIND 10 must not be running if you use the
+installed version when you run the tests.
+
+If you want to test an installed version of BIND 10, just run 'lettuce' in
+this directory.
+
+We have provided a script that sets up the shell environment to run the tests
+with the build tree version of bind. If your shell uses export to set
+environment variables, you can source the script setup_intree_bind10.sh, then
+run lettuce.
+
+Due to the default way lettuce prints its output, it is advisable to run it
+in a terminal that is wider than the default. If you see a lot of lines twice
+in different colors, the terminal is not wide enough.
+
+If you just want to run one specific feature test, use
+lettuce features/<feature file>
+
+To run a specific scenario from a feature, use
+lettuce features/<feature file> -s <scenario number>
+
+We have set up the tests to assume that lettuce is run from this directory,
+so even if you specify a specific feature file, you should do it from this
+directory.
+
+What to do when a test fails
+----------------------------
+
+First of all, look at the error it printed and see what step it occurred in.
+If written well, the output should explain most of what went wrong.
+
+The stacktrace that is printed is *not* of bind10, but of the testing
+framework; this helps in finding more information about what exactly the test
+tried to achieve when it failed (as well as help debug the tests themselves).
+
+Furthermore, if any scenario fails, the output from long-running processes
+will be stored in the directory output/. The name of the files will be
+<Feature name>-<Scenario name>-<Process name>.stdout and
+<Feature name>-<Scenario name>-<Process name>.stderr
+Where spaces and other non-standard characters are replaced by an underscore.
+The process name is either the standard name for said process (e.g. 'bind10'),
+or the name given to it by the test ('when i run bind10 as <name>').
+
+These files *will* be overwritten or deleted if the same scenarios are run
+again, so if you want to inspect them after a failed test, either do so
+immediately or move the files.
+
+Adding and extending tests
+--------------------------
+
+If you want to add tests, it is advisable to first go through the examples to
+see what is possible, and read the documentation on http://www.lettuce.it
+
+There is also a README.tutorial file here.
+
+We have a couple of conventions to keep things manageable.
+
+Configuration files go into the configurations/ directory.
+Data files go into the data/ directory.
+Step definitions go into the features/terrain/ directory (the name terrain is
+chosen for the same reason Lettuce chose terrain.py: this is the place the
+tests 'live' in).
+Feature definitions go directly into the features/ directory.
+
+These directories are currently not divided further; we may want to consider
+this as the set grows. Due to a (current?) limitation of Lettuce, this is
+not possible for feature files; the python files containing steps and
+terrain must be below or at the same level as the feature files.
+
+Long-running processes should be started through the world.RunningProcesses
+instance. If you want to add a process (e.g. bind9), create start, stop and
+control steps in terrain/<base_name>_control.py, and let it use the
+RunningProcesses API (defined in terrain.py). See bind10_control.py for an
+example.
+
+For sending queries and checking the results, steps have been defined in
+terrain/querying.py. These use dig and store the results split up into text
+strings. This is intentionally not parsed through our own library (as that way
+we might run into a 'symmetric bug'). If you need something more advanced from
+query results, define it here.
+
+Some very general steps are defined in terrain/steps.py.
+Initialization code, cleanup code, and helper classes are defined in
+terrain/terrain.py.
+
+To find the right steps, case insensitive matching is used. Parameters taken
+from the steps are case-sensitive though. So a step defined as
+'do foo with value (bar)' will be matched when using
+'Do Foo with value xyz', but xyz will be taken as given.
+
+If you need to add steps that are very particular to one test, create a new
+file with a name relevant for that test in terrain. We may want to consider
+creating a specific subdirectory for these, but at this moment it is unclear
+whether we need to.
+
+We should try to keep steps as general as possible, while not making them too
+complex and error-prone.
+
diff --git a/tests/lettuce/README.tutorial b/tests/lettuce/README.tutorial
new file mode 100644
index 0000000..18c94cf
--- /dev/null
+++ b/tests/lettuce/README.tutorial
@@ -0,0 +1,157 @@
+Quick tutorial and overview
+---------------------------
+
+Lettuce is a framework for doing Behaviour Driven Development (BDD).
+
+The idea behind BDD is that you first write down your requirements in
+the form of scenarios, then implement their behaviour.
+
+We do not plan on doing full BDD, but such a system should also help
+us make system tests and, hopefully, better identify what exactly is
+going wrong when a test fails.
+
+Lettuce is a python implementation of the Cucumber framework, which is
+a ruby system. We chose lettuce because we already need python
+anyway, so chances are higher that any system we want to run it on
+supports it. It only supports a subset of cucumber, but more cucumber
+features are planned. As I do not know the details of cucumber well, I
+can't really say what is there and what is not.
+
+A slight letdown is that the current version does not support python 3.
+However, as long as the tool-calling glue is python2, this should not
+cause any problems, since these aren't unit tests; we do not plan to use
+our libraries directly, but only through the runnable scripts and
+executables.
+
+-----
+
+Features, Scenarios, Steps.
+
+Lettuce makes a distinction between features, scenarios, and steps.
+
+Features are general, well, features. Each 'feature' has its own file
+ending in .feature. A feature file contains a description and a number
+of scenarios. Each scenario tests one or more particular parts of the
+feature. Each scenario consists of a number of steps.
+
+So let's open up a simple one.
+
+-- example.feature
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+ # steps go here
+--
+
+I have predefined a number of steps we can use; as we build tests we
+will need to expand these, but we will look at them shortly.
+
+This file defines a feature; just under the feature name we can
+provide a description of the feature.
+
+The one scenario we have now has no steps, so if we run it we should
+see something like:
+
+-- output
+> lettuce
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+
+1 feature (1 passed)
+1 scenario (1 passed)
+0 step (0 passed)
+--
+
+Let's first add some steps that send queries.
+
+--
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+--
+
+Since we didn't start any bind10, dig will time out and the result
+should be an error saying it got no answer. Errors are in the
+form of stack traces (triggered by failed assertions), so we can find
+out easily where in the tests they occurred. Especially when the total
+set of steps gets bigger we might need that.
+
+So let's add a step that starts bind10.
+
+--
+ When I start bind10 with configuration example.org.config
+--
+
+This is not good enough; it will fire off the process, but setting up
+b10-auth may take a few moments, so we need to add a step to wait for
+it to be started before we continue.
+
+--
+ Then wait for bind10 auth to start
+--
+
+And let's run the tests again.
+
+--
+> lettuce
+
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+1 feature (1 passed)
+1 scenario (1 passed)
+4 steps (4 passed)
+(finished within 2 seconds)
+--
+
+So let's take a look at one of those steps; we'll pick the first one.
+
+A step is defined through a python decorator, which in essence is a regular
+expression; lettuce searches through all defined steps to find one that
+matches. These are 'partial' matches (unless specified otherwise in the
+regular expression itself), so if the step is defined with "do foo bar", the
+scenario can add words for readability, e.g. "When I do foo bar".
+
+Each captured group will be passed as an argument to the function we define.
+For bind10, I defined a configuration file, a cmdctl port, and a process
+name. The first two should be self-evident, and the process name is an
+optional name we give it, should we want to address it in the rest of the
+tests. This is most useful if we want to start multiple instances. In the
+next step (the wait for auth to start), I added an 'of <instance>'. So if we
+define the bind10 'as b10_second_instance', we can specify that one here as
+'of b10_second_instance'.
+
+--
+ When I start bind10 with configuration second.config
+ with cmdctl port 12345 as b10_second_instance
+--
+(line wrapped for readability)
+
+But notice how we needed two steps which we will almost always need
+together? We can also combine steps; for instance:
+
+--
+@step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+--
+
+Now we can replace the two steps with one:
+
+--
+ Given I have bind10 running
+--
+
+That's it for the quick overview. For some more examples, with comments,
+take a look at features/example.feature. You can read more about lettuce and
+its features on http://www.lettuce.it, and if you plan on adding tests and
+scenarios, please consult the last section of the main README first.
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
new file mode 100644
index 0000000..642f2dd
--- /dev/null
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -0,0 +1,17 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
new file mode 100644
index 0000000..1a40d1b
--- /dev/null
+++ b/tests/lettuce/configurations/example2.org.config
@@ -0,0 +1,18 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "auth",
+ "debuglevel": 99
+ }
+ ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47807,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
new file mode 100644
index 0000000..f865354
--- /dev/null
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -0,0 +1,10 @@
+{
+ "version": 2,
+ "Auth": {
+ "database_file": "data/test_nonexistent_db.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/data/empty_db.sqlite3 b/tests/lettuce/data/empty_db.sqlite3
new file mode 100644
index 0000000..f27a8b8
Binary files /dev/null and b/tests/lettuce/data/empty_db.sqlite3 differ
diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3
new file mode 100644
index 0000000..070012f
Binary files /dev/null and b/tests/lettuce/data/example.org.sqlite3 differ
diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature
new file mode 100644
index 0000000..d1ed6b3
--- /dev/null
+++ b/tests/lettuce/features/example.feature
@@ -0,0 +1,142 @@
+Feature: Example feature
+ This is an example Feature set. It is mainly intended to show
+ our use of the lettuce tool and our own framework for it.
+ The first scenario is to show what a simple test would look like, and
+ is intentionally uncommented.
+ The later scenarios have comments to show what the test steps do and
+ support
+
+ Scenario: A simple example
+ Given I have bind10 running with configuration example.org.config
+ A query for www.example.org should have rcode NOERROR
+ A query for www.doesnotexist.org should have rcode REFUSED
+ The SOA serial for example.org should be 1234
+
+ Scenario: New database
+ # This test checks whether a database file is automatically created
+ # Behind the scenes, we take advantage of our initialization routines so
+ # that we are sure this file does not exist, see
+ # features/terrain/terrain.py
+
+ # Standard check to test (non-)existence of a file
+ # This file is actually automatically
+ The file data/test_nonexistent_db.sqlite3 should not exist
+
+ # In the first scenario, we used 'given I have bind10 running', which
+ # is actually a compound step consisting of the following two:
+ # one to start the server
+ When I start bind10 with configuration no_db_file.config
+ # And one to wait until it reports that b10-auth has started
+ Then wait for bind10 auth to start
+
+ # This is a general step to stop a named process. By convention,
+ # the default name for any process is the same as the one we
+ # use in the start step (for bind 10, that is 'I start bind10 with')
+ # See scenario 'Multiple instances' for more.
+ Then stop process bind10
+
+ # Now we use the first step again to see if the file has been created
+ The file data/test_nonexistent_db.sqlite3 should exist
+
+ Scenario: example.org queries
+ # This scenario performs a number of queries and inspects the results
+ # Simple queries have already been shown, but after we have sent a query,
+ # we can also do more extensive checks on the result.
+ # See querying.py for more information on these steps.
+
+ # note: lettuce can group similar checks by using tables, but we
+ # intentionally do not make use of that here
+
+ # This is a compound statement that starts and waits for the
+ # started message
+ Given I have bind10 running with configuration example.org.config
+
+ # Some simple queries that are not examined further
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+ # A query where we look at some of the result properties
+ A query for www.example.org should have rcode NOERROR
+ The last query response should have qdcount 1
+ The last query response should have ancount 1
+ The last query response should have nscount 3
+ The last query response should have adcount 0
+ # The answer section can be inspected in its entirety; in the future
+ # we may add more granular inspection steps
+ The answer section of the last query response should be
+ """
+ www.example.org. 3600 IN A 192.0.2.1
+ """
+
+ A query for example.org type NS should have rcode NOERROR
+ The answer section of the last query response should be
+ """
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN NS ns3.example.org.
+ """
+
+ # We have a specific step for checking SOA serial numbers
+ The SOA serial for example.org should be 1234
+
+ # Another query where we look at some of the result properties
+ A query for doesnotexist.example.org should have rcode NXDOMAIN
+ The last query response should have qdcount 1
+ The last query response should have ancount 0
+ The last query response should have nscount 1
+ The last query response should have adcount 0
+ # When checking flags, we must pass them exactly as they appear in
+ # the output of dig.
+ The last query response should have flags qr aa rd
+
+ A query for www.example.org type TXT should have rcode NOERROR
+ The last query response should have ancount 0
+
+ # Some queries where we specify more details about what to send and
+ # where
+ A query for www.example.org class CH should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org type A class IN to 127.0.0.1:47806 should have rcode NOERROR
+
+ Scenario: changing database
+ # This scenario contains a lot of 'wait for' steps
+ # If those are not present, the asynchronous nature of the application
+ # can cause some of the things we send to be handled out of order;
+ # for instance auth could still be serving the old zone when we send
+ # the new query, or already respond from the new database.
+ # Therefore we wait for specific log messages after each operation
+ #
+ # This scenario outlines every single step, and does not use
+ # 'steps of steps' (e.g. Given I have bind10 running)
+ # We could do that, but as an example this is probably better for
+ # learning the system
+
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ Wait for bind10 stderr message CMDCTL_STARTED
+ A query for www.example.org should have rcode NOERROR
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode REFUSED
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/example.org.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode NOERROR
+
+ Scenario: two bind10 instances
+ # This is more a test of the test system itself: start two bind10 instances
+ When I start bind10 with configuration example.org.config as bind10_one
+ And I start bind10 with configuration example2.org.config with cmdctl port 47804 as bind10_two
+
+ Then wait for bind10 auth of bind10_one to start
+ Then wait for bind10 auth of bind10_two to start
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for bind10_one stderr message DATASRC_SQLITE_OPEN
+
+ A query for www.example.org to 127.0.0.1:47806 should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
new file mode 100644
index 0000000..e104a81
--- /dev/null
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from lettuce import *
+import subprocess
+import re
+
+@step('start bind10(?: with configuration (\S+))?' +\
+ '(?: with cmdctl port (\d+))?(?: as (\S+))?')
+def start_bind10(step, config_file, cmdctl_port, process_name):
+ """
+ Start BIND 10 with the given optional config file, cmdctl port, and
+ store the running process in world with the given process name.
+ Parameters:
+ config_file ('with configuration <file>', optional): this configuration
+ will be used. The path is relative to the base lettuce
+ directory.
+ cmdctl_port ('with cmdctl port <portnr>', optional): The port on which
+ b10-cmdctl listens for bindctl commands. Defaults to 47805.
+ process_name ('as <name>', optional). This is the name that can be used
+ in the following steps of the scenario to refer to this
+ BIND 10 instance. Defaults to 'bind10'.
+ This call will block until BIND10_STARTUP_COMPLETE or BIND10_STARTUP_ERROR
+ is logged. In the case of the latter, or if it times out, the step (and
+ scenario) will fail.
+ It will also fail if there is a running process with the given process_name
+ already.
+ """
+ args = [ 'bind10', '-v' ]
+ if config_file is not None:
+ args.append('-p')
+ args.append("configurations/")
+ args.append('-c')
+ args.append(config_file)
+ if cmdctl_port is None:
+ args.append('--cmdctl-port=47805')
+ else:
+ args.append('--cmdctl-port=' + cmdctl_port)
+ if process_name is None:
+ process_name = "bind10"
+ else:
+ args.append('-m')
+ args.append(process_name + '_msgq.socket')
+
+ world.processes.add_process(step, process_name, args)
+
+ # check output to know when startup has been completed
+ message = world.processes.wait_for_stderr_str(process_name,
+ ["BIND10_STARTUP_COMPLETE",
+ "BIND10_STARTUP_ERROR"])
+ assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(message)
+
+@step('wait for bind10 auth (?:of (\w+) )?to start')
+def wait_for_auth(step, process_name):
+ """Wait for b10-auth to run. This is done by blocking until the message
+ AUTH_SERVER_STARTED is logged.
+ Parameters:
+ process_name ('of <name>', optional): The name of the BIND 10 instance
+ to wait for. Defaults to 'bind10'.
+ """
+ if process_name is None:
+ process_name = "bind10"
+ world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
+ False)
+
+@step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ """
+ Compound convenience step for running bind10, which consists of
+ start_bind10 and wait_for_auth.
+ Currently only supports the 'with configuration' option.
+ """
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+
+@step('set bind10 configuration (\S+) to (.*)(?: with cmdctl port (\d+))?')
+def set_config_command(step, name, value, cmdctl_port):
+ """
+ Run bindctl, set the given configuration to the given value, and commit it.
+ Parameters:
+ name ('configuration <name>'): Identifier of the configuration to set
+ value ('to <value>'): value to set it to.
+ cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+ the command to. Defaults to 47805.
+ Fails if cmdctl does not exit with status code 0.
+ """
+ if cmdctl_port is None:
+ cmdctl_port = '47805'
+ args = ['bindctl', '-p', cmdctl_port]
+ bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ subprocess.PIPE, None)
+ bindctl.stdin.write("config set " + name + " " + value + "\n")
+ bindctl.stdin.write("config commit\n")
+ bindctl.stdin.write("quit\n")
+ result = bindctl.wait()
+ assert result == 0, "bindctl exit code: " + str(result)
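
As a minimal sketch (not part of the patch itself): the step patterns above rely on lettuce passing each captured regex group to the step function as a positional argument, with None for optional parts that did not match. The snippet below shows how the start_bind10 pattern decomposes a step sentence; the sample sentences mirror the feature file earlier in this diff.

import re

# Same pattern as used for start_bind10() above; the three groups map to
# config_file, cmdctl_port and process_name.
pattern = re.compile(r'start bind10(?: with configuration (\S+))?'
                     r'(?: with cmdctl port (\d+))?(?: as (\S+))?')

print(pattern.search('start bind10 with configuration example2.org.config'
                     ' with cmdctl port 47804 as bind10_two').groups())
# ('example2.org.config', '47804', 'bind10_two')

print(pattern.search('start bind10 with configuration example.org.config').groups())
# ('example.org.config', None, None)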
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
new file mode 100644
index 0000000..ea89b18
--- /dev/null
+++ b/tests/lettuce/features/terrain/querying.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This script provides querying functionality
+# The most important step is
+#
+# query for <name> [type X] [class X] [to <addr>[:port]] should have rcode <rc>
+#
+# By default, it will send queries to 127.0.0.1:47806 unless specified
+# otherwise. The rcode is always checked. If the result is not NO_ANSWER,
+# the result will be stored in last_query_result, which can then be inspected
+# more closely, for instance with the step
+#
+# "the last query response should have <property> <value>"
+#
+# Also see example.feature for some examples
+
+from lettuce import *
+import subprocess
+import re
+
+#
+# define a class to easily access different parts
+# We may consider using our full library for this, but for now
+# simply store several parts of the response as text values in
+# this structure.
+# (this actually has the advantage of not relying on our own libraries
+# to test our own, well, libraries)
+#
+# The following attributes are 'parsed' from the response, all as strings,
+# and end up as direct attributes of the QueryResult object:
+# opcode, rcode, id, flags, qdcount, ancount, nscount, adcount
+# (flags is one string with all flags, in the order they appear in the
+# response packet.)
+#
+# 'rcode' is set to the result code; we 'define' one additional rcode,
+# "NO_ANSWER", used when the dig process itself returned an error code.
+# In that case none of the other attributes will be set.
+#
+# The different sections will be lists of strings, one for each RR in the
+# section. The question section will start with ';', as per dig output
+#
+# See server_from_sqlite3.feature for various examples to perform queries
+class QueryResult(object):
+ status_re = re.compile("opcode: ([A-Z]+), status: ([A-Z]+), id: ([0-9]+)")
+ flags_re = re.compile("flags: ([a-z ]+); QUERY: ([0-9]+), ANSWER: " +
+ "([0-9]+), AUTHORITY: ([0-9]+), ADDITIONAL: ([0-9]+)")
+
+ def __init__(self, name, qtype, qclass, address, port):
+ """
+ Constructor. This fires off a query using dig.
+ Parameters:
+ name: The domain name to query
+ qtype: The RR type to query. Defaults to A if it is None.
+ qclass: The RR class to query. Defaults to IN if it is None.
+ address: The IP address to send the query to.
+ port: The port number to send the query to.
+ All parameters must be either strings or have the correct string
+ representation.
+ Only one query attempt will be made.
+ """
+ args = [ 'dig', '+tries=1', '@' + str(address), '-p', str(port) ]
+ if qtype is not None:
+ args.append('-t')
+ args.append(str(qtype))
+ if qclass is not None:
+ args.append('-c')
+ args.append(str(qclass))
+ args.append(name)
+ dig_process = subprocess.Popen(args, 1, None, None, subprocess.PIPE,
+ None)
+ result = dig_process.wait()
+ if result != 0:
+ self.rcode = "NO_ANSWER"
+ else:
+ self.rcode = None
+ parsing = "HEADER"
+ self.question_section = []
+ self.answer_section = []
+ self.authority_section = []
+ self.additional_section = []
+ self.line_handler = self.parse_header
+ for out in dig_process.stdout:
+ self.line_handler(out)
+
+ def _check_next_header(self, line):
+ """
+ Returns true if we found a next header, and sets the internal
+ line handler to the appropriate value.
+ """
+ if line == ";; QUESTION SECTION:\n":
+ self.line_handler = self.parse_question
+ elif line == ";; ANSWER SECTION:\n":
+ self.line_handler = self.parse_answer
+ elif line == ";; AUTHORITY SECTION:\n":
+ self.line_handler = self.parse_authority
+ elif line == ";; ADDITIONAL SECTION:\n":
+ self.line_handler = self.parse_additional
+ elif line.startswith(";; Query time"):
+ self.line_handler = self.parse_footer
+ else:
+ return False
+ return True
+
+ def parse_header(self, line):
+ """
+ Parse the header lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ status_match = self.status_re.search(line)
+ flags_match = self.flags_re.search(line)
+ if status_match is not None:
+ self.opcode = status_match.group(1)
+ self.rcode = status_match.group(2)
+ elif flags_match is not None:
+ self.flags = flags_match.group(1)
+ self.qdcount = flags_match.group(2)
+ self.ancount = flags_match.group(3)
+ self.nscount = flags_match.group(4)
+ self.adcount = flags_match.group(5)
+
+ def parse_question(self, line):
+ """
+ Parse the question section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.question_section.append(line.strip())
+
+ def parse_answer(self, line):
+ """
+ Parse the answer section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.answer_section.append(line.strip())
+
+ def parse_authority(self, line):
+ """
+ Parse the authority section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.authority_section.append(line.strip())
+
+ def parse_additional(self, line):
+ """
+ Parse the additional section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.additional_section.append(line.strip())
+
+ def parse_footer(self, line):
+ """
+ Parse the footer lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ pass
+
+@step('A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?' +
+ '(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')
+def query(step, query_name, qtype, qclass, addr, port, rcode):
+ """
+ Run a query, check the rcode of the response, and store the query
+ result in world.last_query_result.
+ Parameters:
+ query_name ('query for <name>'): The domain name to query.
+ qtype ('type <type>', optional): The RR type to query. Defaults to A.
+ qclass ('class <class>', optional): The RR class to query. Defaults to IN.
+ addr ('to <address>', optional): The IP address of the nameserver to query.
+ Defaults to 127.0.0.1.
+ port (':<port>', optional): The port number of the nameserver to query.
+ Defaults to 47806.
+ rcode ('should have rcode <rcode>'): The expected rcode of the answer.
+ """
+ if qtype is None:
+ qtype = "A"
+ if qclass is None:
+ qclass = "IN"
+ if addr is None:
+ addr = "127.0.0.1"
+ if port is None:
+ port = 47806
+ query_result = QueryResult(query_name, qtype, qclass, addr, port)
+ assert query_result.rcode == rcode,\
+ "Expected: " + rcode + ", got " + query_result.rcode
+ world.last_query_result = query_result
+
+@step('The SOA serial for ([\w.]+) should be ([0-9]+)')
+def query_soa(step, query_name, serial):
+ """
+ Convenience function to check the SOA SERIAL value of the given zone at
+ the nameserver at the default address (127.0.0.1:47806).
+ Parameters:
+ query_name ('for <name>'): The zone to find the SOA record for.
+ serial ('should be <number>'): The expected value of the SOA SERIAL.
+ If the rcode is not NOERROR, or the answer section does not contain the
+ SOA record, this step fails.
+ """
+ query_result = QueryResult(query_name, "SOA", "IN", "127.0.0.1", "47806")
+ assert "NOERROR" == query_result.rcode,\
+ "Got " + query_result.rcode + ", expected NOERROR"
+ assert len(query_result.answer_section) == 1,\
+ "Too few or too many answers in SOA response"
+ soa_parts = query_result.answer_section[0].split()
+ assert serial == soa_parts[6],\
+ "Got SOA serial " + soa_parts[6] + ", expected " + serial
+
+@step('last query response should have (\S+) (.+)')
+def check_last_query(step, item, value):
+ """
+ Check a specific value in the response from the last successful query sent.
+ Parameters:
+ item: The item to check the value of
+ value: The expected value.
+ This performs a very simple direct string comparison of the QueryResult
+ member with the given item name and the given value.
+ Fails if the item is unknown, or if its value does not match the expected
+ value.
+ """
+ assert world.last_query_result is not None
+ assert item in world.last_query_result.__dict__
+ lq_val = world.last_query_result.__dict__[item]
+ assert str(value) == str(lq_val),\
+ "Got: " + str(lq_val) + ", expected: " + str(value)
+
+@step('([a-zA-Z]+) section of the last query response should be')
+def check_last_query_section(step, section):
+ """
+ Check the entire contents of the given section of the response of the last
+ query.
+ Parameters:
+ section ('<section> section'): The name of the section (QUESTION, ANSWER,
+ AUTHORITY or ADDITIONAL).
+ The expected response is taken from the multiline part of the step in the
+ scenario. Differing whitespace is ignored, but currently the order is
+ significant.
+ Fails if they do not match.
+ """
+ response_string = None
+ if section.lower() == 'question':
+ response_string = "\n".join(world.last_query_result.question_section)
+ elif section.lower() == 'answer':
+ response_string = "\n".join(world.last_query_result.answer_section)
+ elif section.lower() == 'authority':
+ response_string = "\n".join(world.last_query_result.authority_section)
+ elif section.lower() == 'additional':
+ response_string = "\n".join(world.last_query_result.additional_section)
+ else:
+ assert False, "Unknown section " + section
+ # replace whitespace of any length by one space
+ response_string = re.sub("[ \t]+", " ", response_string)
+ expect = re.sub("[ \t]+", " ", step.multiline)
+ assert response_string.strip() == expect.strip(),\
+ "Got:\n'" + response_string + "'\nExpected:\n'" + step.multiline +"'"
+
+
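
As a minimal sketch (not part of the patch itself) of what the two regular expressions in QueryResult are written against: dig prints header lines of roughly the form shown below, and the captured groups become the opcode/rcode/id and flags/count attributes described above. The id and counts here are made-up sample values.

import re

status_re = re.compile("opcode: ([A-Z]+), status: ([A-Z]+), id: ([0-9]+)")
flags_re = re.compile("flags: ([a-z ]+); QUERY: ([0-9]+), ANSWER: "
                      "([0-9]+), AUTHORITY: ([0-9]+), ADDITIONAL: ([0-9]+)")

# Sample dig header lines (values invented for illustration)
header = ";; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 42"
flags = ";; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 0"

print(status_re.search(header).groups())  # ('QUERY', 'NOERROR', '42')
print(flags_re.search(flags).groups())    # ('qr aa rd', '1', '1', '1', '0')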
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
new file mode 100644
index 0000000..4050940
--- /dev/null
+++ b/tests/lettuce/features/terrain/steps.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This file contains a number of common steps that are general and may be used
+# by a lot of feature files.
+#
+
+from lettuce import *
+import os
+
+@step('stop process (\w+)')
+def stop_a_named_process(step, process_name):
+ """
+ Stop the process with the given name.
+ Parameters:
+ process_name ('process <name>'): Name of the process to stop.
+ """
+ world.processes.stop_process(process_name)
+
+@step('wait for (new )?(\w+) stderr message (\w+)')
+def wait_for_message(step, new, process_name, message):
+ """
+ Block until the given message is printed to the given process's stderr
+ output.
+ Parameters:
+ new ('new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stderr'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stderr_str(process_name, [message], new)
+
+@step('wait for (new )?(\w+) stdout message (\w+)')
+def wait_for_stdout_message(step, new, process_name, message):
+ """
+ Block until the given message is printed to the given process's stdout
+ output.
+ Parameters:
+ new ('new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stdout'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stdout_str(process_name, [message], new)
+
+@step('the file (\S+) should (not )?exist')
+def check_existence(step, file_name, should_not_exist):
+ """
+ Check the existence of the given file.
+ Parameters:
+ file_name ('file <name>'): File to check existence of.
+ should_not_exist ('not', optional): Whether it should or should not exist.
+ Fails if the file should exist and does not, or vice versa.
+ """
+ if should_not_exist is None:
+ assert os.path.exists(file_name), file_name + " does not exist"
+ else:
+ assert not os.path.exists(file_name), file_name + " exists"
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
new file mode 100644
index 0000000..634d2fb
--- /dev/null
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This is the 'terrain' in which the lettuce lives. By convention, this is
+# where global setup and teardown is defined.
+#
+# We declare some attributes of the global 'world' variables here, so the
+# tests can safely assume they are present.
+#
+# We also use it to provide scenario invariants, such as resetting data.
+#
+
+from lettuce import *
+import subprocess
+import os.path
+import shutil
+import re
+import time
+
+# In order to make sure we start all tests with a 'clean' environment,
+# we perform a number of initialization steps, like restoring configuration
+# files and removing generated data files.
+
+# This approach may not scale; if so we should probably provide specific
+# initialization steps for scenarios. But until that is shown to be a problem,
+# it will keep the scenarios cleaner.
+
+# This is a list of files that are freshly copied before each scenario
+# The first element is the original, the second is the target that will be
+# used by the tests that need them
+copylist = [
+["configurations/example.org.config.orig", "configurations/example.org.config"]
+]
+
+# This is a list of files that, if present, will be removed before a scenario
+removelist = [
+"data/test_nonexistent_db.sqlite3"
+]
+
+# When waiting for output data of a running process, use OUTPUT_WAIT_INTERVAL
+# as the interval in which to check again if it has not been found yet.
+# If we have waited OUTPUT_WAIT_MAX_INTERVALS times, we will abort with an
+# error (so as not to hang indefinitely)
+OUTPUT_WAIT_INTERVAL = 0.5
+OUTPUT_WAIT_MAX_INTERVALS = 20
+
+# class that keeps track of one running process and the files
+# we created for it.
+class RunningProcess:
+ def __init__(self, step, process_name, args):
+ # set it to none first so destructor won't error if initializer did
+ """
+ Initialize the long-running process structure, and start the process.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ """
+ self.process = None
+ self.step = step
+ self.process_name = process_name
+ self.remove_files_on_exit = True
+ self._check_output_dir()
+ self._create_filenames()
+ self._start_process(args)
+
+ def _start_process(self, args):
+ """
+ Start the process.
+ Parameters:
+ args:
+ Array of arguments to pass to Popen().
+ """
+ stderr_write = open(self.stderr_filename, "w")
+ stdout_write = open(self.stdout_filename, "w")
+ self.process = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ stdout_write, stderr_write)
+ # open them again, this time for reading
+ self.stderr = open(self.stderr_filename, "r")
+ self.stdout = open(self.stdout_filename, "r")
+
+ def mangle_filename(self, filebase, extension):
+ """
+ Remove whitespace and non-default characters from a base string,
+ and return the substituted value. Whitespace is replaced by an
+ underscore. Any other character that is not an ASCII letter, a
+ number, a dot, or a hyphen or underscore is removed.
+ Parameter:
+ filebase: The string to perform the substitution and removal on
+ extension: An extension to append to the result value
+ Returns the modified filebase with the given extension
+ """
+ filebase = re.sub("\s+", "_", filebase)
+ filebase = re.sub("[^a-zA-Z0-9.\-_]", "", filebase)
+ return filebase + "." + extension
+
+ def _check_output_dir(self):
+ # We may want to make this overridable by the user, perhaps
+ # through an environment variable. Since we currently expect
+ # lettuce to be run from our lettuce dir, we shall just use
+ # the relative path 'output/'
+ """
+ Make sure the output directory for stdout/stderr redirection
+ exists.
+ Fails if it exists but is not a directory, or if it does not
+ and we are unable to create it.
+ """
+ self._output_dir = os.getcwd() + os.sep + "output"
+ if not os.path.exists(self._output_dir):
+ os.mkdir(self._output_dir)
+ assert os.path.isdir(self._output_dir),\
+ self._output_dir + " is not a directory."
+
+ def _create_filenames(self):
+ """
+ Derive the filenames for stdout/stderr redirection from the
+ feature, scenario, and process name. The base will be
+ "<Feature>-<Scenario>-<process name>.[stdout|stderr]"
+ """
+ filebase = self.step.scenario.feature.name + "-" +\
+ self.step.scenario.name + "-" + self.process_name
+ self.stderr_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stderr")
+ self.stdout_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stdout")
+
+ def stop_process(self):
+ """
+ Stop this process by calling terminate(). Blocks until process has
+ exited. If remove_files_on_exit is True, redirected output files
+ are removed.
+ """
+ if self.process is not None:
+ self.process.terminate()
+ self.process.wait()
+ self.process = None
+ if self.remove_files_on_exit:
+ self._remove_files()
+
+ def _remove_files(self):
+ """
+ Remove the files created for redirection of stdout/stderr output.
+ """
+ os.remove(self.stderr_filename)
+ os.remove(self.stdout_filename)
+
+ def _wait_for_output_str(self, filename, running_file, strings, only_new):
+ """
+ Wait for a line of output in this process. This will (if only_new is
+ False) first check all previous output from the process, and if not
+ found, check all output since the last time this method was called.
+ For each line in the output, the given strings array is checked. If
+ any output lines checked contains one of the strings in the strings
+ array, that string (not the line!) is returned.
+ Parameters:
+ filename: The filename to read previous output from, if applicable.
+ running_file: The open file to read new output from.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ if not only_new:
+ full_file = open(filename, "r")
+ for line in full_file:
+ for string in strings:
+ if line.find(string) != -1:
+ full_file.close()
+ return string
+ wait_count = 0
+ while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
+ where = running_file.tell()
+ line = running_file.readline()
+ if line:
+ for string in strings:
+ if line.find(string) != -1:
+ return string
+ else:
+ wait_count += 1
+ time.sleep(OUTPUT_WAIT_INTERVAL)
+ running_file.seek(where)
+ assert False, "Timeout waiting for process output: " + str(strings)
+
+ def wait_for_stderr_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stderr output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stderr_filename, self.stderr,
+ strings, only_new)
+
+ def wait_for_stdout_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stdout output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stdout_filename, self.stdout,
+ strings, only_new)
+
+# Container class for a number of running processes
+# i.e. servers like bind10, etc
+# one-shot programs like dig or bindctl are started and closed separately
+class RunningProcesses:
+ def __init__(self):
+ """
+ Initialize with no running processes.
+ """
+ self.processes = {}
+
+ def add_process(self, step, process_name, args):
+ """
+ Start a process with the given arguments, and store it under the given
+ name.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ Fails if a process with the given name is already running.
+ """
+ assert process_name not in self.processes,\
+ "Process " + process_name + " already running"
+ self.processes[process_name] = RunningProcess(step, process_name, args)
+
+ def get_process(self, process_name):
+ """
+ Return the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name]
+
+ def stop_process(self, process_name):
+ """
+ Stop the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ self.processes[process_name].stop_process()
+ del self.processes[process_name]
+
+ def stop_all_processes(self):
+ """
+ Stop all running processes.
+ """
+ for process in self.processes.values():
+ process.stop_process()
+
+ def keep_files(self):
+ """
+ Keep the redirection files for stdout/stderr output of all processes
+ instead of removing them when they are stopped later.
+ """
+ for process in self.processes.values():
+ process.remove_files_on_exit = False
+
+ def wait_for_stderr_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stderr output.
+ Parameters:
+ process_name: The name of the process to check the stderr output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stderr_str(strings,
+ only_new)
+
+ def wait_for_stdout_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stdout output.
+ Parameters:
+ process_name: The name of the process to check the stdout output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stdout_str(strings,
+ only_new)
+
+@before.each_scenario
+def initialize(scenario):
+ """
+ Global initialization for each scenario.
+ """
+ # Keep track of running processes
+ world.processes = RunningProcesses()
+
+ # Convenience variable to access the last query result from querying.py
+ world.last_query_result = None
+
+ # Some tests can modify the settings. If the tests fail half-way, or
+ # don't clean up, this can leave configurations or data in a bad state,
+ # so we copy them from originals before each scenario
+ for item in copylist:
+ shutil.copy(item[0], item[1])
+
+ for item in removelist:
+ if os.path.exists(item):
+ os.remove(item)
+
+@after.each_scenario
+def cleanup(scenario):
+ """
+ Global cleanup for each scenario.
+ """
+ # Keep output files if the scenario failed
+ if not scenario.passed:
+ world.processes.keep_files()
+ # Stop any running processes we may have had around
+ world.processes.stop_all_processes()
+
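
As a minimal sketch (not part of the patch itself) of the tell/readline/seek polling idiom used by _wait_for_output_str: when readline() returns an empty string we have reached the current end of the file, so the loop sleeps and rewinds to the remembered offset before trying again. The function name and defaults below are illustrative; the defaults mirror OUTPUT_WAIT_INTERVAL and OUTPUT_WAIT_MAX_INTERVALS.

import time

def tail_until(path, needle, interval=0.5, max_intervals=20):
    """Poll a growing log file until a line containing 'needle' appears,
    or give up after roughly interval * max_intervals seconds."""
    waited = 0
    with open(path, "r") as log:
        while waited < max_intervals:
            where = log.tell()
            line = log.readline()
            if line:
                if needle in line:
                    return line
            else:
                waited += 1
                time.sleep(interval)
                log.seek(where)
    return None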
diff --git a/tests/lettuce/setup_intree_bind10.sh.in b/tests/lettuce/setup_intree_bind10.sh.in
new file mode 100755
index 0000000..40fd82d
--- /dev/null
+++ b/tests/lettuce/setup_intree_bind10.sh.in
@@ -0,0 +1,46 @@
+#! /bin/sh
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
+export PYTHON_EXEC
+
+BIND10_PATH=@abs_top_builddir@/src/bin/bind10
+
+PATH=@abs_top_builddir@/src/bin/bind10:@abs_top_builddir@/src/bin/bindctl:@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
+export PATH
+
+PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
+export PYTHONPATH
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+# TODO: We need to do this feature based (ie. no general from_source)
+# But right now we need a second one because some spec files are
+# generated and hence end up under builddir
+B10_FROM_BUILD=@abs_top_builddir@
+export B10_FROM_BUILD
+
+BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
+export BIND10_MSGQ_SOCKET_FILE
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 49ef0f1..565b306 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -50,7 +50,7 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth false
+echo 'config remove Boss/components b10-auth
config commit
quit
' | $RUN_BINDCTL \
@@ -61,7 +61,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth true
+echo 'config add Boss/components b10-auth
+config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
diff --git a/tests/system/ixfr/in-3/tests.sh b/tests/system/ixfr/in-3/tests.sh
index 858b815..d47a221 100644
--- a/tests/system/ixfr/in-3/tests.sh
+++ b/tests/system/ixfr/in-3/tests.sh
@@ -22,6 +22,8 @@
# server; the server should not respond to the request, so the client should
# then send an AXFR request and receive the latest copy of the zone.
+# TODO It seems bind9 still allows IXFR even with 'provide-ixfr no;' set
+
. ../ixfr_init.sh
status=$?
@@ -29,9 +31,6 @@ status=$?
old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
echo "I:SOA serial of IXFR client $CLIENT_NAME is $old_client_serial"
-# TODO: Need to alter configuration of BIND 10 server such that it accepts
-# NOTIFYs from and sends IXFR requests to the BIND 9 master.
-
# If required, get the IXFR server to notify the IXFR client of the new zone.
# Do this by allowing notifies and then triggering a re-notification of the
# server.
@@ -48,8 +47,20 @@ status=`expr $status + $?`
compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
status=`expr $status + $?`
-# TODO: Check the BIND 10 log, looking for the IXFR messages that indicate that
-# it has initiated an IXFR and then an AXFR.
+# Check the log for the IXFR attempt and the AXFR fallback
+grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
+ exit 1
+fi
+
+grep XFRIN_XFR_TRANSFER_FALLBACK nsx2/bind10.run
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no fallback message in BIND10 log"
+ exit 1
+fi
echo "I:exit status: $status"
exit $status
diff --git a/tests/system/ixfr/named_noixfr.conf b/tests/system/ixfr/named_noixfr.conf
index b0d972a..d171876 100644
--- a/tests/system/ixfr/named_noixfr.conf
+++ b/tests/system/ixfr/named_noixfr.conf
@@ -33,6 +33,7 @@ options {
ixfr-from-differences no;
notify explicit;
also-notify { 10.53.0.2; };
+ provide-ixfr no;
};
zone "example" {