BIND 10 trac833, updated. 01c6801b65e167ba2cf635143b988bf4bcbbdc68 Merge branch 'trac1228'
BIND 10 source code commits
bind10-changes at lists.isc.org
Wed Nov 9 19:29:15 UTC 2011
The branch, trac833, has been updated
via 01c6801b65e167ba2cf635143b988bf4bcbbdc68 (commit)
via 31d5a4f66b18cca838ca1182b9f13034066427a7 (commit)
via 0a3592efda9bd97cf251163cf9a30f38122cb7c2 (commit)
via 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2 (commit)
via e6a596fe8f57103c735d8e135f855d46c248844c (commit)
via f8cea54b5bb8f870a01beebbdcde5eb90dd7d8b4 (commit)
via 137a61f2afcd6d16ea20c3a4436046d783a5babf (commit)
via 6b75c128bcdcefd85c18ccb6def59e9acedd4437 (commit)
via 1a5bd80bbe01abbb2a5932bc43fab8e7a287dcf5 (commit)
via c03e6df1521a378fa3cb9eab4a11db93e6e34969 (commit)
via 8cea64b69af8d5ef21497d2f1c9812968ce5d8f7 (commit)
via 1aa233fab1d74dc776899df61181806679d14013 (commit)
via 45bd390859827c02965765b4b146b5351cbbb1c1 (commit)
via 0f6b216a89583edd445942386df5a388b39149d5 (commit)
via ac552055bc8a4d996a0c24eb5f13d01667a3d77a (commit)
via 26aaecc388f8c152b5d63a1f3906ba5a625b0e31 (commit)
via 10c84106e8b34d78fa1916e4bc3db15030fd94f9 (commit)
via 23cfc5b4d9b384172d0eadd2269ed6a6121966a8 (commit)
via 007d31f50876cd58a031dd86b461145e77bea63e (commit)
via 27b7f9d36113514773777eb94bf66a3ef8c49a82 (commit)
via 6716721a7c10737d86a4a29af530d54a458f83ca (commit)
via e8aa8b8b994146dfff6d29435a66c88dcf79eb69 (commit)
via 5d6c71aeb2575883488b2cde87501aa84260b1ab (commit)
via 233d2d783e6be97f208998f9fcf79404eea5c9b3 (commit)
via dee6a4739aee15e8899da2e35d179cb1d8623e76 (commit)
via 50672f2d6073e813fb80250398b6e6a2b93c915d (commit)
via 1a90f118bf69d6239ca290f712bfeb89a9027efd (commit)
via 5d290088a1b996011217cf801e37600d5bcd037e (commit)
via 3d59d6a24e3a84c3ca453721649e6adfab863c0e (commit)
via a95b528af25a2b3bda91f9b88c04a20b0b783208 (commit)
via 58e8ca7d1c5d8f4b69aa174405e4ef280b8012cc (commit)
via aa13f832395794bab3647ed375ac8a6e2d26e55f (commit)
via 0ea04c4bb216cc822be49626d4b0269956fd070e (commit)
via b03d29677700c1dd2a527dafe9987defb7556e97 (commit)
via 043ff1e7ec5f2c8e3d6b7e278418fc03eea2b09f (commit)
via 9697c6b3cc3e49d96efc6777c1dba5ecb00eb785 (commit)
via 09e4d880b9e7260caf6b5ec763aa1e0712531657 (commit)
via 33a0d78c8ff1bd0083251fdad2def37c6c9064dc (commit)
via a28f94240549b3b869e6aef5265d46afbd09f6aa (commit)
via b843d05fdaefa92abcec50a781dbdfbadb4c9bed (commit)
via b7e1847c3a1979d3ac593de435e142335cbc7188 (commit)
via b3af32e148d004ef5010d37eddccf6df57bdb257 (commit)
via 2104208cfcc7ab912cf2d530697c7192608f3c5d (commit)
via 7e1e5f38f1d28c8e19337fb56f3dacba81341ec8 (commit)
via 8635b169171d0d88ce19f46039ded6e1dab7b72c (commit)
via 05d4deb643271e0f0b0dcfb22809714086d50788 (commit)
via 1c8dfb0cdb80841bea487ee355ce85c749223810 (commit)
via 0613c0e0ebfcc8e3cf37678bd6799889569beb83 (commit)
via 66bb38a4d0cf296f48181d624d22b1074688de38 (commit)
via 7d2826b519f95b2fecd299e15952e897c5a60b2b (commit)
via e9f0637479f992936b2feab96e50a84a6a4dfebd (commit)
via c3b01cc59ba03c6054af4bae42e08965b3f60eb0 (commit)
via 409e800ffc208240ec70eb63bc2e56aadfbb21e1 (commit)
via 6e4e3ac19c322c65679c6c5653cc41b80305d9b9 (commit)
via f80ab7879cc29f875c40dde6b44e3796ac98d6da (commit)
via 00a99483151a21e73ef432dcba73347e1fd407f2 (commit)
via d5ade3d32087884e477d8f5b2fa200324b96ea0a (commit)
via 0e776c32330aee466073771600390ce74b959b38 (commit)
via 723a57edeb33afe206a8e350cfc583d5cb451051 (commit)
via 25c802dd1c30580b94345e83eeb6a168ab329a33 (commit)
via 76bbf34210a5cf70853337a9a9f064c07c7aca76 (commit)
via d27f4125c99d13a7a73dee8c196a0d95050a4b62 (commit)
via 081271155ea18a33a135711a983e8882a2f56eea (commit)
via 2182b30eb6b833fe4c902d635aa97ad901518904 (commit)
via 9aaf85731baa1ea5fe9484efc9bf48b264f60d1e (commit)
via dc491833cf75ac1481ba1475795b0f266545013d (commit)
via d07206bb5c5ec4b3411e759a9afc75c2c593a4fa (commit)
via 8fc9df7f444af31a936e1f261f7560b1e222a3ef (commit)
via 254eb201171f450826e2c907098f0c78a7e3c7f4 (commit)
via d38014229e33d2bdb3875e53b9486d54b3920ecc (commit)
via 17565e10ce667cfd7048d4867795ba3cb6876f2e (commit)
via 1cdc35417c6f25f254b7053e801e8415eeba9d84 (commit)
via 0ec187bc1e3cdde29b20f2465c4d5417e04e2d6f (commit)
via ce39dd192fc8ba15479fda1a9da08deb8c3d2225 (commit)
via eb35651d68eba80cbe7a5bc23e72d3544719a33a (commit)
via bef6ceb2905d328c712a45754be23393d56b2a31 (commit)
via e5c133124da1b724f0f452f63fa947fa036c24d3 (commit)
via 1aedd1b56bd3764739d247dda7477bb799a37ac6 (commit)
via cd3588c9020d0310f949bfd053c4d3a4bd84ef88 (commit)
via 40e0797d688e26dae0e93553f63328aa649e9681 (commit)
via 1107b46ec39da9cdac19af44ba79ae5ee8722025 (commit)
via b561ddc9d182cc237143fbd37ab9e6a0277da948 (commit)
via f8c76dbe976b3134974a3b3e28ae9c7586439c3a (commit)
via c6df34ee69d5f4db86abcd4710c359f62f78e8ef (commit)
via 56aa312f698ef597a9d819e5fa28e2b75a9f321b (commit)
via 1cdc605c50c999ffc1225bee5817aa0ae26bcc4d (commit)
via 8b5b28cdbd7be0c7a79950b52679ac4be3db274b (commit)
via b16e9d26953cd7117d14ea8dde9e739cb34cb878 (commit)
via df9b10fae5385c1c0f1cacb2894eee347abe1f09 (commit)
via 607cbae949553adac7e2a684fa25bda804658f61 (commit)
via b5040b229739c8c69463fe462aa8f7b4a8e47f7f (commit)
via 357106fc545e6d506c4ec757d306a955c68d1d5f (commit)
via 8b52836ccac5c331b30812c608d52aa7fc538de5 (commit)
via 410464e7b0f37c8ba149e543c789a598914fc7d7 (commit)
via e715842e4d36c12fb17a8ee3d0a41218ff86ad7c (commit)
via 20b2131e3753a2ac7e78597a95bf244748b7dd3c (commit)
via 0f988d9f9fc26ec5dd3ee1e298ac544af3da2fd3 (commit)
via efa6b47c19bc9f992f1c5c0196e07a01d030ecce (commit)
via 0a7bd2339e604fb26b7bd94bd8c548b188d60adc (commit)
via a72886e643864bb6f86ab47b115a55e0c7f7fcad (commit)
via 6442c07428bf7b8abeb73c4b6a7729ecd4b853c5 (commit)
via ed04555e46292f9d573372b07000384b6f0118af (commit)
via 4f6c6441787be0a145917ae8935b70bb89f27b7a (commit)
via e13d28918a391060d9c1f286d19308cb10975cd9 (commit)
via 5b7e0424c3d826d5c7a9a247d63c7d716b08e470 (commit)
via 46adf014f18c6b3f9a685b8f0fdd0775a583a7c5 (commit)
via 1e9bb55e135af5a0d8dc353a2ffde7c5b247f92a (commit)
via 738b11db9f13c00f5a9ddfb3ab9996fbf85c42d8 (commit)
via 9b76badecd4b688c95c5f83ecdc168922e48680b (commit)
via 07520bd77da400ca476432f8bedcd934d992ec81 (commit)
via 2ab68057dceb0d471daf4524ba338f8f45e942f2 (commit)
via 11981fea517310f13154bf7695f0278b571ac28a (commit)
via 092dbe3f2af0a0c010a081f64947dbedb11b3468 (commit)
via 1fc79b932eaa88be33c224e4eea3fc58907e98bd (commit)
via 8d36a0115d1b3051b88c9f9687103fa2427e749c (commit)
via 65bd895a45fd28c43f748f07aad5fb9321fa6a0a (commit)
via bfab5a33ceabe3f0d31bd465d13308c8b84adf68 (commit)
via ef51c8418dc44bf2882c898990b30fc76ca9a97b (commit)
via ab642e89554bedf0a66c2358db71ec16ddeb2e7f (commit)
via 91c2cf35e41642a997df020de797324bb4cfedcc (commit)
via c6e8dd84e81f5686d45cc41f514d4f61d075a276 (commit)
via 94282a20ab88b590e8b3b2522da2873ca81c632a (commit)
via 4ddb345fdc31614e191b0a0dea282ddd2ebe279e (commit)
via 18b04945884fbcc1783e52bed845c34395c0acf8 (commit)
via 7d25b201c0bc91987c4d9743d0c21b9486b98fd8 (commit)
via a1e64504a4d039b4c7f7434451f169c475a1a35a (commit)
via 9e6570256e27c28b20a17fc34de5689ee4685091 (commit)
via a0e6002f56e624a7cbb48fb06d4ddbc612e315bd (commit)
via b01c18148a840b0d5719cbcd2653bf1b346e45f9 (commit)
via 3db6583d93c42b3cb01ac5619d59d19645bd60bf (commit)
via 1d43b46ab58077daaaf5cae3c6aa3e0eb76eb5d8 (commit)
via 41f528a9eacdb430406a0d9047049585cae31db8 (commit)
via 0fed56c3692e358184958cc1263cff67db0f62cb (commit)
via 1173960107363c04608726b57218a54d2b3b3d56 (commit)
via e76affc220a5f62b24e34152afdda62328a327ec (commit)
via d15cad92c958a6380c90ba76a2ea968e1d8304dc (commit)
via e098bcfbef9b8a66c3330bd37c6bbd8d72a1399e (commit)
via 784f0a8916465d6ec9c47db9f7f3af0fbd564bed (commit)
via 5cb4d41cf68ac18fb5a5db68046e3d06b6552e20 (commit)
via b5d072cfe24be6ad1636dfdb50405ff32473a413 (commit)
via 6060fcf2a39711ba5d842a311ea03a47054f2ffc (commit)
via 00ff3b7b99fd40c267b91fcc2d8d8396e6209873 (commit)
via 975c64367afd77288b193ae3beb5b95688deab3b (commit)
via cf1ce254c246be39069e7e7277e1c506e1b239a0 (commit)
via a7d0518a8c66ebc0eb471eccd67054d27caa07a3 (commit)
via b93bdb9b324b7dc56bd12b5c781e20275bfc3310 (commit)
via 351ce9ee1612362800453a280dabc012565493c6 (commit)
via 44147cd660a85ea909f54e496ae3c8ad1ed583fa (commit)
via ea78ae80aa517556f7c5ac722f324baaf422f08e (commit)
via ba1568e6482268cea9dbf7f980a17423133c65eb (commit)
via bad7607f03104c81cf7224f6fd71db009219ad51 (commit)
via 56d5c4a16e39b3aa6c1786e1ceebb8550c0429e3 (commit)
via 96e22f4284307b1d5f15e03837559711bb4f580c (commit)
via b1380ef8f0534540970ee93a24f955db89891e05 (commit)
via 673ef8efd5d474d66d62d134348730518160cbf9 (commit)
via f7c85718e562f5cbbd6eafeb2549a21f358afba8 (commit)
via c3385a5449721914b56448705cae8af449e6d337 (commit)
via b10e71aafd6c8b4227083d8e1c87da8878198816 (commit)
via 25e2cd129a9f7b5a944692152e173dc2896825fb (commit)
via d69588a14a8886c02a1510820d69f319171b68c7 (commit)
via f08602b2e2a311b9662e65c36c56f9f38461c194 (commit)
via 6e68b97b050e40e073f736d84b62b3e193dd870a (commit)
via 78252609c39a14fb24a879c74108705c7cffed49 (commit)
via ad134a3c515577b5fbff5d05733bdf8d4265fb45 (commit)
via 3c5fd61a4a294c75b6e90dd5a78aec980c387432 (commit)
via 4e4e22281cd1c9dbec42d7c0a2842c92a69b1e8e (commit)
via d3fbd47b4323cbd12fdf3c07af74a6dd7514492a (commit)
via 6e65029d717a44a8250ca6d7e9b57c2927ea6d11 (commit)
via 46e8133ce6aced930a85be2536b5cf1e493e9ab2 (commit)
via 431973bb16c68442614aac015f38e44ed39e960a (commit)
via 4cf570ad0a107cbf70a6e96e8db30eb2c8b8a2ff (commit)
via 3872fd58983155a69817da3744db56ab665e9707 (commit)
via a670d589923976af730c4387c7c4707552efad0d (commit)
via edffc4851f7373294b6486a5d6171f406f7e1de6 (commit)
via 299473702fedd1cab6967683ad7172b88c35f353 (commit)
via 1814e37c1b61225bdb05ea81c3fcc6ea3320e8d9 (commit)
via b3daacd1da0cefe8adfef38f8d324db55a7f2cc1 (commit)
via 499668edac6fd5929dc43391e3281046d0512388 (commit)
via c30cda06f4b0ce0dfeb99badab779b72175000f9 (commit)
via 0a6bcde01f641bf024e9179d2e753d7d2ef4df41 (commit)
via 587102b55b9cee15faf4fd9d9c4c3ad81c88666d (commit)
via 3a86448c1c0a34fd3d27b3395167a63d5e59e733 (commit)
via 03265efc1c833609c21c4e6d2daf8227d20f68b9 (commit)
via b341cd21229db04e564fc8da0398e3ddfe883e46 (commit)
via aa35477883e1a5b1740092296d463ecfd277dbbb (commit)
via 701074ebbf30930b92f8b06d9cc88428aed8db5f (commit)
via e009438536339927f645f9a272c23e43cd0660fc (commit)
via 7714dea5be87f0708ab858bf369390128d2cd2be (commit)
via 6d197660434d715c4e57c6acf792ffdb0528c147 (commit)
via c3bde52d5e1e2748f9d60aa8740fa044260913d5 (commit)
via d8ac168592885baab953cbe6e416afc6b72d9e7d (commit)
via 6d8da5a713e33008a9e8bac1ba24367a3ba86a10 (commit)
via d63457baaa31c80bb1ffeefd4f111c7d92685c8c (commit)
via dcd6d7ff4c0671a0995fe4051cea0e525d3f82bc (commit)
via 6ecf994e0d3580064d6c8a490dfba1a02e9930ee (commit)
via 61fdce086a40930595e70168340ee68080b327bf (commit)
via 1b328591b9bd5f366bc6e205aad0cde28e447442 (commit)
via 61488d93393fff47ea8cce1c2b41ac004802caaf (commit)
via 0a54d27ad889cc8931bc5a0b6549325c4fb3e45f (commit)
via 6a54cfb961dae9f44120ae2da4bd4c3693f9ea49 (commit)
via f17fad51f1533f89600fb3c2e265ee2ad79c3f53 (commit)
via 44113e516b30bb58dd7481b2b87a7f88c0ec51a7 (commit)
via 81c031de6abed68c9fb4a89b2a71474f36488b9b (commit)
via ae199c79e42520e8e521668c6f9435796ca92aa3 (commit)
via 7d4cc051f1ab3470bb5f7b5f8ea9e622fc7c7c9b (commit)
via bbfee9cc079856d3b437a1bbb69b4157092cbf97 (commit)
via 797d30d14f37c6d3fdce9c1140ffebd91021bfb6 (commit)
via 6bdd521c49d10867b63158837f6fdc6d06d1f256 (commit)
via 56d8610a9e10792048a10cce86196deee928e203 (commit)
via 9a4db0085e43df8d8facd885eb9c9a0b52280090 (commit)
via 541ea699442bece13605d34e182ff89bca384a43 (commit)
via c88718438ee67b52cfea003b9e3ce1e5fe234bd8 (commit)
via dd7fb442ed97cc469db4275fdc3d4628cd44ea79 (commit)
via 032f9633f4a353c11d0d855984aad0f0392a6ac1 (commit)
via f546d730bb772a8a4b9ea1737ed63d888755673a (commit)
via 9895253cc3a1cb1431a04a4b6d8e9dc82a3e3bc8 (commit)
via 7e8b9cdec8bb9e79069a4534d896a9a4061c3b88 (commit)
via a0cf3955fceb4d810997dfefed7abbf57e4ee1cf (commit)
via 1032195dcf567dcdd1b500ebf177c415ff9aeba3 (commit)
via ad24f1d258542fecf0e83187f6ed5cc8cdce8ca2 (commit)
via 9ebdb058e61cae1dda642fcb00ced7b7554b44fb (commit)
via b88c94b7119650217408d800847dfbbbcea7306c (commit)
via 419b9d48771946a1b0b75b7412cd2da3e7f81a5a (commit)
via 1f311bbc22d17a747ad394ffd00cb130f2999ede (commit)
via fb49d74df87f9e87a7d14b16a3a84b31976a92fe (commit)
via 1817f1e8fd4a3635b8b5e0d581f6a2aa61b5e955 (commit)
via 68ee3818bcbecebf3e6789e81ea79d551a4ff3e8 (commit)
via ca1d0935b9d65aa1f26dbe4f0cfc0c4db7701900 (commit)
via 1561a91d494d02fbddbac1023b2c84367cb1887d (commit)
via 1a4da963a4b1378332c74feb96ff8556d11bbbc8 (commit)
via acab4a018b927cf5887b6de8135dbae0d2dcdbff (commit)
via 84ded89c2309b41f3d7656ffa0112021436ec4cd (commit)
via 5bb1a0ebbed603d81656d5e87196191f1b00aad5 (commit)
via fd5db1f7aa6f49091b4e66193b0379679eede4c4 (commit)
via 190d4380c54493561eda99d66739f31e515f8e4e (commit)
via 2a08eafed9264b790ada134bdee7ee02c995c50e (commit)
via 2d84595398e0a29bd042b848e986e8aa7bc40f75 (commit)
via 021f3f24fcd053a92b6ee305b984f1a6a550cebc (commit)
via 937b5a6f47d4506193de9a3ed77adcef2cb1fd35 (commit)
via c110fcc95f61b07871fd6fe7e24a495a3f49b89c (commit)
via 58e72cb159391aa0c7832d08ddb0df361514918e (commit)
via 26f4192ca701bafca9460e994c61715916091e37 (commit)
via 86b3f90af4f0f643e44fd3f7cfd11d89a42e4ebd (commit)
via 67ea6de047d4dbd63c25fe7f03f5d5cc2452ad7d (commit)
via ce532896000ddcc026045a08ddb9ae2b96ae7ba9 (commit)
via ddf232dc82203a777e0a59aa9b8252aaf5117548 (commit)
via 41c8d6f1170f06e1da8908666444c88b08906f1e (commit)
via 14909927e06d884129baf8baf7fd8760b2dea196 (commit)
via 4a345eca2184ebccec3a17902056d03f5d00e540 (commit)
via 6217a55056c1e2e6fa8d82357d86b218de43ded4 (commit)
via 32012c8148dbf25fea0a490bd8453fcfb3854cbb (commit)
via 01bc2a7ff47131144717e923108f71eda283475b (commit)
via cee641fd3d12341d6bfce5a6fbd913e3aebc1e8e (commit)
via ed787b2156b0a7a88ace941447783c53991a254c (commit)
via 621c92d9a19379bb43e98c821183be1aa4d97c7b (commit)
via e9c6c3cf86e3b1b02c64bf567f0c20f6c1e2f589 (commit)
via 004e1238d580d601f7fd8847ff1c4933de465942 (commit)
via 5da6a0f0e8829140999c69bfb551a305c6bf0257 (commit)
via 25ce3ceaf98ed34ad3a4ebe3cac901c0b6e15a97 (commit)
via 5756a9c761748b960b974f422963fbf8e5498378 (commit)
via 566e635f4f2647a82457acb9c047d890f4cb459b (commit)
via 76f58b2ff1ebc572cef465f5be1445e08e4bf0fb (commit)
via f89dbf486bbbd41c3f4e85c15d2cda91706a37ba (commit)
via a91fbbe9905680873c4f0acf5cff1d712aa68831 (commit)
via 86cab473cc0113b0f83755c14db4035bae675065 (commit)
via ca22c5ab2322ab7620e4b84589da6761fdaa3b62 (commit)
via 838acbebc584fee662143c303b7a110563f4e0de (commit)
via c5d29c73bcd554111ada4dec49f61cfde497cb6c (commit)
via 4ec7a8d9ab678f28abf3b37d40acdf159344cf0f (commit)
via 92b2aa9c962e9ca1cec80f44bee713afa1ac53f3 (commit)
via 6b9d28f7602143bb85fcfcefbaa35cde95fdbde3 (commit)
via 223b19a30e4897c7281bb40c9f366a01c8f449ca (commit)
via bfea61834be28bc3c2413afb586971fc04056a41 (commit)
via 620072324ac5f111f8fd40a4ba6d10879c44e211 (commit)
via 522d27a63d1ff318173e7e4aeb6c1265aba93ca5 (commit)
via c4291199d0ebab1cdb49b80101239b9582c13148 (commit)
via 9d9680719eb0ce32ea039386bfe767dfa41d1968 (commit)
via 2f39435c981e3cb14d2c4e9551af93fbbfc28109 (commit)
via 2276752655f67044fb6ae8f7e14e9ba5f6ee6638 (commit)
via 54ba29f03a62c84ee9cbf1c92db74b57327a1868 (commit)
via 6433a51cb6e72309eb027411ea4fa98adb97a7f8 (commit)
via 4865dbd45b6f94b20b562b11224754313e74bf25 (commit)
via c7b8783766258a4321622b7d7e2bb02a647d0864 (commit)
via b8d8ea4cfb87fd12abe113cf63edbae4a342e6c1 (commit)
via 6ce36056a14fad339ffc6528343aadf12065ca44 (commit)
via 8dbb407ba4adc1bbaf061b5680bafd35c778cd90 (commit)
via f9ff938c75816df97f318a839f01be3f01c93f2a (commit)
via a69020025379d5430fff394465348aa430533458 (commit)
via af27ec87f09d82918b96c9dd6d236b4e39989f7f (commit)
via b47533e918cb5b0c2befe7b0da315819b009c47f (commit)
via f4c7155d41cb008a1a180e567e142ce096a21b88 (commit)
via d647a4589362d2b6efee86e58c9fb38e7084deb4 (commit)
via ad26a33ce7624ea2a9b8685f1255db0c1f80bfa8 (commit)
via 5f9a52abbd9f785ab878e71907c8b6dfa587fab1 (commit)
via 8b024ff407a6987a4c7e55f51b9d08c1c9a185d3 (commit)
via a6646243196e059b1c137c939787117e78523b57 (commit)
via 9578ddf805881e30dc4ae2124bcae6b24b580f65 (commit)
via c3dafdb35cc8cd72e972a0d8212ddea3c09c330c (commit)
via a3e0ed25ca88b74d92e9bfa11414e137b5982de8 (commit)
via 554a8b40e84b9f778f398b7ff15d86140255bc89 (commit)
via 41040f22c80fd9f300f4f75cae3177a5360a80e2 (commit)
via 163fa80307a1ab8882a1895ab1e2e12d22c8fd4b (commit)
via 337198f6b8e619d836e9c51399be97e7a6038674 (commit)
via b0b09a77b7dab7b961f4424d05c135e9eb045b80 (commit)
via d1897d34676045b89edc09a767f8d0ab14d662c9 (commit)
via acb0565bb4ddaf1d51abc511459478e738dff6d7 (commit)
via 99aa9fc05044158e0f41e56da538bd1162d869e2 (commit)
via e2c88f03e394ed8ebcfcff936ee888bf593e22d1 (commit)
via 930d4317ed56bdb7cb0d7d53bac5db297ddd0551 (commit)
via 1db00151f61cdd1c58bcd80dad38f3f97c67dae7 (commit)
via b684bcd2eb5a0def50c149319ab8df379155121f (commit)
via ada1705cfdea36539c48b1e7fc6a0c5cd7f3d8d5 (commit)
via fd39e4e890ce175901311b9e11291628743edde4 (commit)
via b9a2e86c2ee8d688a3e12877eb6dc4894a7eae24 (commit)
via ee7fff3113f67a7dddbf069e2a555e6dbac97f69 (commit)
via 681e0e8b37fcf732b0f4caabae3695756e6a1e9f (commit)
via aadf8f9a5d8dfe13ae2196b61c406c8a8e1d05e7 (commit)
via c38fc5257ebbd0cd444e6b9cedb466b31df66cea (commit)
via ee8c0398005c5aff53be33b07d9d0b6fdd353175 (commit)
via 45970604c644066bd34abd15c8877d5969462250 (commit)
via 044381e03b7f178c7c322861960b79c8a27bb4b1 (commit)
via f2b5473fc2f2dfa13485fe9822e84fadd69ac950 (commit)
via 0cfe2c51b17627d777cec9c33f9652b61f14427c (commit)
via 1bbe7018238856b949b449bdacb43bcf90c79bef (commit)
via d7711ce221b04a035afa9d454c8baf53ec0cb9f0 (commit)
via 625e9b719947e894ad7369d8ca61df23ea31b243 (commit)
via 2878aadf0276c7a52832c7ca7f3bddf5e348d79c (commit)
via b9aefd1018b099666908d64650c8a5ea3e153ff4 (commit)
via 8a40bdd1108f37caacd6bc5f367ecc1587ee53cc (commit)
via ad1161678c25ee35b1cb7d657d1aba411939efdd (commit)
via 0b6ac7ed34c708e6e92c41dc28bc8589864cecd3 (commit)
via b0e43dae72cf709cb01627eb9e3095cc48989f4e (commit)
via c12ca1170ad094e0450efeb462328ec6b6ec7e1f (commit)
via 0c88eb0d723fa43865e185b201aba2685173f378 (commit)
via 9ec6d23aa2ca58dd13a45821c92a926a0780591c (commit)
via 4cfd27849ef6f2e99ee346763695fbc64742d783 (commit)
via 46230c83bb8d70e170fe77e9e936765014b762d2 (commit)
via 594dc98507783efcec6781077467885990094ec6 (commit)
via 656f891efdbb6cda87d10a06f7c2ac883e17fb7b (commit)
via 691a23f33ccde30a0d741b98bf0439228336af01 (commit)
via 1c16d6d7fe6253041362ff994e7594805c297b89 (commit)
via 8eb6232b0094778b4c195a870fa2c06cd1b7d0ab (commit)
via f6445b024942629726daeb591f99af090aa43c28 (commit)
via 146934075349f94ee27f23bf9ff01711b94e369e (commit)
via b126cf8dbf225ff5b12c9a7b6a241d80babf8a42 (commit)
via 911cb21ff76c1b887d8ce5e52a3059eaba9ec7e0 (commit)
via 794cb37669e1a0566c6435e38e247ded643fa96c (commit)
via de9778c0c9db5a2e6ca3cfa384ae5a7b84120281 (commit)
via 14c73702aebaea61c543730e4aec2608b842b5c6 (commit)
via 625d818594e468ebd8bf89a6c09a97208b58071e (commit)
via 81240b14097c5311ba5585f01f344b18b2048fcf (commit)
via 4a4d7bbde30de5eec9cd7753e3f44c92c2c057d7 (commit)
via ff571bb13401ce21184923c973ee2cb67b85cade (commit)
via 3a7f572d5a406e294373ba56b1a0357252fb30cd (commit)
via 03753df452d1871af6f82824286cc07ab40493f9 (commit)
via 4e99a42d3634690c74963ec9fc5c45ae21431775 (commit)
via 2e74ee9f329249738ddf00599090f94ef80eecc1 (commit)
via 6c2de9d212bbb3193304a1ae4fd54ca9137262ff (commit)
via a95e95882763d10fac105a93826d79d5ebe4a449 (commit)
via bc1c9342a382378d6d659e3fcf87d6730ea71e81 (commit)
via 38e4a2c44f8f73f81b56e54a7436bb9662b9851f (commit)
via af3b17472694f58b3d6a56d0baf64601b0f6a6a1 (commit)
via 0e4960761e5bd30e5157e45711da1013d825764e (commit)
via f00e85fff2018483303ccc3dbf7d85b4052cae1c (commit)
via ab1f7bea793d2435080e5cc018f115169ddf07f0 (commit)
via c5753d1c96374bfdf2c8e9fc0773ac036082cfa5 (commit)
via ff329082790af7572016f64a90f62c7be87f593c (commit)
via 32007ad7c992f395895eb8f27343003cf4f94a20 (commit)
via 5d1dc735923493b057014df7fefa8c8d7b04349e (commit)
via 5fb87e69f26c800823be33e81f99e1cb2143e067 (commit)
via 19722a540a20a2dc5370df4d8f1f0bb326175001 (commit)
via 8724a537b4f7d9d93bf06c2df860ea83f247461b (commit)
via d903fe92287645e9701890b0953bd84529665776 (commit)
via ecb3b76489bf838fe32030517e3c8b23000d59bd (commit)
via df1298668ac3e758576b8b2bd6475c70cff7a57f (commit)
via f3f87eb305123de57135aaa96c12190f3bf1951b (commit)
via f5b0e46b8cc66dc5aeef4df4d4e938ab0f4cd3da (commit)
via 0a149e0c7faf8fc0db56d4804acfb3df99dcebb4 (commit)
via 5ca7b409bccc815cee58c804236504fda1c1c147 (commit)
via 1e6ee8d85fb949a2e39eb0f130b6a2d4a6470958 (commit)
via a903d1aae9ab0ab3095144b9d2db7d5fc606b187 (commit)
via dcbc2db0a515208db5cbfc5a2ba88c14120ba1bb (commit)
via be1f9361884f15155c20fc8f8979d9ee32102713 (commit)
via 4f423f99cb3b73d75a736c9610f3faf30cc3d837 (commit)
via 982f6e4d7e7a2ffb0d17add0df1e5643aa38c092 (commit)
via 98104aa8ac64b6602fa6c1c7c7eb08e9b43f0fa6 (commit)
via 5220fc55e92504899d102b5704382382a4e450c1 (commit)
via 21d7a1b1870466cd8b9f6203d509d9a9601e5c87 (commit)
via bb1400f97e377247cda132a14cdcb5dcc3f456e1 (commit)
via 1d007555e13f0e148014b4582f6fbd8b6b7fd386 (commit)
via 9163208c660f8ef8c4b1dbdae6c0c785c516bb1a (commit)
via f5c9c2f489e84de596aff390c498ec31fe44a5b0 (commit)
via 56bd0746aef2a0b789402844e041a268b889b081 (commit)
via c4949d3d2b74f62824b670cf8d07cfe9e965a661 (commit)
via d76b95edce86919636ee0e458f0b9def08a9d2ea (commit)
via d4405856fd2e088fbc7cd4caa9b2e9a6c66e8e83 (commit)
via 99fbf7cc5eacc8c0ec65a19a1eb83b4e0a329cd1 (commit)
via ff4a86af49e629a83e40f35c78c7616a208659c4 (commit)
via 47ea557d9d8a9782e4d576c45c545913bbaac4ea (commit)
via 006133b49eb5d44eeacb1d79593b97ae2212bbca (commit)
via 261656de6d4fbe2f6979162c978023f8859d2289 (commit)
via 419768f653e098ab566c9f94771552e2bfe3cc99 (commit)
via affa93f31bbc9719ac4e2ccc0e44d9a09c2b6a3b (commit)
via 5f5d0ee169a33ed2edf6d80ed1c7f557b993eaf5 (commit)
via a1363e4b0e747cf0814e6f6575311aba9aac1ef2 (commit)
via 8028d7abe44464aacaf7e8cb82a5a23d39fb4489 (commit)
via b6465a25eb8106081484d17a48c75031c14c50d2 (commit)
via a6222db2c3da815eb23c6deab6390066b0969670 (commit)
via 6117a5805ef05672b9e05e961f7fa0f8aa5fca0e (commit)
via 0fc138613824f16378ba2b5462886cb2d97a318a (commit)
via 7e8fc92cb83d984188bd1556ead421bee39d9581 (commit)
via 929daeade2b98ad56b156eadd22308bd6d7f265a (commit)
via 64ba30803ae7a87f1c6bc21eb1a45c413fb6ce43 (commit)
via 6588fc2759e5901f61327f170bb9ce0ec3d0bfcd (commit)
via 4a843f9058e625685cf2338fe89c9a89af3c1777 (commit)
via 1f77f60db465b53342d88b4280b1889f1888b411 (commit)
via 7ae9a73236a4bb7eed9f02b30563b7f4e040863f (commit)
via 35f2bd564e1e0311e3440f09bf81aac822d65a1c (commit)
via f5bb60e5636d908de8534d35b5f06142ae2a8c3a (commit)
via b8d12c83236964f6bbb5cd3910b0960abd0117c1 (commit)
via c260476dc19056181931668db6316055526f4daf (commit)
via 60765d3c47eedd4bf412b53c2ce47c5de41be8a8 (commit)
via b26befde4983f01b37f7e29bc8ebb8dbc7f6c1de (commit)
via d178a34c2798221f7cee90d07bfced84df4908d6 (commit)
via da9206de5ccdb4ff074c0246856ac8de487eff40 (commit)
via 6aa910d6307f825013e2e0d7b5b1e4599a634f1b (commit)
via 9bbc2ac61f19fe7d27f3268fb4de7dd727a59bb0 (commit)
via ff23bfe6d68eeb0972e9b01a45b950e6ae781b01 (commit)
via 0fd60764e65b270cafc1b3b573e5ac14b3c633d6 (commit)
via a3c0c4cffe519c5d70185ec989fe707102a71836 (commit)
via d119c3155e617e120c69abebcf8d22e500dcffca (commit)
via c80a7b8a53dc04e66b55431e2d4c637618293dae (commit)
via 31830397285a50d1636622b58d04fffc7ca883ae (commit)
via c96f735cd5bbbd8be3c32e7d40f264ebfa889be5 (commit)
via 5275b7b65e2ce2acf92528868c9859dd4407c4c1 (commit)
via 973fc74ac4c030a350f436e96d37a582565e41dc (commit)
via 95cbc4efbaab12b66852ede318cb9af0d3f8780b (commit)
via 44f582bd7d16b11259cda469de5f56bad9768059 (commit)
via 48b88725698f7e4979f577c0e86689160aa758c5 (commit)
via 90ab0a155bc5e42ef2ad35569968dd3db9c501bb (commit)
via a46eed49d48d22fcf83eeed363b559e4b60bd840 (commit)
via 1fe148279b130dc4c8c072ab3bd1006cdacfc9f6 (commit)
via 137d1b29b6063f4d1983bde07f6ec5404f67dcee (commit)
via 1afd287718c25b673beb31903eb80bfc9f268d87 (commit)
via a4766a155b7821c9b7eb5e126988007a95118dc9 (commit)
via 8f3c0649785d7fb0df37a9ba9e0e20c978044bb7 (commit)
via 2a2aa4ccfb548b2a18b10e97acd80df324c5d4a8 (commit)
via 02acd96cff43650110f4af6d2fb2a8143887ac00 (commit)
via a6790c80bfcefde81e032db9d3a45c7a9e48faad (commit)
via 2342cdf1ff5563c6afa1901104fe4cda849ad345 (commit)
via 5b302edc6302331a2c39ae1ac5a18759e47340c0 (commit)
via 85071d50cf5e1a569b447ba00e118db04293475a (commit)
via 70f720080190f2ec3536bd5c15c7ada18a7a7fa7 (commit)
via 3898b36a132fe44e51cc99674104d9e1f0d35d36 (commit)
via ed7eecab42af0064d261d9c9dafd701250bbc1d3 (commit)
via 2adf4a90ad79754d52126e7988769580d20501c3 (commit)
via d6616e7ef66b3904e2d585e7b4946900f67d3b70 (commit)
via c4344fadc93b62af473a8e05fc3a453256e4ce13 (commit)
via a9b140ed88b9a25f47e5649b635c8a19e81bfdee (commit)
via f5d7359a945241edf986b7c91c0ad6c7bcf113e3 (commit)
via f1a85d0c97636ce15d830ef56c3440298f3773a7 (commit)
via 6b206d435a3dd92ef4a18f1c4558da147016fe4f (commit)
via cf136247fad510f55ba230f746558274fada1de6 (commit)
via 5f5b2d7a98eff5dc74f74b7018f50e286ae82c2d (commit)
via 7209be736accd15885ad7eafc23b36eec18c2213 (commit)
via 703cd3ed5855e673443e898d427fdc7768c5bceb (commit)
via 1a035954731fce34faf09705bc61b7eb0ad34ac6 (commit)
via ae43bdedcfaabacbc8e4455313e6a5b4d98a68cd (commit)
via 017b4e1bcc7fe62f11650444518f422934c925ff (commit)
via e9e29a281b0b8b9d91fe9097e51c7e5df6d3ff78 (commit)
via fcd39b6e84665a033d7ee4c06bd904e2b416c53a (commit)
via ce00497088209db82fbbabb80381acf92039763c (commit)
via 0fbdaf01b0fc3d7031b51d542b91f6f758f033fa (commit)
via b3a1ea108d3df58dcd2d247fdc87b3d1fbd953cf (commit)
via 2de8b71f8c0e7d02e25aa7ec6fa13f9933c8b534 (commit)
via 4edd9c38112db5161f46533ffb3886c85880ee03 (commit)
via bff7aa9429b7e0a9f26f69dd24c8aa7efc64ffc6 (commit)
via 1e0d70a994d9cf9cabe10d1205c40b74af2a2bc4 (commit)
via 738afedababcfc874fe107d9bc408d69d213813e (commit)
via 8ed59723a5ae90dedcbf741254b65f88a4c98ca1 (commit)
via 3f2d29d0dc92606fac3ba306c34a32a0bec8159e (commit)
via 3bfaa404624697f5e2f08076c78f94a8438e851c (commit)
via f85f868171956abcc1996235a26a276da2ca6209 (commit)
via 1982c235382043d87737ec24779d10da216101a6 (commit)
via f6c675c19790d3715445a7877cc8d1d193f17071 (commit)
via 419fe34aec67a7bf30991f7df7b568133d8eb541 (commit)
via 96d5830820c86c06517fc3f4c0f1c95c478237db (commit)
via 5ab87bf933008827bc39aed0cd4e6bf4cd71a062 (commit)
via 7225bbf8e6e3c892159124e7795f7396b5764bb8 (commit)
via 2056251f56e4c5e3ff785b924061fecfe1ac21e4 (commit)
via a5eeb73116cbc74f6bb3fb4a06b99396a8ceebcb (commit)
via 743dad9408b0a86052156e6a3d4fec1001600017 (commit)
via af927e2c390b49012b276c11991a3f7ef3a592a9 (commit)
via d267c0511a07c41cd92e3b0b9ee9bf693743a7cf (commit)
via 42968abbd4edf489d4d667089033d11e4045f463 (commit)
via 33c0d21361655c08b274c75736b7bcbe99dd3d2d (commit)
via e114429f15c0ff8b5eb77728985281afcfc0d37a (commit)
via 6dbe35be17827ccf8bfc904be707aea01fb4ef94 (commit)
via a8a8ceb589f9f3bf4da29717eec446cb2766032c (commit)
via 1f6c32ac6941c3c2ec456017e73ea74ca5944e1c (commit)
via 71488ea628a1d791eeba41cb2eed3025c6311565 (commit)
via 956e210239d46bebe4574c5ca38b3b51b1bb7c65 (commit)
via fe76209cd8ad96144f0e2fc9522f5fda1d52d9c3 (commit)
via cadfcca91ef5bdb2c72c9db4e918ff6ac7b10e65 (commit)
via eb4917aea94d78ea64fa90f0c70501bbb6d48b37 (commit)
via a01b47d66272166135c20bf15a958bed023ff009 (commit)
via 461acc1e4b464611411ae77b7a72d65c744a740e (commit)
via 9163b3833877225c8b9bd8e59eb7159ea65d3867 (commit)
via e451eade196bc7cc43102412a73faa397253c841 (commit)
via 5a2d0b61afe86668613cbb83a75708b760aae76f (commit)
via 43bbfab2cc57a08da6d2d6ffe8da92efcae9c2ec (commit)
via 38e530b762f7d05caaf06aec41c6df432f0800cf (commit)
via e8e1bd309d449b23dae2b472b650a130300aa760 (commit)
via a5adb8e45ee8c66a19c46bd1bf5f752630619be8 (commit)
via 85ac49c5282c231c71b8d2046889d22b0061db08 (commit)
via ebeb5ead60c5c0d7b16478498b78a8f1ef3b71c3 (commit)
via e38010819247006d20532d24de8dd6c37e0ca664 (commit)
via 00f4c38428153bb5ad99ba1cc40e9a204266dace (commit)
via f7bb760f4d8290d52959ea83b090d1877e4ac9ee (commit)
via b29c5e5221b8e6a9ff65a0c39f14c04afaed5c44 (commit)
via 9e17bd49b426ffba00312cf90ec80d178a20b964 (commit)
via 519720d9c6eb354a2e31089f1c7b8fd0760053f9 (commit)
via c6babcd3e44bc42fdb090d3a4837848d8c7c149c (commit)
via bb444bae93e8e87d1e62214b1819fb73fd7634e4 (commit)
via 8fe024cd171ecf1610419abb70e5d613b94ba5a0 (commit)
via d36eda71276b43e4281ae53fd558155725f4d4eb (commit)
via 32f075fa288dc5ea049cbf72657386889144bd12 (commit)
via 471edfd7a86d91f04536bc7c7fb42ad7239e1731 (commit)
via feeddd7e5b966c9445fc4ac97a6526fa792413cd (commit)
via d801b1e2ebb6c9cf35e3475040b013784f3e6e41 (commit)
via e4c78a2739dddade3aaaa12528afff944458f777 (commit)
via 48bec4c73b92679e91f0cc72fc63bdba9c593e87 (commit)
via dc5aa6284fe6b6f51d85270969f0befd8db1f838 (commit)
via 15e60f1f54722c32c9977f00e49c211f047ee08f (commit)
via 85e4dfa61bf440c132f4ce6bc73130bc6e91719c (commit)
via 7ab4a102ee610f36b4362897431e4fbbeac735c5 (commit)
via 054699635affd9c9ecbe7a108d880829f3ba229e (commit)
via d04acfb82c3425a638f09d2f49208ef86bc7a6b3 (commit)
via 434f4fd17dd3dee1d17e7b2e008f1ab1416d5799 (commit)
via ce8b5fe9567f06f7acba34b9e9b35ad471e2ab67 (commit)
via 34ead9dfeff5f64af36a209cae28075fcbbb3330 (commit)
via fcfe5af9c22c5b666e5ecf646bbe0d9da7b655e9 (commit)
via 1f967a8ffe37f6732dd628d28a13abc442541c38 (commit)
via 3efca5f9b7b7bfeac53044fdd44e5add61397157 (commit)
via a35b62699480e149f22f4e039935bfcf41f97ac2 (commit)
via 9dedc72e89b9ca8ba2c5f3bc562ad9ccd1aa05b0 (commit)
via 7808524aa9bbb424327ac67d7408647cb18840f5 (commit)
via 5b866ef26bd5ae980bb86c494a592ef232552b68 (commit)
via a5387c15e93c6d1925bf4ad0eacdcfd63790c32a (commit)
via d56c782197242e32ccdd23c9e3652ff520f3d58f (commit)
via bd8cb42b61666342ee8bc6c33aed2a168301ff67 (commit)
via 9accf90bb081b057023479f0a86e54017b02cdd3 (commit)
via 9eafb04ee8dbd47022dd9a5e5c1310f88f398d2c (commit)
via 7af1aeddc36a1ac1343f1af12aa29164f1028f03 (commit)
via 15f5d7895a2744376062229cf19593016a773cde (commit)
via ddec42c7a23cca11903ece8f7ab614dcc7e5edd3 (commit)
via d8cac904c7aea4a652a47afb35aceb6ca4808ce8 (commit)
via 433381e5ca62418fc90377d16f1805260b27b619 (commit)
via c8bbdd1d74ac313d8b57d8debe4f7b75490e5df2 (commit)
via e57c5196d3e8dd56b0190799c98b56a5be55333a (commit)
via 06f7bc4b3b69e8fda96f6e626a7dac5b1fbbb233 (commit)
via 0aa4c14ebd1eb0a68c2bcf5c617325596657ea71 (commit)
via 9daa2f686b3bdb03b13e9becf45a722344888cf3 (commit)
via f159ac66aa577889514dc170c87a92c49be5a6cc (commit)
via d6b86a88c7a486f2e5b742fc60d374e48382320e (commit)
via 5ddc441f77a34158039f0328c3ab7c2106b7b3b8 (commit)
via 290e89c515e051dad269f1acbce0b52a541d9c8c (commit)
via 9b8925a4d0ecbd8a09d307dfd56fa15fb8eedcc6 (commit)
via 53314ecb63f3f0f85629b66a228207658d8fd73f (commit)
via 863509e2dc3bf96fd38476d787abb62e0da46624 (commit)
via fe1d6665faf06b3fcc0aaf8ec72905aa4b7ce1f7 (commit)
via 7581a21a7dce1dc6b92ad24293b4269a3531e6d4 (commit)
via 1fd37ae8a4bb25a6e85ffb2158b2ae95fe8cbd04 (commit)
via 8ed3b760c179df435882f2ad96b6dcfad5b6e9fa (commit)
via 3516ab551851273faeeb0b8696695e5f3ffc88f9 (commit)
via 9f8ddd6ee1b73c9403f85b6ef5c85605ca393aa7 (commit)
via 898485cd30084d478e8be688151cd11fb4d492a7 (commit)
via 30f4856101bf23ce155ef0f2ebd1ca6f034d2420 (commit)
via eb4be17ddf3b26c379e3f100cf8e8b0fd4329537 (commit)
via ac06a06d1df9a1cc905b224b79921b0d0ade4c05 (commit)
via 611d0300fb8bb2e87d787023cb5c6030ee07d8d2 (commit)
via fdf02d580f2bb1fbc6fa85ee0edd81a07404d1de (commit)
via a0bb482b46bd05f8c8774bacdd26dc891cb3bef7 (commit)
via cebd7e3562312ade50d972af49239cee7f10d057 (commit)
via 8750dc3ab772e29d7374d779cefb3c8b8c61d2d1 (commit)
via b743e6ba98c8cbb53c45e1c0f59e5a78ba62f5d4 (commit)
via 6556a2ffdd7bdb5370c2f1b3d8c9e8799ef82140 (commit)
via 3e9189a483c0f53eba4f05092c90f7955123f52c (commit)
via 7f5702a379516cee041129c03dd37d67f26d49c1 (commit)
via e60ecc91ad65087c3cff3af479cc455abccbe020 (commit)
via 62bd7736311e166aea3604b8e486b58c1315f82f (commit)
via 9687077033661cf07b6ea2e966299e837a501612 (commit)
via 703d5f36d0102993f311d21e662a28492d8cf7b4 (commit)
via 84d9095c66c765cf78814323597b2e3bbef293d5 (commit)
via e54bc83c4e8a66fd9ab1ae9f27899d70ef82a066 (commit)
via 1a8c86ea2503bffe6dc1f2300dfc2b4efba108cc (commit)
via ed5311a26b7b1368f28191c405ec13da907213ae (commit)
via 493a6449b37b34ac5fe36257b266c229e34d105c (commit)
via 6f6a4cf9d98f2b4550e0949da1e20a7f38440610 (commit)
via 36a53f41a7da580926111dca65652d6389fcd909 (commit)
via 61681dac2023240a4a029072add3a39809ccb7f0 (commit)
via 96dd4d2daf1fb91672a798fa478da0ec8a7ac737 (commit)
via 9354737244e0bb7c22ec684ed652c89991eca913 (commit)
via 9bbc77b6b8381c9a6d831e490a7715ba84b9356f (commit)
via 8023760a5fc6f346cf82340aa50df755b0d0d00a (commit)
via cc0d6e4674fd2e6ebe3775a28ec87fc5c869f924 (commit)
via f9cb0d187f02078b27a0119ce42c83f62461a507 (commit)
via 4fda2b6eefe81f1c197d32a0c8eb14ca1a7d9108 (commit)
via 106e9a793499c81698cf5a938d48933f5e909af4 (commit)
via 26691e282b76d74959e63524b280e77b09ac89df (commit)
via 4cde36d2b97a24f03c192a61248545d0180fb856 (commit)
via c874cb056e2a5e656165f3c160e1b34ccfe8b302 (commit)
via 12fd115d2e1ea8b55f43313ac665c32e07f9498e (commit)
via 84ada921a2fe98489b578b6d780c1ad2e6c31482 (commit)
via 763a994cb14bb11ba823831f54d64071319bfac0 (commit)
via b86d51b24e7d1bb4980426c9a74962628c096ba7 (commit)
via 48d5ac59277e2e8b43f697a0d1d4b0991a40caa0 (commit)
via c191f23dfc2b0179ec0a010a1ff00fa3ae1d9398 (commit)
via 8d2c46f19c1b4f435d7b9180ff6c2e8daf78ab2b (commit)
via 80319933903fbdb359ef9472573bfaceda7c8cd5 (commit)
via 8c838cf57adef3c004b910b086513d9620147692 (commit)
via 1378551aa74712c929a79964ae18d9962ce73787 (commit)
via bb7833f2054edca11a32d24d17486f153db00ec1 (commit)
via c430e464860b4460a0ab32454e53918a1cc7444b (commit)
via 39e529c506a4350cd676bf5ddff6d61686e8814f (commit)
via aba10a01b765b472d57112fd4e09a6fb47b49fa7 (commit)
via 9688dee697e9ad279c6542bf164b820e907e526f (commit)
via c1a72c46b572eee2d94ab53a5589c724fcb1fcf1 (commit)
via 9016513b4d19d2781d0b6f2575b490431e04ec79 (commit)
via 13e8bc43e4888fe9e6df7e536ea0b439c6351199 (commit)
via e89895b7e5f3b7074271c89de281e426c53be347 (commit)
via 938f4e9ba14954551fbc390abb7d1e06d38189c2 (commit)
via b0b0da67c915f3c02020397b8dcf6a078a9b3a90 (commit)
via 1ee8ad4a2b092a6edc35c111c5a3b5b761da0dae (commit)
via c943619d223be1158ae8db5223f655343d06785f (commit)
via 0d874a95d3c782b9c663c64be619f449956df457 (commit)
via 2d325650009f46a1f16ef2e7c1f4ed0827db236f (commit)
via abe73e885b980aace1fd0df492fa321bdd35f01f (commit)
via 53d45f54e33d23a5b4df42dc977a3a6ab597f5c5 (commit)
via 338b54ef4631f0d35601f174eabfa10f1541f46d (commit)
via 698176eccd5d55759fe9448b2c249717c932ac31 (commit)
via 41cbf5a91bdfa0b311aade6b05d2f51f59cce978 (commit)
via d845ae918fe8dce6806c3f927a7c101fc0e2173d (commit)
via 7bc93774a449b3f19748a37186db3efcd3d6c537 (commit)
via d5a58bbe641d32257035a6087f18655e7b66d8fd (commit)
via c64c4730852f74fff8ea75730e0b40cd3b23a85e (commit)
via fdf1c88a53f5970aa4e6d55da42303ce7d4730f7 (commit)
via 33ee923f7139cbda7a616a83d572a4358f456e16 (commit)
via c69a1675dd0434db0b99682d14fa7905fcd3af8f (commit)
via 9b23d60d6f58b18da3995dc3e090d7fd63233bcc (commit)
via 4bb4081381b39c563707c03818a0f9d16ef7846f (commit)
via eef5b0eb5defdd22ef5e351213ab66531f788c5d (commit)
via e7f1ead205f2dc13d6fd6e2a28b121794ca281be (commit)
via 638674c480d47cf957a8b4f7d61dda3320c881ff (commit)
via 0a22b98c05bf5032c190fbfdf9fefceac3597411 (commit)
via f59415a8b5ee951dd298eaf8eecaa21e8955851c (commit)
via 4e458fc15b5c236e1cc44565f6af313753e87a26 (commit)
via e2eca96f1876a72fc8c121c9204d49cb7e9eaeb7 (commit)
via 4a605525cda67bea8c43ca8b3eae6e6749797450 (commit)
via 85455b6e2f7063b10bae9938de1b70f5d319911e (commit)
via 66e1420d30f8e71e867a3b5b0a73ead1156d5660 (commit)
via 16cc75f764b6ea509f386c261b472e282cd606ed (commit)
via b2d2acebebc66495b98eef634ce633eb70cc2411 (commit)
via b1f197c6102ae31ded2e4b61103308dcdfa72a89 (commit)
via acb299784ddbf280aac6ee5a78977c9acbf1fd32 (commit)
via 2418922a1389bbf265b02328f7c4f594257c4026 (commit)
via 44a44c0b568dc997e7522292212e0ef02b522f3d (commit)
via 250ce2abb3d6b48fce778b5e0c651d57582aff7c (commit)
via 99be45a44f97942f9327b16aff368f1650994e0e (commit)
via 7592596f7a9f8dce2e5e8d9311cc40c5199c66e3 (commit)
via c24c42a5e29444313efee6528f172ad66452050d (commit)
via 5e14c4caafaa44b92134c5df01b726f435f46845 (commit)
via 05eaa177051b212669c2a7b9e2194c3e9ba47f14 (commit)
via 9797d47ab90761c50020f78d5a55fb2672ffd7c0 (commit)
via 000164d51a974acf3846a6b0a7795f484e915161 (commit)
via 0b46c391a973bb8d3f0a1681eb0a79e8a196f0f0 (commit)
via 5e5743ecb40da81c4e8ad27ac8b158c9a7aaff87 (commit)
via 9c95bf79406ae791e2f8c7263ff4fddb19d0eda4 (commit)
via 7dfa14ccdb6777ccacb99fe0d716b7d63654426f (commit)
via f0ff0a2f69bcfae3e2a30a3bdeae37b475ae9106 (commit)
via 38816f95cc01f1c7aeec1d42bde3febb308dd98f (commit)
via 0f8868d1ed7d479d05e2a70de67897d133d41ef9 (commit)
via bc03b37015ab6ea23cbec70dbd299c74fb001aba (commit)
via e56e0f7d1ad206f1ebc26e285d82a8e7ff6390e1 (commit)
via 7d2b0148161460b928cf39c7c2969d95d2870d9c (commit)
via 58b843554162e6599ba895c8325985f74adef734 (commit)
via 98cb905a5852321204499985efb42c5a76b9da6e (commit)
via f7a92e4b0336f3c64eb429947657952178b7d76f (commit)
via 3ff9c6c215faa2e1419d4cb67906a1f7772b355a (commit)
via 90b3952ff515f8746ffc6b227695836921bc046d (commit)
via 0372723794501908ae94be9330dcd8577d951f68 (commit)
via 6b27a7ba1c0343725e3d2e9ea7d97426a8f73f0d (commit)
via a8b5aabeb7b56702a85344434d7822a034ff140c (commit)
via 87a3c86e7e132a1ee80bf29b418ad4b61cefc7d8 (commit)
via 8b4f53f245ab45bf07be9b1108fca951133b836a (commit)
via 07b6398dbd11037eb553fc6fcf56dc8051e71150 (commit)
via f0ef6c88066961a038ea1b80face4feaa9a2d17d (commit)
via 8f9f4ece764df4607f695f3f7eb4c421e8ac4c9d (commit)
via 7751d0ac43f1b7186a53ba5dd5cf2eeca6f7dc46 (commit)
via 40cd22fc64c7755efe60cd42cb12851cf3de55a4 (commit)
via ed8d686171f140fd12164d2d34f65b4ab3c97645 (commit)
via 1e32824c93dac7e406d1b35449b42700bf854679 (commit)
via c5d5522f83888a8b442aa7ff17738f3f688749fe (commit)
via 688867daa34ade5075443c77535f80e1d2d76743 (commit)
via d36ded7d95a695f0412f6ccdb59bf55fc600e9d3 (commit)
via b8e90124c19177e0b6b33bd624e244860e2424b3 (commit)
via 5cf1b7ab58c42675c1396fbbd5b1aaf037eb8d19 (commit)
via 17d9827aa40e363650d1698fddba9204f27b5171 (commit)
via 27f447c8b054b17d96abfba431568c1ffe017f0a (commit)
via 219818389cc848dc2d67aff732b9790968851b51 (commit)
via e602f86dae29c62619b0ea8bf2ca69e1ce1b8295 (commit)
via 57f7044d690d38cff90487b5883883a674d2589f (commit)
via 383b6b2891226228ddf3cfd4c3dd8b17ea186b8a (commit)
via 8cc8f4c008f640b7f13f8f1160261275ec14475b (commit)
via b6dd72042939ca62d9ceeb80385eedc7c5f0560d (commit)
via 31e010330189f489c624b7cdb812ef3f33f8e280 (commit)
via 70bba1b3f811261fcef30694568245e83cd64bc5 (commit)
via 6c5f8867a45f40411594372bca09c04ddf5c0002 (commit)
via f1fef139dbc592aa4c7071d47e38e14487ab72e7 (commit)
via 2c8b76ed408547789f2e26ad76773e40e316a392 (commit)
via eefa62a767ec09c20d679876842e15e9d3742499 (commit)
via 58845974d57ee0cd0b261b00d1ededccc7bde105 (commit)
via d49e3c5e79e00b59e518c4bc1f71882adf721696 (commit)
via 06a24c688282b61dd2ce5b6c00608bee34ae3563 (commit)
via b902e70583a9dfb1ee410e297e2da4c8b944ba8d (commit)
via 09349cf206ee9e68618713b97e621b7ef2a6c0a9 (commit)
via ff1bd2a00278bc753a7d035fd5020ff936df1882 (commit)
via c89f3a2f43fd7fe70bcb199fad0ccf94364b1ebe (commit)
via 4c86025464db4603ec07490169aaf4b77868057b (commit)
via 842fc917163f0b8cb2a703a4c7fe078d944932e8 (commit)
via 0eb576518f81c3758c7dbaa2522bd8302b1836b3 (commit)
via 68cf1ccf20ecfcc1e06de69fcd50d13cf8b5e1e0 (commit)
via bd0c874dda60a0f5e235b653e1bb63716cb385f8 (commit)
via b6709a7001e4812c4ed774ef0ff3111fb654d199 (commit)
via 9b4326dc093b71bcd77a527111ea6778795bf068 (commit)
via 2c5b2fc19c21dd12747eb960baee65759847a118 (commit)
via 0aa89cf84c78a9ee8b97a51c17b3982324021f81 (commit)
via d9dd4c5a7438c152f6c9ae2bcc4c9f5ee598728b (commit)
via 03da93322b956e003882c09a8d4ea949f790dbc4 (commit)
via bfa93c0ee79935bf37d379065e219ba0afb0c4e3 (commit)
via 7a061c2e82d62e2b275cb5a8d7460dce7d36f050 (commit)
via a6cbb14cc9c986d109983087313225829f1c91fe (commit)
via 7cc32b7915532354ed7e2fd15f7ca5a9b9b64610 (commit)
via dd340b32df88083fdc17f682094b451f7dcdf6d6 (commit)
via 30c277567f64d09c11cadcb173eef066efdaea07 (commit)
via ec2793914d1090db8c8d94a2f9b92ed97b1a6cba (commit)
via a59c7f28a458842b4edce2d6639639b17a85eb9f (commit)
via 766db4a6100e34e6a29aa9c849b60ba80b551389 (commit)
via f7b5370a9bf82b0b480b75275349d8570ee83c4c (commit)
via 12d62d54d33fbb1572a1aa3089b0d547d02924aa (commit)
via c38112d8b59bfb6e73b5fbc637fa9eaaae42c52d (commit)
via ccb4c0aa696918c579a0b80448fc93606152ec93 (commit)
via 0fa8006ade38ac7206ff57934f3bb866be6407a2 (commit)
via b25df34f6a7582baff54dab59c4e033f6db4e42c (commit)
via 715fee7daf2f966261d997e1b39888f14fb28a45 (commit)
via c3424869801ea8811106f8f97928ed5cd71efbff (commit)
via 4e544fba3459913e23f86dc5e628665bd288c483 (commit)
via 259955ba65c102bd36ec818ca4193aab311e983d (commit)
via 1f81b4916fa3bd0cbf4f41cc7ad8f13450aa6481 (commit)
via 6d6353cea42ed088df3c2c90c4c2741a1b8b2871 (commit)
via 7efa61c40b94d3234dd7fc79a0fc7ae0f1b0a105 (commit)
via 5c3a7ca7b3b28a7a163b0af3cbadc3d8fe7a702b (commit)
via 54c6127e005c8e3dd82cd97d49aca23f5a5d8029 (commit)
via b6261f09b53af42a26d88fd50d74ab1e84524cce (commit)
via 8634aa9cab1c2205629540b4d99b88847148bd80 (commit)
via d1a1871cc6c93ababba62f42bcab5205320b8867 (commit)
via 2a5c5383e3df0e625367bf85b740f62bf777b211 (commit)
via af10f1ef696ee94f817bc389e0e8b6cd08234333 (commit)
via f16de89251e4607eb413df666a64022c50478a4c (commit)
via 3eb0dedb8a5d9835b394484c6112a4b2fcbe9d51 (commit)
via 2f8c4b3da6060a9b57e944726dd61cb1b2a19906 (commit)
via 4e93ba217318854742144bf0b8e30f4c3614db92 (commit)
via ee468e8f02f1cd1bcf09da75170ed62dc230b70e (commit)
via 433f29fd44d8dd6c940e49ee2657b769d70781fe (commit)
via f0274b7451761b2dc48c0be148ecd8563c9800da (commit)
via 45ef63790b34ebc2d26081609bb168aefee800dc (commit)
via 38d80ef7186ac2b18ed234a825894f5f78fc90b1 (commit)
via 88bee2515653d3b5481608bc92a1956c7ea7cf48 (commit)
via e9286ce511be095f2b16b1b7bc503b1e4377593d (commit)
via 723a6d1f333f1d513d5e4fe26a8ee7611767c9fc (commit)
via 88fe1bafce118f40d256097c2bfbdf9e53553784 (commit)
via cbf08d56345922d754182b941b84b18bfddabcda (commit)
via 84a95705e1e8219187e75433baec2fd2fc8ba2fe (commit)
via aa5fd84d438cf165c9836fa545d15c33781401af (commit)
via fac67afceead36ba7296e194942811d9ed3b437b (commit)
via 90b740caf4cc5d207dfa2ac98f1c73d9818792e2 (commit)
via 0ea828cb5c74b0f9a254aeab2c7d31ff214371e5 (commit)
via 170a0661dfb17014a62cd2eeaaa99e408bc55a14 (commit)
via b12f4e55007ee2e8130991f322e782bb31a8a289 (commit)
via 18083458382473b414a3fc7f57623d2241f487ef (commit)
via fbe4ee1f76237fdd586638ce1ded4c6e5bd0bf1d (commit)
via 9c53309978b4a4bf684b3abbb853876c5413f875 (commit)
via 8ee5844e8dc3ec7d99a5890bdc85f54afd8886b6 (commit)
via c9ad781ebbaebb2e57956ac9eda542eaa88a743b (commit)
via 9f441d72a245e3ccce2ee014adaa0ad62e7b0d29 (commit)
via 51c4b53945599a72d550d7380c7107e11b467d5c (commit)
via efe8aa23b59448214ef826a5910e52bdf0ce0015 (commit)
via 4d39f72b87677c194d282a9e93de67dc0adfb4f3 (commit)
via ece8bd155e646869b10fd08817ee7cd71c699c61 (commit)
via b59f898456b33294d71a333d3f3b4fe9dc81e3dd (commit)
via 84d7ae48d44e055cb16e3900cf2c4b2262f6a6da (commit)
via f8b10842465d60483e3bc9827e06115ea8081bfc (commit)
via a4ff990c9b0136c97b101f42dd5498a453fbdf25 (commit)
via 06341cb6cdbd5ff57c376f7b0b25aba4a35bab86 (commit)
via 54aad8af04350eb3a45a4bd6623681efa2f8d2fb (commit)
via 61aaae27e12db2a00cfde674931e5080e733e6b3 (commit)
via 3089b6fd6eff650dc06c0698b80eae1595986677 (commit)
via 3a9dc4fbd7dab867829ba3299d86c2f5b58d864f (commit)
via 5859f177250685fbd49c9562ffc3e984b9d5ebae (commit)
via 4948e0c8965c3d39b6e1bcb1bdb12b9615260a27 (commit)
via 59e2ceaf7b75c38391c518436a70ac3d41b8c8be (commit)
via 4e3c6c5e5b19be3a0f970a06e3e135d1b2fae668 (commit)
via 749e1c9c0627c0a20dc824ecc8c475ecee613d8a (commit)
via 8d8d6bd981771edb3011afedc5e62a59d78d7826 (commit)
via 03e9f45f8a6584a373f1bd15f01f56d9296c842a (commit)
via cb4d8443645a5c3e973b4e2477198686d8d8c507 (commit)
via f847a5e079ceae0346b84fb320ed06ce9b443a63 (commit)
via 05512e090c6c3cb852cebdb85ae7c12e8001603b (commit)
via c35f6b15bb6b703154e05399266dd2051ef9cfa9 (commit)
via 3f2864bf1271ca525858cf3e1fa641e3496eec59 (commit)
via f8720ba467d8e107c512160a5502caf9be58a425 (commit)
via 38af8a4225e8c82564758e8a5629da438220bc87 (commit)
via c5e0db2b7d8fbdb13548e01310f623f131ea0e9c (commit)
via 26c7bfe851f00422beb442a77d25cc0887557b79 (commit)
via f5239632a06383f2b4f6825cb6a006ceb8bea417 (commit)
via 680f05c35753bf1f70392d25b1e6310cf46476ce (commit)
via b12351c21ee92a13536aa89331cc73bd166dbe5f (commit)
via 2e1dceedf6a4f661a8d7e57757b28f9f6cb1a9b3 (commit)
via df69ad0d0231218610f68ecb2b1953ae7f28fa68 (commit)
via 5b713ea8e5fd35fdb1ab7ff953e010ef9b60f98c (commit)
via 02b2e71bdc1564f4272869bb5676727af809870f (commit)
via 8d1942a3b7516e8161b7f54888da2a4a4d27484e (commit)
via 856ff83ad2b97c136de1103a421547bdcb332e74 (commit)
via 7cc9b08f18967fa1a694f5b7e320aad62d0d3e88 (commit)
via 25e56e5d1bc9197e882e3a42285d0efad21a51f2 (commit)
via 87d2a8766e610a0dece7d86268ac9be4122d6d82 (commit)
via 64ac0166d5ea3b565f500f8a770dfa4d7d9f6a28 (commit)
via c86612cd4120b9ad3d00978c04ea252e7d501e44 (commit)
via c1c2ddf5be4556e6e8cd52a314ddd6d026c7e540 (commit)
via ba50f189eced101999efb96672179aa1024204e9 (commit)
via 6906362bebdbe7e0de66f2c8d10a00bd34911121 (commit)
via 83a58b817e5c0432d543b66208f502b059fdbe13 (commit)
via 40126733cc69634035b0cca3a0c90ee3a606ea3b (commit)
via bcafb8b98d5df77108a83a6bd8b7746f7c2616d7 (commit)
via 4ef59f25a452f934408a9ba837cea9b7fab0be48 (commit)
via 3d069e2745070bc23f14c845cb7d8116d919f0da (commit)
via 230df584722d08705f2cb3b99940b764b1cb7865 (commit)
via fda403b09887a24403c3a90d7ad6c95288f2d641 (commit)
via 88095bed9cbc3e39c61eb0ea7dee1646ff13ac7e (commit)
via b557ab47f3355f5fc7d4f87dfa9e4a15e7e9f3e3 (commit)
via 04b04226b726b6e1fea6bba970556b9ed5cc3446 (commit)
via 3a838eb454ed0de4f073b99e94e02014eca63a56 (commit)
via 748c3e1aeb833012a19b651af7d98757a8ffc50f (commit)
via a0e04c0ad837b4b42caf139573f2a95c86cdac76 (commit)
via 4e12574323ca3db3e985acee0540c603b2b33124 (commit)
via 3fc53ba91b92ad40ebbf46272f57a45e3d2e3a27 (commit)
via fcb2409598d37e2078076cf43794ef6c445ac22f (commit)
via c6d2a365580709981852007cd0a9a3b32afaa5c3 (commit)
via da8bfe82aa18a67b1a99fa459f48cea89ee2a41a (commit)
via 7980a6c8e598d34f5f733f5c6c3ca83c0a0f1187 (commit)
via 9c62a36b0ebf9ff4ef3dad1f4d91195d301348ed (commit)
via 2ec9338d84714ea670ee888f1edf5a4ad220ea9a (commit)
via 1d907966f7f0fe7089efe46d8b808d9115f0d167 (commit)
via 93327a85ea63f7043c49a0af2384a1e274ab1dda (commit)
via 75e756cdf9d5b08e859afac5cef38bd818a90e60 (commit)
via 778bd1be6ced7f4a135e2a6bcc7414c4e4bdc27d (commit)
via 38c8e9a9ccfd7fd57bc5fa5090c86cf7b7920d28 (commit)
via ddf9da5175b1182810838861f1464fb05fe00104 (commit)
via 8fe581570c2ef4f881762f4f22ef4f66c1063491 (commit)
via 2812fa5cb0c2013ef1696888651390aa71a76b4a (commit)
via b131dd71ce147b4efcece9dd8fba16c51fefa492 (commit)
via eefc291d240bc1fe15d131df9d463343b0333d3a (commit)
via 84d83c1d8979e2906971af79f2e41083299beb7e (commit)
via 255bf5b18e2b0e28a65062e87dc2d1212376bfc2 (commit)
via e2ada81cd2a090f707147abdb73a90d44db2f2b0 (commit)
via 0953b3f5d7ed1b4a25362f9a2d1a41eeeda8efa6 (commit)
via 8d380bb47dd24c7fd2c4880a4106835d871bf4d5 (commit)
via 77ba8639c274865c762eee688383c321f18ef889 (commit)
via ecf3f4b962026aa9094ee321b03ee32df2fdf1d2 (commit)
via 30df43575158b0cb294ec49a8463fe8b49593e62 (commit)
via 4c0accf0a591b0422c84216150e1b9b4e008609e (commit)
via 1f051716bce3d7aa2545722ba41958df9758cadc (commit)
via 10553ed4ebb5b949ae74d277d398d2e8a3909ea5 (commit)
via d916aef6af6bb8506b1ff4756054a1697410982f (commit)
via 4700bada6282f5ad10b53cd8ca7cc03b8fea791d (commit)
via ef64723fe9638f8d56f58fba44a149ac620eadd9 (commit)
via 5de6f9658f745e05361242042afd518b444d7466 (commit)
via 3f847f9d35bf2bf9ee0d957ea1aa9ffb27a32cdb (commit)
via df047c5ccb5c81f9a3d36f7fc38a19bc7c8f2ac2 (commit)
via a7346d50ae5389ce37e35a7131f0f218663b8c68 (commit)
via ad91831c938430b6d4a8fd7bfae517a0f1e327c1 (commit)
via 43da3c6c1cc7cb5fcb1dbe2f983a53e883408d1b (commit)
via fb3d0e1146e9e5a36a9402690a09e7629408c677 (commit)
via 27b3488b71a5c3b95652eab2720497d6d055346e (commit)
via 087c6def9087019640a437b63c782a5c22de1feb (commit)
via 3b0ccfb2f23961e4cbddb9d0873bab0f4c1d4c3d (commit)
via 0a39659638fc68f60b95b102968d7d0ad75443ea (commit)
via 2684301690d59a41cd20d131491e0714d156fa7c (commit)
via 5baa7aa73ad8d8d5250990a9e330b9b746659452 (commit)
via 7e857cbcbd5dfa64552d15dee5ed01ca39bf8937 (commit)
via 1921e1297dfcb878b9417edefe4d87639c827948 (commit)
via fa4c8fa8acbff7f4defc768e50a453bc376c56de (commit)
via 9b0785b11da612abf0e60f39950ebed9977b2e65 (commit)
via 872bd5756ba8b5daeeacedfcd4ec38bc50035ec8 (commit)
via 67d8e93028e014f644868fede3570abb28e5fb43 (commit)
via 4ff5e524a7f79ad7f4513ebed3ca0990392263af (commit)
via 5157876271e945703ee699f07442ee1a72bba362 (commit)
via 73df015104eb5ac8934ff1176c24079e6e9b09c3 (commit)
via 586d49827ebaa2cf2c70dc030c5830afb1fb89f5 (commit)
via 2b755575c9d0277980008df99f92c38dd6b3a420 (commit)
via 38d1a8aa943424e1a0de0503ee8aa961a95d0e14 (commit)
via 4579a2a9f43a38144539447bb5076bfcbaf8b6d8 (commit)
via 58d7fda0fd2efc2d4bccfdcb55ce6ba42af83aa0 (commit)
via 8e5b40643255bd93c6edda9cabed39f46b074b0d (commit)
via 7f08fc3123ef7d26a2e61dd29455c07510404a7e (commit)
via af6328603521584ff62b25a6f86a923bba5a4f5e (commit)
via 9d48d1964569b49be17afc3e20085a23544a32de (commit)
via 28988a78d3b80c7f1080fce696acf176b74a29fe (commit)
via 5c6391cca55baec236b813b4c2e2b7699595559d (commit)
via 08b5add9a6e405342c0c8bc3bdf5d552ed45df0e (commit)
via a176724d99c073f8e547dea2675a5b7d1df70515 (commit)
via a9b769b8bf12e2922e385c62ce337fb723731699 (commit)
via 6318db7dc90cb6656cc2a1f8e875f2258f6a4343 (commit)
via 35a0136d56de7faca280666ba40bb1b87a85fff6 (commit)
via 3a11f2fd5bbe98fc555bfdf1cdf9019f7222e3e9 (commit)
via b97162729a3ad4214e5f6b85452a27904b8f34ca (commit)
via ad36c799ff07d47ebd5c861c63e9feef50408e34 (commit)
via 9d3e78f0d8075ad62391ed005e1e82f79f05e2ca (commit)
via c5e0ebf85ef50e61457f3b99a05109a92b328573 (commit)
via 8216d5dbe1ef23d56ba589fe1de619a601bada4b (commit)
via 1c834de994f51a1fb98add648dad49abfea2c403 (commit)
via 9622aed753d953a763a9c0ac25cd7868d257bad7 (commit)
via 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c (commit)
via 7fe505d131d2a13a6a412789474d92493ade65dd (commit)
via 353c08576b5c7fae46be834cb815df744ec2ba96 (commit)
via dc9318330acbd36e07ad5a4e8a68c9a6e2430543 (commit)
via e4a17a0630a6460090c5cdb562e02ba992a74fa8 (commit)
via 954143a2748110c720d28df49159ed4f0bc1a1a2 (commit)
via 21bac503aa78b1d0cbb6993edc083fbc508dad16 (commit)
via ee826c177bef06f22cdbbf82044085972bfd8737 (commit)
via 4111989bb81641ee36fa94bf5cb181aa18f5477f (commit)
via 8cfa0f76baf92f82bf2865b3557c0a2094e81cb4 (commit)
via bdebd1afa4bf82120c66d9ee8d8cab500ab0b606 (commit)
via 68653f1c822916ceade94511168f87adff74c235 (commit)
via 451086b203ef3e4611487630225a7650ad9322e7 (commit)
via c0c7b21ab57bb9445329fed9e1451c534aab6a67 (commit)
via 59add6ec0f7e96ee81a7b9970228b8f795b01997 (commit)
via f6463fa6e74a32e3fb28f150247e11d0fe073782 (commit)
via 1b421982a6fcadebc72d3d6ee7a4e34eec61a25d (commit)
via 45630ca90e823247c429f82b338244a9bba9baf4 (commit)
via 36c6035855db0ae87a64a0d169e0230d936e3e64 (commit)
via 5ece3fe5e40efbcf7d727650475c35850624cfaf (commit)
via d88becea33630677dbb5123cd72fa8695512311a (commit)
via 171088e69ff96a2e242cfdf98e8d1f0415d4c172 (commit)
via 568a8cc472f3207b44b92428e7ac40338d9ede37 (commit)
via 9a8667331d9a7179331516e7bb1f3aa942bf8218 (commit)
via 4c98f3dc47545794daccd4978103f6b98236ad82 (commit)
via 2dfa0983e4680f321a3d4f1bd0d826abd88f455c (commit)
via ec8fed8c805b513ea15ad76eb380c639dba88548 (commit)
via ce3be84b9f772fda5f08947fec92764119989019 (commit)
via d60bb44c243f27053589b5501529b0001404373f (commit)
via 92dcac243a4a2924bab85d1519a0c7a20853f9cc (commit)
via fb7c63f65c121b372b1ea23a823cb17afdcd1dfd (commit)
via 2bd6dc4ac6ac61705517df297320fa79b308b9e3 (commit)
via 58d6de47f6e189ff0b648b4f2f74e6d5df85d749 (commit)
via 2ecb4add323e3c4ba56641d28e35dd79013ff9cf (commit)
via 540c6a5f5b25d935a8193fd835c1ba83dba02fd5 (commit)
via 507b231626a2e0289288f48b1e4613b569cdd8b2 (commit)
via ea8bdd6cb6894855f109b8d19ce104ae9a4b9cb5 (commit)
via 4a7d63179ae732ede6bdc77c393a1cfd9b0b58ca (commit)
via ba9f03b99b6e1dd46d9b11eb1bac629789c8f94a (commit)
via 6ea996c67dff319e332b465ed450ee50b97de4f7 (commit)
via bbc661e3c38f02b4a1fb50bd4e058a22150b0087 (commit)
via 373a792a4706be2619dd1d1820f949858620bc77 (commit)
via f9b1950752ff1d3041d776a5d50ec2d0ddb8065a (commit)
via d63056e7cff35f58898a9bdc8d5cad589689590c (commit)
via fe8f3314300936f71cc89535ecd3f0f3cad3804c (commit)
via b4ae924f504e9749989059a14e6a5dc830c99e81 (commit)
via 20871297d2aaae57acb79e987ff80a9020d608d1 (commit)
via 2384bcf387e93435658ec1ab92addbf28c9ab640 (commit)
via 1d314b2544b8af8a936c90e00a0dbbb605410952 (commit)
via 2bb551be853647c25005d1ab167e17ada7a5bfc5 (commit)
via e3c81bd07046903b4b3bff8325024aafcdb35cba (commit)
via 9001f1db99dfff10957dc2a971e7466a496f0f2f (commit)
via 616fb3be8c0b3c266eaf0aa4ae399918fc7992ef (commit)
via 7dd0238dbd4ed086ca7217ec50d8f0a5be3179f3 (commit)
via 7a9a19d6431df02d48a7bc9de44f08d9450d3a37 (commit)
via d72e84456e23ac19c2c12a186ba429cd2e4985cd (commit)
via deefb84c32a289f8deda6550518a48b01a6032c0 (commit)
via 83f8d6de769a33f51b83cd81efe178db162e95e1 (commit)
via db9e3c398b854c83a65eb227ab9ff40dfae1145b (commit)
via 77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
via a030033e5a53dd18157509c6c101340688d16011 (commit)
via 13e236a3d647d15858b061c7d96288bf7407e090 (commit)
via a7fe0d5982813f092f8a497d350620c02b995649 (commit)
via 485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
via 6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
via 7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
via 745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
via 1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
via 44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
via 006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
via 68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
via 09e8c50958a1fca313c2be427c2991c39798f90f (commit)
via d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
via 98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
via 0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
via 9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
via 691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
via 6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
via 5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
via ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
via 6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
via 9bbf7837ed869bfa42849f433367b0471bf7bc58 (commit)
via 810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
via c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
via ed6fc7857e3fe7d64f19a0bed27226964009f095 (commit)
via e074df43e95dc002374de30503ba44e203b04788 (commit)
via b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
via 56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
via 4cbf309be8a302afe3bc041da11c24b593464157 (commit)
via b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
via 3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
via 6df7102965c6afdec6f621175f9e91a56ee42a67 (commit)
via 81613a741bcc9cbe909c814fab9ca99c1a1fc2fd (commit)
via cc004ec0ff327ca300cde89ffc252a9b1c588bec (commit)
via c454dfae8988337bd10bfe0551ee62a267049dfe (commit)
via afde75c1fe9ab3fa35acdf1a3b5f80ec389e1190 (commit)
via 5de7909a21a077238567b64e489ed5345824b2a0 (commit)
via b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
via 3ce7b09732207eac03998fa5e267672760e475c9 (commit)
via d9f4f26b0f2c73eddd07b2a4368ae1b238944b80 (commit)
via 59c8ea50e972e7753c96f6bcf46fec48e694daa2 (commit)
via 0f7dd030eb47912112b8774424a62c5561af16a1 (commit)
via fb441884baa9994093ed380aded84e707c3d34b5 (commit)
via 6f5ca0bd47ff6a9b1670f38d6a68a1a7b1a01a5c (commit)
via ee552335b8177318be98e6a4c5d941aa41091a2f (commit)
via edbcbf0ab15f140b96efab5fae808b35e705cf67 (commit)
via c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
via f3e53fe5cba59946ddcf24be423eece1ab596769 (commit)
via a51d6b87331f0fc991b9926a9101e081668ebbcb (commit)
via e0215095818d30e80b59e99689f2cf0dfbbae841 (commit)
via 10cfb9ccd5b2eb489b14804e0ea9a73c80e697e6 (commit)
via acb5dff4449286422f23a7d5867b3bd792c888e5 (commit)
via 253d1fc351fffc8a0b1d325044854a2defdd7223 (commit)
via d7834356a301b162fb9757427359d0dbac95cecf (commit)
via 004afad6ea3fba7c8dd7730428b50fd770daec66 (commit)
via f20be125d667bceea0d940fc5fabf87b2eef86cd (commit)
via fcc707041d663b98c1992cdd1402cc183155d3c0 (commit)
via da5d5926cb26ca8dbdae119c03687cd3415f6638 (commit)
via 0314c7bb66b85775dea73c95463eed88e9e286c3 (commit)
via b8cecbbd905c10d28bcb905def7160d9e406dac4 (commit)
via 7a31e95e63013a298b449573cc5336bcd64a0419 (commit)
via e18a678b62d03729f065c40650d7183e2f260b22 (commit)
via 1d1a87939a010bd16ed23cd817261e9a655bf98f (commit)
via c6948a6df9aeedd3753bc4c5e3a553088cd98f63 (commit)
via db0371fc9e5c7a85ab524ab7bc0b8169b9ba0486 (commit)
via e906efc3747f052128eef50bed0107a0d53546c8 (commit)
via d86a9dceaddf5a2cee44170e6e677f492df5e0ea (commit)
via 4c2732cbf0bb7384ed61ab3604855f143a0c6c5d (commit)
via aaffb9c83c0fe59d9c7d590c5bea559ed8876269 (commit)
via e8a22472e58bfc7df4a661d665152fe4d70454a6 (commit)
via 2c22d334a05ec1e77299a6c55252f1d1c33082af (commit)
via 8a24b9066537caf373d0cfc11dca855eb6c3e4d9 (commit)
via 7275c59de54593d3baca81345226dda2d3a19c30 (commit)
via bcf37a11b08922d69d02fa2ea1b280b2fa2c21e0 (commit)
via a142fa6302e1e0ea2ad1c9faf59d6a70a53a6489 (commit)
via ae8748f77a0261623216b1a11f9d979f555fe892 (commit)
via d0d5a67123b8009e89e84515eee4f93b37ec8497 (commit)
via a9a976d2a5871f1501018d697d3afd299ceec5da (commit)
via df9a8f921f0d20bd70c519218335357297bffa7d (commit)
via e95625332a20fb50afe43da2db0cab507efe8ebe (commit)
via 28cad73dff9dae43a38ad7dafbee406c690fb77c (commit)
via 4de3a5bdf367d87247cb9138f8929ab4798f014e (commit)
via aa108cc824539a1d32a4aa2f46f9e58171074a9e (commit)
via 691328d91b4c4d15ace467ca47a3c987a9fb52b9 (commit)
via c06463cf96ea7401325a208af8ba457e661d1cec (commit)
via c074f6e0b72c3facf6b325b17dea1ca13a2788cc (commit)
via daa1d6dd07292142d3dec5928583b0ab1da89adf (commit)
via e7b4337aeaa760947e8e7906e64077ad7aaadc66 (commit)
via 0b235902f38d611606d44661506f32baf266fdda (commit)
via c19a295eb4125b4d2a391de65972271002412258 (commit)
via 9261da8717a433cf20218af08d3642fbeffb7d4b (commit)
via d4078d52343247b07c47370b497927a3a47a4f9a (commit)
via 1aa728ddf691657611680385c920e3a7bd5fee12 (commit)
via 1768e822df82943f075ebed023b72d225b3b0216 (commit)
via 326885a3f98c49a848a67dc48db693b8bcc7b508 (commit)
via 3e0a0e157bc2a1ca7ad9efb566755ec61eedd180 (commit)
via 93a7f7d1495795b731242e270b6dc76b1ad6b0dc (commit)
via 87e410c0061df72fe69fb47c7456ae54c609b219 (commit)
via 1ddc6158f7544c95742757654863379fff847771 (commit)
via 0f787178301c7cbf59fc7c516ebe920a33e22429 (commit)
via 9b6993b6f6507fab1bc8956f727cca60c8c9243a (commit)
via 7bda7762ab9243404bbd0964908b3365cd052969 (commit)
via 7cf7ec751e4f776dbb60cd290cea4fb217173cdb (commit)
via d5ded106a85afaf695e59941bd382bca4811fe46 (commit)
via c4ef641d07c7ddfd6b86d6b5ae944ab9a30d6990 (commit)
via e443a325b31edefe9cd4da71e10497db6544468c (commit)
via cddcafd790288f5e666198effa142132b6fc43fa (commit)
via ab5085e81007711f9d18ed77f3d78f51cf37545c (commit)
via 5e621bce015d2847104303fba574989fdf0399e0 (commit)
via 7d5c3d56743fb696405f509663b3e1558fa72e25 (commit)
via 990247bfd2248be5ae4293928101eec87e1997e9 (commit)
via e9e36557849ba6b650e503841596bd31034c1936 (commit)
via 39a0a5c65d0802f40ab428474b1e6d981a91fbce (commit)
via 0c9db8bbeb7187218a5b47d82df18e38128d06a3 (commit)
via 9882d600d0bbbc115671b12646e690ccddbf5348 (commit)
via 59b545e90d30444a97c8e925569d240c819d42b4 (commit)
via 7e89c625c5d12b5816c857d0c0910922f8803f82 (commit)
via b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
via 978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
via 2e60562cfda15fad37550ce5996e942084131d1c (commit)
via 2f49e3eb0ddf31d601184b516b7f44ab4ea6eece (commit)
via d71b7da05d3e1a82047e35c2720c759bdc0fb44f (commit)
via a577b387b7e5c9c8afd371767fccc85009e84485 (commit)
via 8e82cd7374cda9ef55f88504a94d31b06d7e1bd4 (commit)
via 1351cb42c92cd415003adf6234d96507c8a6d2db (commit)
via 575bd3ec2fe918257cb448eee8ebbff269d85431 (commit)
via 51a7361aef92b8c6caad857ed09f0bea0f210db6 (commit)
via 17a87c6bb9d16e992fadd47b11b3eb26af54ac69 (commit)
via 2cc500af0929c1f268aeb6f8480bc428af70f4c4 (commit)
via e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
via c46b0bc28c22f2ae4b46c592f450e745774846d4 (commit)
via 7740b9810bc093a9083e8c3404afc627c8b78242 (commit)
via b51f0cfcedc2499aa1c0b85aaebf2fecf244c291 (commit)
via 69eb1a250699f481427c2d12abf14314fee9e6eb (commit)
via 62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
via 005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
via ecffd3a7f26c9a1590994bb176494ed4f4ca7a64 (commit)
via ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
via 94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
via 0edf51aca0ea2d75ed9d96fb612c1005965ec64f (commit)
via ba292defc14029971d5e9043881ddb98c994cfdb (commit)
via c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
via 779e145d8f15ad9975f6ca689e6a595ea0a3de4b (commit)
via adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
via 80014655d76e758868e8e1ed36472be9a606eb2a (commit)
via 959dc163810ac286e01d0163624f5bbad5b82c55 (commit)
via 1d74428fb7a817790c397338db92d102e2113e1c (commit)
via d5e24e94bbd581098e460fc3a0b437478340c876 (commit)
via 4cd96de7e7d4ac12c38b45efe7b3ee0ed331d3b9 (commit)
via 914fe9bc05003defeff70acb84a52e86fb9ced4c (commit)
via b22882ae78f0e5d38d4b6ace0725bf0ae5bc4803 (commit)
via f83a47d3c260982be4918a3d9f5d0b480503b131 (commit)
via 8c84a6865c4b09eccf41c9d2e91a030c941bffab (commit)
via c6ca831b3f171da96fad75c21dffbd2bed71e297 (commit)
via 8ce8e05a403440e7f2323e9d43dca08be1cf8a94 (commit)
via a9ddfa91f81e00400f04548e71ab9519892a6dea (commit)
via 04749187843604f51ddcab4f53811dac9a9ed8a0 (commit)
via 414b25d4bfa89e0609cd3c8c3a6e610681f4c929 (commit)
via f57e8133a7af31a59578ac2cd50dd20418cb8fbc (commit)
via 85a14b1daffb3a20e9e510b73d25c71ba95cc350 (commit)
via 774a56a8beeef3a73258910b12cace20443a1bcb (commit)
via 89bd1bf64a6d745f4276fce3ee8fa4e050736ff1 (commit)
via f429202995ebb0dbc86d41c6d707815186832063 (commit)
via f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
via f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
via d719b47c4131e2120305cee60395c0a88f5aca25 (commit)
via c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
via ac15a86eb62832cc22533bc33b802ea297666ad5 (commit)
via 0af72968bfd192fa418551ae75def455adcfbb4b (commit)
via 977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
via d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
via 0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
via f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
via eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
via b19a36e30d0d3829c68f2e0300ea1487da242af8 (commit)
via 12b3473393fb7a471fc7d928476b0ba66da145e9 (commit)
via cfd1d9e142fa2fd8b21f74de0e4a0109e0a04439 (commit)
via 67b352b3f7cf736c9aa7c1332aa7814911556ad5 (commit)
via 822a00aee0d7feb845e28dad7dccb552d10d83db (commit)
via c293f639684d2c6625b7395c995aa813eafa5fa4 (commit)
via 00686a614cca93f007335d01c06d78cfd212d973 (commit)
via 5951ef6faaffcff62d9a9963260a932666e3decb (commit)
via a36be891057f7a2505db032768264c79f37f05e7 (commit)
via 23b1e8bb169e058dfb11b826b1b59d606d64ce20 (commit)
via f82dc7b09f470f79ed2bf099216fa64c76528d3b (commit)
via a53c7d7c450de09ceb04b47cb59450225827bd51 (commit)
via 5b7dee0548f068e626c0bf5d116fc506d2af92a0 (commit)
via 7990857c32cbb49f4bedf805f86c1b718b3a70d0 (commit)
via a03d7d9aae8ac258d266c66c62c63e03ff5d2558 (commit)
via 5d6fde4aa0d2a93945276dd722be48e05da72faf (commit)
via d14895917e4841ee53c46f7ab3f46c3f19489069 (commit)
via eb5023d2a38e0862e2d9a5f1ca4a3788fc131405 (commit)
via 1aa26c98d1b827a80bad8abd7f8bb25c26db72b7 (commit)
via f6a1807c25d85a0ca762bfa276ebac4a3430e7c7 (commit)
via 20483389cb90e4f46486be925b896c8a0438191c (commit)
via 4102716ab6f3cfaa979151029c2859701dfe2ac6 (commit)
via 8975d286a6de827a02b073de32570602cd9cffbc (commit)
via 65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
via 19c8c07e6e1601180f85f7aad145f00112f3f8a4 (commit)
via 87090907f39983b744749017cdac3fb957d8d0c0 (commit)
via 2808941eebec54dc7c4981f5a2a0e149d452b8ca (commit)
via 10b192574ca253331298bbc4b05ef70d2cb927d1 (commit)
via 9351dbcc88ccdd6aa83d72f432f19a76c031124b (commit)
via de06b256b36f6428c5d914266c4e91c25c69ded5 (commit)
via d4867b8dd18ddbee0b30040f569eeac99964343f (commit)
via b5347a6b22c2d82ffa57c8302c81ee0f25b413a1 (commit)
via 848cfc635084c5baccb275ed4995032d3ada2d59 (commit)
via 46b961d69aff3a2e4d1cb7f3d0910bfcc66d1e19 (commit)
via 52357dbe51bd015119a798a4f8e7244a3e1efda4 (commit)
via 97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
via 56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
via c9d7e29600f7a80094bcda2c3bd87d8f07d813e9 (commit)
via 2b6bcb84a17fc98ea0ea87df65e6a77829857ecd (commit)
via cc6d6b14603924a4ef2d86dfaf758447cca6a7ff (commit)
via 69642fb8f55cb4741f977d3fbaacd5d12d742625 (commit)
via 3027ed2010e5e27ef6e8ba519b789269100f442e (commit)
via fc33ec0a47dce3e94fa7179d4d28d7fd050a258d (commit)
via 86257c05755c8adbb19ce684546b718dd48a5ef8 (commit)
via 5f13949918d125f851bd2ba8ab092c301835d3ac (commit)
via 9a98be99edd71e540bd65631dcbd3d766f93056e (commit)
via cce2a00af57ef823abeaeff787eff35f43dfb093 (commit)
via 7e1e150e056d0dcf5a58b2a8036f47c2e5dac820 (commit)
via 15428e5a9c1bb01f5e7a04979c17ec5f1de9d1db (commit)
via ac9fd0a240cbfa8c448cb01bb69ac92313eb7e56 (commit)
via ce0544bd0852415891cb31e0c1b7d0ba0b3d19f3 (commit)
via dba1e2c7884b5bc68f945fd5d2dd500f9a258c6b (commit)
via bc281e8b48c92102d3c64318e07598c8e96e493c (commit)
via 82667b0cdd6592053f5b2f4cfa1cbd0ec92db0b2 (commit)
via 71b0ae9ddbcbf4093900ff879e2e1c82be89867f (commit)
via 1b96c2563342098e05ac4b240c66e60222249cf4 (commit)
via ff14da4f9b706a47f152491eae60586b75430c6e (commit)
via d23cde8c4285cf55b007b300123c41fa852d38d9 (commit)
via 885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
via 07cd1647921e0e94432cecb2f7a5413cd8f3884e (commit)
via 82348d8d9d266d91e570c4ae8d8f1afd3315178a (commit)
via ee2a86bd4c1472e606b3d59ef5c4392b61d7ab48 (commit)
via efea6557fd364ee42c84c08df28efa9797f1c9c8 (commit)
via 0e662967ac5a6c8e187725828cd20b826ca00000 (commit)
via dc979c6874916221df10de3557db0d1b4a19d221 (commit)
via 925045f2ad19d5dccb7dde77530ea16ea7b6341b (commit)
via ba80991049e1e361d2b1de08160c91e5bd38b728 (commit)
via faa90e91384af409419363aca539709e2985708b (commit)
via 1feeca7c2209819dd181f1fbaaa75026d3e38aa2 (commit)
via b2cab7978ff20eff1d4fcb4cf60fc8a4421fc24c (commit)
via d7713e5c5033ccb0b51769d7f28d91619655b24d (commit)
via 928dacfdf443393618edf7124a46c599bd760784 (commit)
via b34e7172b5f663faf3add7f6e72a3e2d8ffe680a (commit)
via 7fbc6a734b2a9e33100e57cbea0ce1d20cdf4491 (commit)
via 9f5c36321d6843ba5b2a0e9e6c10c3ffee7b14fc (commit)
via fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
via 54ef8963e504e22dcf29405412a95100a210efe5 (commit)
via 4db53f3593e24b80a33b608432ef463acbec295e (commit)
via 0b98878ed8a185cbc3b78c860019416bfed317bb (commit)
via 009d45dfbb20a54ea402e7e8f18bc2d253f41ad6 (commit)
via f1d52ff7171da920acc7583fa427a95386312908 (commit)
via 98953e35ee95489f01fbe87e55fe91d9571fcb48 (commit)
via f33ffa77fdcc3e40ec42268ea09b67ac65982f1f (commit)
via ac08c1b86b979574678aa110f19fb744719def21 (commit)
via 747d2952c78ee32acc485946d3922cfe899a4b48 (commit)
via f26298e3ae274ccea3d4bcef37f5ac85da383461 (commit)
via 7489fa475c3f5963323a6b660e4544e48f45d37c (commit)
via f00712037fa4b4cbd0d677d998df3728c0c4d8fe (commit)
via dae8a2aabc0cc9c9f3794276676872014c5a58fa (commit)
via 3cebb4e77088feb357b485aeeda26429f98dce9b (commit)
via 96249117c97e625ec93d94939e9d75fad18ac2df (commit)
via dfc13c8130787ee07e2386773a221524ac6d802b (commit)
via 6ee994f190d58df863c71389bf9f8edd38d8e3eb (commit)
via f240d7d1d55f4ae87bfd1acc9c07a90870f59a93 (commit)
via 1c5a66507b7dc2990709308979354d8e62646a28 (commit)
via c5124556a1a8907a84bb2c2bd1912da0c0aaafcc (commit)
via 19912ea4537e669f9c9ad1108b6f5453025738ef (commit)
via 3702df52de21023d90052afdc54732d9ad285b39 (commit)
via e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
via 823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
via 608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
via da32354d05eb22cecdf9543f542636d44e503a20 (commit)
via c42eef08cd6cb28c898d46c2168c5c08684d5c36 (commit)
via e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
via f17363ea38564867df555b6be9138d2eff28daa0 (commit)
via 5fd94aa027828c50e63ae1073d9d6708e0a9c223 (commit)
via 7b04ab1afedaf73b4492f9e0a9210dc4392ea068 (commit)
via 16e52275c4c9e355cf4e448a5b17136f24324d7a (commit)
via 61029d971895738ba353841d99f4ca07ecf792b7 (commit)
via 4625b640b9b5892da7f35f165407ed3e850353d9 (commit)
via 1c8043e5b50bd47d7734397a08d5015e3672b9ad (commit)
via 9819295a58b8b40ca6d95c84f1f1de08fb0eb707 (commit)
via dc3b856b460ff380feb68cdff551f334e6db5a27 (commit)
via d2f96b7e1e3e4a5917ea73a56429fa645d8ede7c (commit)
via f4620596bd798f3c0e1d4b7738a5c4ca1730cf89 (commit)
via a01cd4ac5a68a1749593600c0f338620511cae2d (commit)
via e62e50f3143aa67bd60c2351ad61d7544f28d4ca (commit)
via be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
via 11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
via b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
via 14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
via b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
via d299036c6ac281d1d6c119c5fdbe603bed404851 (commit)
via e5d9f259dce621201a2c52b56b260f8de776ecc0 (commit)
via f773f9ac21221663bd093806374cab83abd2288d (commit)
via 63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
via 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
via 25b02eeaa9acda461629d19c4c6c2b20b5850795 (commit)
via e89a3a1302cd3e95403c5c64edb126153852ff35 (commit)
via d9d0d1f6cb6c6210f293dcf5c181024d2df787f6 (commit)
via c8710633f9cad97adc038852319f1a7a22cebc44 (commit)
via c759e90e162192eda89c5046fa446891aac259c7 (commit)
via 21850ab947dbdf98b1d89afc36d8bcfc6001592e (commit)
via 9cc8edcca2ab13145a954b44101f7058142d4ac1 (commit)
via dd7e5d47df1e9af687cdc87c2d2595893eefec12 (commit)
via 8907c6a5c71816483099683e0ddcaf11cf3a7912 (commit)
via 0d2c284222839ff21401cecb7cb567cb0cc04127 (commit)
via 06aeefc4787c82db7f5443651f099c5af47bd4d6 (commit)
via 119442008b97f3b39d0ade075dd219a2f781e2a3 (commit)
via d42d232acb16847ea8ec775854469e3226cdfe17 (commit)
via 34634d2ba1efba222403e8a210379d1573759939 (commit)
via 0373b72ac00aaecb7745cf7fd129424994e2fab8 (commit)
via feae0b934e048b17830f49779b01c48136a5b2bf (commit)
via 8f5f77f8e2819a66de774a4b7f5216ebc631434c (commit)
via ced9ddecf6b8f7777125b8d4d2ef1b24ccad34cd (commit)
via 34cfc02f00196f9f5124172b10de5cc8fea1081f (commit)
via 45dcf93cb43fbd2f52cd432e38a5c17ae2ded61f (commit)
via c18502d5a89af081b1cd4c4b1c112f9458056124 (commit)
via ee4916a2db7ff1217c0af65f03220583b80b4568 (commit)
via 87a4f24037965ae88435ebe3f887750c500cbfde (commit)
via aa9497f4d2346e7a18cd07b9bf31dfb5832031bc (commit)
via 7b0201a4f98ee1b1288ae3b074cd1007707b6b21 (commit)
via ba7bc1e14fcf1a223a9a42ede2e9cd7d290c8b61 (commit)
via c6ef5865b3fd8e5d5fb8c891467b3722fde4d685 (commit)
via 589965360a98152e8c783e4736080e06a895feb0 (commit)
via cb86d16418ced44b148726104c5c8f9d36a3be49 (commit)
via f279d996354eded4defa219a393efa362e157406 (commit)
via 69336de84b2ae1b5b6a59fa8d817daa1108cea27 (commit)
via e05a3418c9d6b3f70cdb387d1f30d8ba59733f02 (commit)
via 12186e267fb75a77027dc046f78db6ace99b8571 (commit)
via c62810c526d75363ed4d668bbdb6b21a5a294a7b (commit)
via 0710846d8d7a38079b9570aeec9abfb94341af79 (commit)
via 9517f61cb8ad4f8074b5e6e33c663ca9ed581908 (commit)
via 3da7e8747dcea9b45c8bc4c17b946be7d5ff9576 (commit)
via 900a3c5828be90bfce2a7b8e2e6edc0d4509df6a (commit)
via d9e757fb15b711464cfc8ba344f2563f3e2b9195 (commit)
via 517c31a58af1f7b97f308e77caeb8cbe9ef99cf1 (commit)
via 4c485d0b112721d3a2b2939ab61db14b7608c98c (commit)
via be388eb699a8517595ea921082b5ded2d1450dcc (commit)
via bf5fbf4c58d67a25c68efea6608ec2b8e89c7597 (commit)
via aa7400d4aa132f50a982739e1e8b9752d418b97f (commit)
via 0711c996f017cabe220dd291500bb1b202f21e1f (commit)
via 9b2e89cabb6191db86f88ee717f7abc4171fa979 (commit)
via 07e015d587c487ce1934144abe59010b8f588c81 (commit)
via 253a3fad875abba510e13a3112b6176b9e272e84 (commit)
via 566d284cd664a78255f5fbc8881ee8996f835960 (commit)
via 8d8c3bc259f8b549a2fbace562afb0984cd427ba (commit)
via af698f41e199e4942d818accb0cc0ad7589785e8 (commit)
via 6300d968db6e857e199cf8e4701988bf2f9136a2 (commit)
via 49d5415d994ab0807daeaacf5e30f9186ca72ff5 (commit)
via 6a204908cb3f11ba7635d5e0a97a196856fb5748 (commit)
via 489f9a3bf2078969f746a47a49fdc17d94f898d3 (commit)
via 7b55eb02488353672fad7160148a40e581cb5c80 (commit)
via 67f6e4baa87b5555f3bc13919707a3f3180d57f4 (commit)
via c0a78a899ad3d96bcfe15715e957eebdb71ecca4 (commit)
via 6ba745463f9f54496a2f9c2b1a407ab40844bbd4 (commit)
via 18d0a74b6464ffbe036c41e706d3130a69a38313 (commit)
via ae1cf18d06bfc92ba1803ad8bb7c90be844f491e (commit)
via 26e04c45efa440353cd75365c499fc06ba1eb4ea (commit)
via 902ad260c2399b597fe22bba461481a09502b9d5 (commit)
via 486bf91e0ecc5fbecfe637e1e75ebe373d42509b (commit)
via 687c2d4bdc959da433c141d920820875b701c2be (commit)
via 640a5da59304684d4fe304f2616e8dcf9f234d41 (commit)
via eff38a97dea5a54b7a9f3e1213cd5e8b2b15be37 (commit)
via 42017c858f5e08f1544620342404904c36d12625 (commit)
via fafb108c231295b40b7b0d0ea86caff5031a0c30 (commit)
via 136adbdab133d19bf900036b3786d5f709ab2082 (commit)
via 10e4a07adce6af4794166cc783eca4fed188cd42 (commit)
via bb79e799885427437f01e6456c03a206886ae9ff (commit)
via 6d842a64386a5c64a5136cadb4a1e646ee1901e5 (commit)
via 9741148f1166694a65612ea27be4080dbf7194cc (commit)
via b4591042f81a9ec8157bc74d023f1fa5c91999e7 (commit)
via 1c88cc3b00870a93c01688dd5742f5a19e0d0f76 (commit)
via ae79f5fe81a38b64a541adc67194404de5dc8cc5 (commit)
via d185c72f14dab4b4ca10dd01e6ea9b7aeb42b2df (commit)
via 50070c824270d5da1db0b716db73b726d458e9f7 (commit)
via 66ebc54e863f58b86c3ae65ca9f4764906c9a348 (commit)
via 3912ef9b24104abea0e9344ff24deeed700712e3 (commit)
via 21b4324449c7091d36fc3e153d3e0f4ea3515278 (commit)
via 5471e816ab36a6182b2223dea461fc8d086ed9e7 (commit)
via 686ed44b82c009ddb63ed064d46ce44fcade5fbe (commit)
via 834f8d0f752eda6b2baa5dffb48bc0d86de8c90a (commit)
via e108ea6f210bf93250ad4ea23ac3708e1478946e (commit)
via f551799e8c3de59be0a6a7c5168194b93987e876 (commit)
via bb1028fd4f52135f4a2c8175d9bf1b90043df1cc (commit)
via 1f26ac530c0ca072ff0de69093d38c95b9d3c80a (commit)
via 7d85a63f7bd3ef5926b92dd8f7d9c1588cf6e286 (commit)
via a365c21da34b70f50459137ae242767cc336f191 (commit)
via d13509441ce77077ccf21b9442458b0fb52b1c07 (commit)
via 8e00f359e81c3cb03c5075710ead0f87f87e3220 (commit)
via f52ff519388e7f3ab4e903695b731a2a7000fcf5 (commit)
via 582348ce86ac20e0bb92079e5f15ba9b05f60a66 (commit)
via 92bf1032800f3365a5d8eb5052a2a045495ca646 (commit)
via ebc15cde7e0fa14a61127be51267a5ad0c430f90 (commit)
via 27a209e24883177391c382906dcd0104a54faf79 (commit)
via 1c71878fcb9d5579383561cdaacd78b81fc28694 (commit)
via 4d18d306085f15ff218dd7dca303aa53122aa2d3 (commit)
via f63ff922e713a04b3f4391d509c2206ac32edbb5 (commit)
via 72a0beb8dfe85b303f546d09986461886fe7a3d8 (commit)
via aa4405d57bec097972c4d5b60d1cfd6a06f84bf1 (commit)
via 4d17de950b96631d01c7928b9cab24860b2e29e5 (commit)
via 365948a46f61db8726a24bfd0c625d26a014f63a (commit)
via c24553e21fe01121a42e2136d0a1230d75812b27 (commit)
via 151ea34890984f1fb2404df848c1dcbf3e61d765 (commit)
via dfd8332b1a958ed9aeb6ae423ea937b5e08024f8 (commit)
via 54c3708f45c72065cefd4d6013be5467bee65f85 (commit)
via 146c48357b32d26019675834eda1daddde95302c (commit)
via 8cec4587428e4fba8f5cf8791f19f8373212b250 (commit)
via 090c4c5abac33b2b28d7bdcf3039005a014f9c5b (commit)
via 62f912bd96a5fefeb0eb8b017ff12335810483b0 (commit)
via 3b4b066b5d1c3726f51e52fee52c317a3ae3f9e3 (commit)
via a7047de1ec7aece83271cc28605ea1d790afee67 (commit)
via 71eee6e279d7527adbc1e325b0cca49d824b67ee (commit)
via 0958095d36903cd821afc57be0c038896dd1acdb (commit)
via 8e66cc336b29bd5acc1f764f26cb0b116db4dc87 (commit)
via e540aaf2cedae6cfeb4c0ea063f8693cf5999822 (commit)
via 71fb105407d496134f0cfcbea73eaea9991dbcf5 (commit)
via aac974498b0a9513f3caf341e1eecbe4adbcff0a (commit)
via e7cf8992bed2ef0be2843da6f0eedf9fa6d5f66b (commit)
via eea48a1e96605accf8579ae4b7fb869295c9ff99 (commit)
via df79b8d3306394ae123fb4c558f7239146e9f0d6 (commit)
via 6d784213ea929dfa06099d7d85ed87709a7f408e (commit)
via e77575c3c85c7e219137b2c616ad104e5b28eb20 (commit)
via 49f1d2d2e7f75432465ddd4acae2579c018aab33 (commit)
via ed9c17ed1627872d701c76336aff407d3ad5c44e (commit)
via b0e38303e79e2a487e37a9dcadd5f1730cdeae9e (commit)
via 93145c09728dbfb7fe5bd77b5a3671e911c41deb (commit)
via 1c1bd99f0add79535b62f6723d7e942661007653 (commit)
via 1d03e4212cffa7fcf57d0f3a4fcdc1920c959e40 (commit)
via 834d48869745039bbd874d76bcafb4ac6ce7a4e8 (commit)
via cca39b307de50546d7e3c4cd9fe4c2435223bf21 (commit)
via dffeeebd09195ad602090501c8c9b05b55885596 (commit)
via 673a619cd628130b0506a5d3669fd6a4d139c790 (commit)
via f8092952b50ef238e2ffc63ccb6d17a469f22966 (commit)
via 7cb53c7b33c41bc8c5d76c6994caae800692108d (commit)
via d0df4daafee6703a7b52609b5681846f83310182 (commit)
via d23f84732df2786fad5bf31f3446e0e088d941ec (commit)
via 12114c5c973d70be91bfe946962e4373fa4d890a (commit)
via 963e72656e6a5d8303034f9085c87834a75c44ce (commit)
via fd2daaa2c1a27140568cf5a4f04baf57682214d2 (commit)
via 78942e3fc11f22f1bdbbd8fdd629691d5c510a55 (commit)
via 8945eccce758dd466ac42c6521a3fc4ada5a9226 (commit)
via f29890eed7bad4aead5e95cfa6aae147287a0b10 (commit)
via 7469b1f920d47306f87aab0e2fa0533903bc61af (commit)
via 8820f1314ddcaea75e069f2a11bced9bd1b80ef8 (commit)
via c5825a1d48bb2def1c6113629e30de4ac9dd2b0a (commit)
via a0007d1c88df41e7796f89e24f7af5b40660fbf3 (commit)
via bf9c46a19ba59fa798236b64521fc6d95f18e076 (commit)
via 2357e7abc8bac23f60d79ca8abe81854b5550eea (commit)
via 4d685db094731fccfa684f5c0b26ebfc1c28ca2c (commit)
via 0dd272381befd9b464365cc7df0bb2d761d0d2e0 (commit)
via 046f1b9556d3b8197c03225843ff96d0d79ae762 (commit)
via 7e0ef7c21ad41f0e3047059fef61ddbefe143444 (commit)
via 7cc84b1bfe00402ea12749c63c7e4d8cef5b2431 (commit)
via b327d9aac9bfd87e175d03421069ae679087dd00 (commit)
via 65bc0fbf12199bee2d16b914a544a69345c37cae (commit)
via 2cd7eb5d2c64c6a54350e6399f07fd4826933bff (commit)
via a2a8094104e32ed8249c2811c94f74b876f78b3d (commit)
via 4f17845a927e33ad9655c3f711177e376bc10e44 (commit)
via 84a16612dd45bcaca490715039b1bec235e0dfef (commit)
via d4dce83017319569f35e617dae47af9041166239 (commit)
via 829edd5488aa90324ddc4036dbaf4f2578be9e76 (commit)
via d81a47d3366b6c6ed14edff69188b60ed3655f28 (commit)
via a29b113e5b418921dffaf9b4cfc562ae887a7960 (commit)
via 5024b68a04ecc7ff1c73299fa986cac740cb3e8b (commit)
via 56b188c6e4e36a28b54cab442677e2fa748f0bae (commit)
via d7d60797272f02e6f3f09b659922c71f2c49ffec (commit)
via 570bbcef51aa6a5bc920faabd850cd6a86c0d421 (commit)
via e090ab50879c15c850df8e8145f01d39dbd6b87b (commit)
via 832cd1c032222fec662f9320e6f564f55b75cc8a (commit)
via 933adf250348caf92c04f5249120333e3e300227 (commit)
via 1658417daeac170c4068fbbc6f4e3762ada0e72c (commit)
via 9ff2f7bc88be85c09d37209bd0feeb96ca256892 (commit)
via 7b2691afaea9ccefa2db073f8a717e003f2ad07e (commit)
via 3b30727c4ae0b4febedb9795752352bf5154730a (commit)
via bf635ee41af43f357b285ab97f04f72b37e8fb64 (commit)
via e8e411dd27068279b58bc3527d1b60878ed19d0b (commit)
via 6c44ca2eaa94224d60ceac2602ee9c6846fabf18 (commit)
via bfbd97a0fa52c122c6d0ab5239524b7be58b62be (commit)
via 0e6771fbedb4081dc867e845b541023a673a1da6 (commit)
via b9bc51b44f59c9e93eaa5a21ae7658a320741e08 (commit)
via b18409a1d6515152d107cd965e25ef58835f9f22 (commit)
via a837df2e0c4858543c0bd2e420f726b89994a7e2 (commit)
via 0cd32b208c9a92e5e773b7874a3f75ee58abd6c0 (commit)
via 616537c54eb13b434294342e1a0df06375134ec0 (commit)
via 0d68ac445710fdb4d9d89ca2055b206c9a06dc94 (commit)
via b5e49faa3340628865ea28a60d3dc36d3e08511d (commit)
via b5bbfb2f868f8f7401018debe275c39fc65a5139 (commit)
via 710095383c263973fffe58b050a4924d5053bd7f (commit)
via dd356abbe83f7c1275eba42ac855977499e71e44 (commit)
via 5e1e54cbee0215374fb712152f7906fff960b334 (commit)
via c05781723ac006e4d193d9181bf46ccec998a5b0 (commit)
via 7203d4203cddbb6bf930586e2f3fba183ca12140 (commit)
via 535a401494dd268de77cccfaba68cacbaa1b2a6e (commit)
via b250ca760fe74c901845861fbc2e7292b4349724 (commit)
via 3ac41521c2a1cbc43e3b6e0979eee46b6c45fa63 (commit)
via f30f07e5b15e118686f5665c0a6dca430a95abba (commit)
via 63918fd91e570dbcc2c06f39c75083bbae6a2303 (commit)
via 5599a9aa3b735b42a4a726b79f0c85a0d38eb5da (commit)
via 469bda3f72a097af3dd1bde56d757d7ea916d996 (commit)
via a0209aebd72ab6ec63941d5881b58a3c689b943f (commit)
via 9dec49291c2ccefab6cf97b9146c292897783c5a (commit)
via 6c9695dac3a16574ff3e7d0d310cff3df6d542f6 (commit)
via 7c7238ca556654cd2a0483dab5e7478fa7956a88 (commit)
via e12a932eadf0b33e26979cfbf387eb6788b97cad (commit)
via d77fde1d390e921740df739699dc03b48777f81a (commit)
via 551a2d7f6ed5744170265ea5bc7b99690b58a6f5 (commit)
via 0529433796c0024e9345edd3c458e22e1aec9043 (commit)
via 0252f1b276eaf8e72d42510546f594b9d0703a58 (commit)
via 026699b978f21466cdd20b09dba3fe0448e0592f (commit)
via 6ec7cbb9976f68a0ca265e72dadfbb867d59581f (commit)
via 8faa21b81fde5c30ca1df72739b9a0dd27005402 (commit)
via 159caa607fc11e4b7c1b5efcbb28d0ebf5e99903 (commit)
via 2bfafa08c054715e6163a91da334e1e4fa780740 (commit)
via 78763269e5388263ad29b02049fa61c62829dbe8 (commit)
via 025f40af1c7877ac0ab84e0ee159806a57285c3b (commit)
via 91e59cdd3ffa76c041f774c4ff61cd865299ab75 (commit)
via 27c6b8cef44f5daaa149ec72e3b7052e516ebc26 (commit)
via 846493567722add45db6e7296d570f8ecf99837e (commit)
via d1f68168ca58af66f09e51ced1a35334fb5fb825 (commit)
via 9336279b31c1a5dd9e50fa37d8178c790c4fdef0 (commit)
via 7819ba75f5c170afa06a5a27b8c64e13ae094b74 (commit)
via cf5ed0e7c52e8a97ec48525ee2181e31aaa4184a (commit)
via db143fb8a98a13414f997892449ca2fbb07a0629 (commit)
via 47286f0bb01c6dbe0e48fc080f931d7b93e22063 (commit)
via 191329567e3cab6ae2f0752f2e70880b8d97271a (commit)
via 4c0d2595196da373ca70a52663b7ec13842c940d (commit)
via 242235d6e7bb4e1893c0ebfc58e7a757dae771f8 (commit)
via baf3d8783ad1bc05bbe4db507325e9bfcd8d9be9 (commit)
via f9045d39a58a9b9287f3ece1022391a3b07e88d3 (commit)
via 525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
via c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
via 0042b37bdc4e3929faf3d2b7862dd79979d60aa0 (commit)
via e39dbc26a1aaecdff6809be620a91d4771e5af9b (commit)
via 88c4d5e241d0dd670eca6f9a4981439a08924704 (commit)
via 2e8473390d5dd2274aedd59ba3934c597f94b04a (commit)
via 05164f9d61006869233b498d248486b4307ea8b6 (commit)
via bfcd0225fe669dde479dde1146612f7c067a817f (commit)
via 85b53414c2c8f70e541447ee204e004693289956 (commit)
via 2c936393a16d79fa3d4bbbdacc66884f7d8d3cb9 (commit)
via 3383b56081364d68de8c29fb34698a7651c50e05 (commit)
via b60e7347e2b97d913b386b82b682c8c7ae2e3d4e (commit)
via e2127bd275b2263f06d7ba039123411c9b7cf07d (commit)
via e3d273a6048b48b7b39d55087d3a6b7ca0a623eb (commit)
via 35d4ca9a46fd8372cce752577944b2fdc458a0f5 (commit)
via ea2a4f906dc3b5bae939a0348a5f82fa690bbec5 (commit)
via 376fd546007d1bf592e391f11b5fdf08993914c2 (commit)
via c0442d5d6e70643e10e639efe1162b64c44cce45 (commit)
via 95e0a157b2ae5a4027c06b7bb1aec04f9eb883fd (commit)
via 4082b7fdb7a1b23518e2dcbb5077f52a79bffa8c (commit)
via 18ba901c91b5bd1e910c7ccc8ae1ebbb1e00fa36 (commit)
via 2f088998525f389391efb86ac4e917174449df85 (commit)
via cdadcd5d6a6bd594fdbcb9efe628067879623df6 (commit)
via 2ab154b25ceb6264f87ba6a3ca139ec44c7db275 (commit)
via faa2bca6751f7a8837e8c593ae723ea81fd40b69 (commit)
via 2e29bef63a7fa200af54b9b0fc69e5cf2573b467 (commit)
via 258663014324e165ea95d581498268915d176141 (commit)
via 4d81613695a03b3d39adb5b54822dc1a07a37af0 (commit)
via af1dbc5024d5b3289841868ee49929ba4f4d3f50 (commit)
via 03d707719016e3d3a6d98b3fb9eb786c90df69ec (commit)
via 803a215c662c5a692c3b057fc7c0bae6c91b3587 (commit)
via 99a63ce0a562d9b26ef1ad68b9426d91e6ec35d7 (commit)
via cf4605bebe7b0266f21376b796d4863aca01f63e (commit)
via 18dcf1d0ec44f4ddf701d5872f6d5e493d3c4fdb (commit)
via ac06e0bbad2fd39f8cc77fac06fc397be14f92c2 (commit)
via a6de7efe8fbf314c5182744d462699283464d9f0 (commit)
via 3bd81bcaed4c9c2ca6c6ed5fab00f350be5c2eef (commit)
via 756a7d1a0816670445219db901364074b79f158a (commit)
via 1a915fef55d9902cb4e0c5d077e9c602101419dc (commit)
via dbe54369eb40d9ba95b8fd77859a243f076b5966 (commit)
via 87fe9bf486e8671d74ed7e6683309a77add03f51 (commit)
via ec45081d781ae19de834b11e000acc35415a8f30 (commit)
via 46e6d4b1702e5c30c8bcd33e7fc73733872bc620 (commit)
via 612a96ab3eea34e232fd97e834599745401b73eb (commit)
via ee8c5f5bee062c8943e955184146d839c05bd2da (commit)
via 9ef0e6eeb1ec8477b1f6867d118d4c599f41c0ae (commit)
via b9755c94e619471f8d9769c7c0d230c1e40b9584 (commit)
via 05c064b4bd3ea51553a34e37099aa1053c141060 (commit)
via 1eb10de8b47aaab24b48cb0e109cf2a3bbc22860 (commit)
via 690a04dcd63dce08a69e648223320e922f82b3d6 (commit)
via 1b11baa7c10783eb9d53c24c7f1deb1c0a424105 (commit)
via e3ef2529a0582f0b146ea7326cf2d52312149cf9 (commit)
via d2dc7a3ef911a5ab83527753f351bc99440d60fe (commit)
via f0445f392c1e2c99acfe9117ad36eef0811bd68b (commit)
via aedaf51b32a4b31d697b18ecb914af3889d13c2c (commit)
via ae5aa5618516d4f894bdf5d2aefed76742069644 (commit)
via d191035ad012bc481ce0a4545f9b6819b897a04e (commit)
via 6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
via a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
via 734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
via dadf18e8c050ad6a5977fa32d563f31de99d3ac7 (commit)
via 3caca9f8debed45019acb731b5ef2f55a3479ee4 (commit)
via 3bbdd1e7d3f89b3a281900c75ceb0830d0cfd7d3 (commit)
via f5ae2264a57664aa6ab307865db72f1f740b80c7 (commit)
via 07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
via 0a836aa297d08b3c375d245f50971cf4cf2760e7 (commit)
via f7af58ec51254d0586ee20ebfae4bd0f8977ed48 (commit)
via b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
via 22f3ad26d4bb70a03858d42122b7a648211911c7 (commit)
via 534acaf92fd8ba43488be7057d7a35623dcab0a9 (commit)
via ebe5d465d2995899aa3f95c944e0d32d09ec2034 (commit)
via 3f599b883384e9f180f12b06d704ef098e948c8e (commit)
via 348387b8fa68c25873b4ee50881738c9c0e83670 (commit)
via cae4ced00386d042535ec9b53b20e9bbc2cdaa20 (commit)
via f8ef5bcb1e8ebc747b32192348faae9fd32fdba9 (commit)
via 666d6e49e1cd46fd293b3fdce239e34588666ed6 (commit)
via 8b21629d234228ff9fbb7a3c5ad5ebeca4b981c1 (commit)
via 600d77cc8af4625a30dceda2033c4aadbbbe71ff (commit)
via 3b1a604abf5709bfda7271fa94213f7d823de69d (commit)
via 4191945aad5aaf0873b15727716d0a988b1c978d (commit)
via 0caae46ef006c8322d489d6b140c0aee91928803 (commit)
via 99c025349129904b864806049ea8761940ba0ecc (commit)
via ebc5206327363f747822e7344037d9c2b76b8cd9 (commit)
via 9e72b16bcbfcdc819cbdc437feb10f73b1694107 (commit)
via 4355e75c9f82ea797d9353e82fd4d7c445c9e5c2 (commit)
via dfd7a01376d7b871cf7dfe631f5c96b4b2b7767b (commit)
via ae4e7f10136bd182db6d4801ace410e72574abf2 (commit)
via ca03a1f5156b0a68a2179e287d9af444c64aee91 (commit)
via 688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
via c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
via 0caa1d89d3e60a80ab7517d3691a149093e32be6 (commit)
via e3b0557e225ad3e7a6b7d192b8820666d7b81d0a (commit)
via 70d50df5bc495661463ff19885b9a4112270bafa (commit)
via 27bb28f8bbde1dfc79030b0129a1c0405a8ffc38 (commit)
via d3ef96824420d7f089b28e6521790191e39949bf (commit)
via f5cc3a37a155b140b4187a98028c1b8a5f79f9b9 (commit)
via 31bde63b8fae57143dd02d9db4798aa254494c77 (commit)
via 7e41b9c3e2e1ca809ed4ea6de67c843a1a0d7680 (commit)
via 4b83a53a37e3fa53a01ca0a6b4c9f7846a64bc5e (commit)
via 839d23249ae0d1722b51d87195b92cad40e6d78c (commit)
via 4dc1b4bf31b25256bac76baca6e8af71e11cc83a (commit)
via af6b421d426357550e818d6fee79dd559382ae46 (commit)
via b9ad6d4babd3e10f1c13140e53d60181681a5def (commit)
via 9c846eb19825f00e540c6bdbbe47c596f82b1a27 (commit)
via d2be8b73ba2b954ffcdb98d6ce0ee5f01fa161c8 (commit)
via 298f5f2ec683b78ddd7d3b57d04add89314a53d1 (commit)
via 0a4318b47384909ac7d922c065f38f46790db640 (commit)
via e0388c4bf5ca94c72132138edd81ff272148e2de (commit)
via a56ecdca722639891e04e89d39dcb5296f85d762 (commit)
via d7675f390dbfe0dd69bda3971e0fa5e4cd007b97 (commit)
via 4a590df96a1b1d373e87f1f56edaceccb95f267d (commit)
via 2f02427767832268aef89bbfed9b458a481829fe (commit)
via 9f6d580f95c2674864bd1fbc1955b9900d58ffb8 (commit)
via b9da1875519a8912f689a695bcb65b8d2f94f0a0 (commit)
via 44087e600d4d6163a2540c47c7ab0f4bdd7f0328 (commit)
via 8730224ec6ab50b04360caeb4ea72529574911fd (commit)
via acf0cc6205835bcee35ee276b5cbff285486a763 (commit)
via d7c2d4ab317636ed434265d885a433c8e669db10 (commit)
via 92e916b4fe00c6a544121616a401d0d5ea125686 (commit)
via a91e1274bc7cd044b9e6c254a100a0aff73dcc2b (commit)
via 3c3ef0445aefddccf5cebab6edb347536e3adc71 (commit)
via a88805ec3b7b2c0e3e42b91e0ffbd494b6c7457d (commit)
via 4ad7f97c789c2e2747506b7220bc279898aadeb4 (commit)
via e0744372924442ec75809d3964e917680c57a2ce (commit)
via ebf9a139d91ee05950e045fcbf2bf7acffe1755b (commit)
via 499ca0aa022f72674c29a2c66050a5cf1ee9f192 (commit)
via 376df1c9ac466742afad6681ff2e431ebfd75b63 (commit)
via b3a9a4228b95713636958a12f625434ef3cc3277 (commit)
via fb993ba8c52dca4a3a261e319ed095e5af8db15a (commit)
via cac876d0c8ab31aa9007411aacb5ef6ecda398a0 (commit)
via 1cbcbc84256489b664fe90553d3bb3579a33946c (commit)
via f9245031dcdecba55204916535555ea20374878a (commit)
via f9b5323ae8c8ffd7d4d2b69c360dc497b935d6de (commit)
via 3bb777140c5bc99775d5b8e0ea55711e227f0012 (commit)
via 8ec67a677e0ee2ecab48d112c3c5f5a5c5753543 (commit)
via 91bab51181c8ebc89e0ee2e735cc6e851eb6b93d (commit)
via 041c3ec8a0768c513131f47467652ab2aa75a07a (commit)
via ab31e2fbf10950084d9cda73c0b4fc7d36296817 (commit)
via 55c3df5bf10aac9f15735589142b197e4616e0ae (commit)
via d6cddde949ca816c12c182066afd5057eb726b86 (commit)
via 95707bad7adcc6963baebc1b7e3b005d1b8e316b (commit)
via 8081aaf24a0fb181f2847887ba6347d7c97969e5 (commit)
via 68b13c93af35910580e0ab42ad2081b212e8d5eb (commit)
via 0c9a11c009aec896872bf4914bd5f01e40a5e0ae (commit)
via 373bed0a95cbe38e67282e7ccca8cdd8fc2372f0 (commit)
via a01f7fc667d5fe05428231479d8e934673b40407 (commit)
via 8266c873258135c402a7cae57e409742a2cc1ace (commit)
via 5355f8f14648fddf13cda7240530e7b4216da671 (commit)
via 876951776077fecc59acb33574bd155fc9259dbc (commit)
via dcdabc780fce8d02c9263f8e98f03b29bb4e5210 (commit)
via 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef (commit)
via 36c080d7a211dd69cc3ca37a8221f096828a2f4b (commit)
via 3e861eb6aec036b3c5a2f6a71c6ff3adbdc9a55a (commit)
via 502100d7b9cd9d2300e78826a3bddd024ef38a74 (commit)
via 6154d0aa83098b5b05f2c658a13bb1862139d618 (commit)
via b477df5d4dbce5b72ebd183b83555f62aa3fcec5 (commit)
via 701c0d6d7c484c2f46951d23fba47c760363b7e4 (commit)
via 5d5173ef0cc48d206464b39f696d03bae9daecea (commit)
via 7cc074aacd5159778111fa4cbdbe1c89e6a4e51b (commit)
via dc087934c1a1946cfdcf63b49a70aa0fefe6b282 (commit)
via 6f7998f9a209e9dd7b3ac80793098dfd81b489b9 (commit)
via feed2b3537a4e57e4cb55232242c6622d1fcc654 (commit)
via 8454952f81b704bdf5f28d1e105ed32de7797d83 (commit)
via 21a333f512f2a11ce0c770b7d72aacfb623d0c14 (commit)
via fb032e397153a63e4f1bd3b9b7fc1a89c01e7d6f (commit)
via 9395f12c95a2519803a0dc15b56424c22df88c84 (commit)
via d57f30ffe93b7f45aa6492ea1fba5d594adc01df (commit)
via 690dafd743f765f04b21d3ce15ec0a63da6a53bd (commit)
via 251a32a1fd1e7be23d59790e57a4b40fbcdceae3 (commit)
via 6bc6c57d5761ccd2ef65291e81bbfd995b4758a9 (commit)
via d137040ad98f7203cd440ca8b449a84f048af6fd (commit)
via 440524d524bde6ea17ec64b427e259f3bd08757a (commit)
via cb737e68ceac8238844fe2b8b9bc7feea23b4004 (commit)
via 6b48fd891428674ecf5eaaa083bcf5b843deabc5 (commit)
via a98a90d8c392ed3bd0ab51d644568cf560574112 (commit)
via 421d19914af7d7a6f886c1bce084324ce9407b99 (commit)
via 0f1b7a45520517a40b7b85d57d461e20e81b7aa9 (commit)
via 885a4ecf9c87b8e3a028b6488b0e6b853365edc8 (commit)
via 8d5a5b95c85af1f15654fe164f306fe21065ea73 (commit)
via 77367a5d67709b65afd8689159e5192416326cb7 (commit)
via 935bd760ed4f39213f8db8eab730bf41dc217da9 (commit)
via 52adf933c0bed4753a06632b25a46055d23eb655 (commit)
via 4fe29ae03d1ff8f6d721b42f4bb356702110c4e0 (commit)
via e3fa282a59eea69c50dcb9354e568a8503510511 (commit)
via 58df861a260fdf06b17194e224fb8c1bd03f0392 (commit)
via 77e3f8cf3f3fe79c7dd5f92f30d70c47b515f4cd (commit)
via 4a88c75d4d1decc3b3d5518bd12d592c118a7fd5 (commit)
via ea1b177b5503687f974252d185a9543066af20ed (commit)
via 6535d4fbe623226171b27730f60161436d0433e4 (commit)
via 0f4c693c3399bd9ecf2d2a5682fda8ed1eb8158f (commit)
via 877e89713ad2398b6637b843a22c3b12607fe5bb (commit)
via 33e08ca107c127d5c158882e7f2e86770a48c572 (commit)
via 32fb3ad97a7ccc65ef391b84c8f488d4ea71e963 (commit)
via 04e7fe3f480462d288c17bd121a368b74292cfd3 (commit)
via 354fcf46bf93f1e2e317043f2998a8b17f22fe04 (commit)
via 21acce853a4269f0db76dc2768bb7c5107b1b7d4 (commit)
via c021505a1a0d6ecb15a8fd1592b94baff6d115f4 (commit)
via 02aa9813c1f6829bb9089400c5397f3faba7d9e0 (commit)
via 3017593b63f34c4bc69494be8c80327eaad5d922 (commit)
via 62bc6cce6fe7343c4ef06c7e690939fd0aa20148 (commit)
via 77c17d3f03de64646da89de238288a22c49e3eb5 (commit)
via 6f8383136ae83eb439c71a70c4bde83524b72c5e (commit)
via a16c7925f9a00f44680e2ca984def99d6bb3cecf (commit)
via 12c37af78f65301858be28679695a9e818270947 (commit)
via 83f58ed580b457a424d4279d1deb8e698a0fcc9d (commit)
via 34698cb3c81a724da952e6d0059572c4536e3a0b (commit)
via 0494c88638d05f2c2004cf897f6360b06400b6c1 (commit)
via c58fa9e4c5aa486bb270681a45a4f0f7e04b4139 (commit)
via 89324744df3f73de1beaefb9420aeab5f9ff7824 (commit)
via f9070aee950581a47c0916cb1f3b48cd4bfcb7f4 (commit)
via 0c1589a0842cefe0793b538c53c1cb102080b570 (commit)
via 166b4747ffadbc6b3a94647f1470ba776aeb8c51 (commit)
via 181283a52982eaf9f8637bd09a2e1dfaef5ce302 (commit)
via cb8f695c11b2a6e5402ca58fabcc8a17800177ee (commit)
via a46b0d4ab62e16c4096bcb8790659bee93205470 (commit)
via aba816f40efe336b20ae56871a531c87117ad24c (commit)
via 217c09751aab2dc84f49e7942b2c081a0381945f (commit)
via ea15d26afc9ced4a11aea6733ea3df0969c5618b (commit)
via f685e5c06c382180eb1775bce714ea60154b08f2 (commit)
via 5a19ee14367d9bb796c8e43c034ee9f327052c86 (commit)
via f92d30bb55decf9ed4d7cdf10231dfe2913ca11a (commit)
via 461a9d0a1e896e0a1b676c6873f74404d5ab95c1 (commit)
via bc81810505f7263aedb8654d139510058c251626 (commit)
via b57c51e6ddfc6770d5c66eab5aeb1a5238e5a7ea (commit)
via ddb1b2241fc03a1d08dea42907ee8f859d3b2f46 (commit)
via 0b838ba0d3c60203a52d1a918333846116e607cb (commit)
via f77021d7838e33e1662b42776ccc49be4435b1f2 (commit)
via 632cd6151b871e060d09a79f6b8a283cc0ab469c (commit)
via 7358d4af5775ee1bfa6099f63443d2ad27347f0d (commit)
via 81a2df84a879ca5cbaaa61dffce5c413d920011d (commit)
via 59b380d3682bb9fca26cae2c70c6c49934823f01 (commit)
via 8b2247a6ae88fbf16bfd65852feb0216a4ea4dac (commit)
via 1b01a9d09e5ecf21ff8bd9cce1c20372846a775c (commit)
via 735f817c7f66813135b4ef576c117aa424a5bdad (commit)
via fef88019d325474471a353304499e7919023912e (commit)
via 99522dd887762e71cbf4d895486f0e2f915eabda (commit)
via 999736efa5e3aaf06949675c4f77e1ef9cd0d71b (commit)
via 9c862cc45629b24d0a704926d339796926c692e5 (commit)
via 85d5708e2c44e04b1a148610434de2c040d7142b (commit)
via e6b3d50483fb739da2ca83e493a1c30043ba0464 (commit)
via 4f82e3bdef9612a632da55d3ddfd02326d358160 (commit)
via fc29e92af2bd2cfe8fa77dd311b9382680fd6324 (commit)
via 78cffeb00933814658da0867ada0209403946b51 (commit)
via 6661b327d26dc4e090772b0dd263ae8a7c79d3b8 (commit)
via 868d5912e25819663d9f1f7690bf8a33dacd9b51 (commit)
via 1764a1eb64926211f77067ce98372aa4c748bb8d (commit)
via 9129a474d3289157a4d8eb761383352dbfc2586e (commit)
via 417893fc06dcd5339e2cd0278a6badbbe847d6c4 (commit)
via dd4c4405f56ebbcb74d8f792ad528daf9b2bc79a (commit)
via 8e715d5202d79361622e89ef11a0d433558768f8 (commit)
via 5ac59583a36f1d83d35ba8d159f87bb281d3edc5 (commit)
via 1368c87b932734919bc0f392b351651cc6dd03d7 (commit)
via 3f15151252dd734210582a2ae8923dada661231f (commit)
via 79ec6c320ec5c24036856fd6b589ba8bf8b26ffc (commit)
via e53411af37a32d0c9b14515bd90c1e701c69f6e9 (commit)
via edea2125fa0791f920e3dd9e45c8aa0c9bfc6eb5 (commit)
via 8f5fafa643f2d908b9e97b6d08aeb55c4b96addf (commit)
via 01f9c1c0adfb37d11133c87056161f1edfba2672 (commit)
via ac7aaa887d827f8bdf1c2881d245cc655c6847b7 (commit)
via ebb6493b8ff763d42fe99438c8befe48c381b4aa (commit)
via c786a61641a965545c2e304b1c946afdedc6dc1a (commit)
via 1efa5d9d7f699cc3ee636d4e1b50b3fb3a863180 (commit)
via e5251c4886f626e6ef9f6ba82771c0e949e0071f (commit)
via aaad42c52aed2c3890378511ecb2f97a3731d23a (commit)
via 4beebf47805d0c3f80872e8f690f09c1658ae4e2 (commit)
via 792c8b202cffc8fed726f10b3514523b1fc92469 (commit)
via 8c624c6644563ed9c4fecec8b0b5f5dd115fe7ef (commit)
via d1c7f98e910bd19d21a649386f1a8066e4f41677 (commit)
via a90c8a06056300e0f9f5ffdae72b8a2ba26346fc (commit)
via 30570ab2d917dc6adec02ba272ee50c17124b688 (commit)
via 59908b70a929baf829202197d6e7ab5a3557da32 (commit)
via 585d1c63d6d0126607f424571e38a4a60683cf4b (commit)
via d335ae50bb855b7b302dab852005385c0227dcfb (commit)
via 8034dbfe87c45eaa2c0aef0e715b86fa79a7c4e3 (commit)
via 0ddf0f5fa4d9d18599a1642b9f87caaa1f463c5e (commit)
via 5a75094dfdd5f2307c4a1669e05db70355b08682 (commit)
via df5bad72ac8dac07a038f29823a1938bc9bbe72c (commit)
via 24a865aeec3048620dea967cba9bb1df28cfd052 (commit)
via 6756ff6dacd40b74676b4243bc12ea02a43f3ae9 (commit)
via 4e025223cedb89d5dff5c250ab3cab42bfeb195e (commit)
via d4ef4b9a0cc72eb9d85c6fef4aeb4b2f90b2b590 (commit)
via e560672e5119540d6e6860c177a9b969e5a71fb1 (commit)
via ebc9aa4dc554ec8aced4413b47a0668f3f5f1da1 (commit)
via ddd40bde5412d11fb4d320958f26572797442b74 (commit)
via 4565ca4899e702da0c515e11d614cddb3f483a7d (commit)
via ca924dafc7902bbc2a22660fb00f70c0d34c6471 (commit)
via 679d8390f4fb1253ac26a86a47a9279f3d88174e (commit)
via 250100ecf6468ad2cbc47663d1f6e83f1fe10f9a (commit)
via 5aabfb971d4338d3e488d05f8c06a9db973ede5d (commit)
via e9a1e75b3d83fe811d9a4e32d6d9a21f446a37d3 (commit)
via 6722a4d52b519ed768fa70b31cdd10da868cacbc (commit)
via 9f2167d3b5878a5709fd9f1ba2cd200f29f057c8 (commit)
via ca9e1e2997420dea3e3b14fea010ab0af3d75f32 (commit)
via 5fe9e2f204d67a9ed65ab8fc2a1feb09f6700b5b (commit)
via 13eb9e8620e67d7c617423ac1992a720ecdfbf7d (commit)
via bf41f8dc2265ca0cd9ffb8b8c11047291e69ca3c (commit)
via 926a65fa08617be677a93e9e388df0f229b01067 (commit)
via 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2 (commit)
via 61b01087195d5d1f875f01c5fd2eac5dc61d012d (commit)
via 84fcd68d77cc4aba23721e234622c33666e96c49 (commit)
via 96f093b960839b26ce37d9ff470933eed9c2b135 (commit)
via e828a215cc73946fa3681fcd88c3ef76b68272bd (commit)
via 0bea88f134736d6fd2872f77feaf309aca6c1bc6 (commit)
via 406cb1fd4af84fcbdf8339cf1afdae2cfb3b7946 (commit)
via 55689c559b3ac60765940d64a5b51007f94bddf7 (commit)
via 925ac83b98b02abec3f7f2a70b7c83170f851e29 (commit)
via 3f47015eab1abd9c7193a9e740f794c6a718c9f7 (commit)
via 4064b389d13d2861083499517f51d89492156099 (commit)
via 926985d03e3486f1a83615dc2794d310cb2cb520 (commit)
via 189f58f73fe02cf2729ab26d6ce8ab6469e82a1c (commit)
via 1ab0f2e8448a20674bfb8d12d463e5b3fec3ac6e (commit)
via dcb32f7928972c3ebe66f13a08560a1e19c62866 (commit)
via e25099da714a10dd3bc24be0002f9174fb9610c9 (commit)
via 2d39d007d30f65589cfe4b671dc91cdab70ed107 (commit)
via e9798fc8931856a7eaeee37155600146d7dc7c57 (commit)
via 59e01319e369a7c8e4f9a326d603dee7e3924c6b (commit)
via a6e68091deaf13986355b8763c7348b2da71d7d5 (commit)
via ad5a633a9e77e561675aca5263853db8161e82fa (commit)
via 362d429a32fecd1b59f309466e098935242f9054 (commit)
via 6c5a9b252b7bc062ed807aff342d0314811b5bde (commit)
via 8320629b004d5fc8194afb5d277a0d9e01299121 (commit)
via 0eb494bd7c49f0559c870d8a687ad0552f2feeb7 (commit)
via f79a424a36d3a5896c43c5cae5d88d690ecbe90e (commit)
via 75bda54b2b5cdf06f334e72cd554b616a887d1cf (commit)
via 2b97fc4f4f30bff13b94ad9b25766b4a6b2f6655 (commit)
via a1301a0545acc48bf2f94731cb26577806e3c383 (commit)
via 8bb79638bc658d8e57b15ae1b16d28a08ec06a69 (commit)
via 40cf6abada7f06648643b14b9b7db21d0fde3b27 (commit)
via 81b49bb4d72fdfb5db8d7ad5f9b086c489acdb86 (commit)
via c4430b49b30dcd74226d272fa3da4812afc2c6f7 (commit)
via eeebde9d81c4bbc4e5388db5cd6148ca3589b91e (commit)
via 4fae538655882db7c085dab798b4fb29c4a9d8f1 (commit)
via 8c5e6268927737a472348d1ff8ecb2201c76b98a (commit)
via cda19a7cbc56ddd67c7d19ec7d072a64477d254b (commit)
via c65177c8ea0dfba3aaa84ea1bf2583b2d818d23d (commit)
via b5cfd5e541d4bbb7f13ad93392018711e19ba0e5 (commit)
via 430f3e516852ee9a8af655626bbab16b03e4cf72 (commit)
via 1b47d1cf3e0bc8e3c6166d049070dda2f298c7bf (commit)
via d86526508726fc2941a7d35730013b75f49ab4a5 (commit)
via a2158e5b2c17043f0f3aa194009408aa73bd62ce (commit)
via 85b06e8c212c9733cc77e71d8a72c72161dc34f2 (commit)
via 4f87326ae6c17e26769b4ae276001b49d5bb3561 (commit)
via 06c9c2a763326d4b30ff9448f726928538fba94c (commit)
via 8d07954792d35120580d9d94fedd642d4797cd53 (commit)
via bc8bb2e13305ae879b31a6accbd3f5f1855bf327 (commit)
via 112aa5ce69ec2440db83d89196144b782f064564 (commit)
via 70af8c7c72300e1afe1974de22c117ff5566487d (commit)
via 03e690228b6f5184d67a4ff3de56a861fcac9a23 (commit)
via 3665d7750795ee247864c5619301bd1638c31175 (commit)
via bb22c63e7bf7544d5a765140cc29b87fd1e2410c (commit)
via d749aee2ec681e0304dd53c63f276af98edeaf31 (commit)
via f9a545ca4c82e51fc7c47793fec34eb5deb19e46 (commit)
via a4f1f8de765810aecff1194c74a108682e3de28e (commit)
via 287edb431de6ae5d7106dd4e593a193908b9ba9f (commit)
via 2f0a32d552d19ac9224dda91975fb128487714a6 (commit)
via 99d7c21284686ba3d021a6d09938b82ea56de783 (commit)
via dae1d2e24f993e1eef9ab429326652f40a006dfb (commit)
via ed6ec070b25a8995bccb3cec1a63cb111e06a6fb (commit)
via 97131f9739d60c41a530a52c5f2a2861ba68637e (commit)
via 8cad081427cd8326318ef1a0dc81c1eefaf73d29 (commit)
via 309b24ff461b623770e950d6ff12654241bdd39b (commit)
via 4fb4173e7538a13246af1ae75afa9d783630da55 (commit)
via 15556181365ba9b65c91c136ce00c74df06da3dd (commit)
via 0bfea71bfc68c4598cfc0ad936a0507e6423e014 (commit)
via 0dc7060246ec79dfb0de03aca4c5fe8ded3fbffa (commit)
via 52d165984d1a7784a1a6e0a3b845b19559698203 (commit)
via a948a71497441782d409e98f4068bd04cadf581a (commit)
via 69da5b8b1b3ed9f6c6e21cae8a8a86c8fbf83a76 (commit)
via 8f95286e55b815323110f79d92f338878930509f (commit)
via c4c85ce1694bd421912d1902f2d614c15bebbea1 (commit)
via f78cf6ebc22712c470da4af720915b09ae8e8ebe (commit)
via e31eaa10fc2fc2730311596b7ee4ac16050efe62 (commit)
via 4c250f85ed6ad7f697c42137f1e67aadacf73dac (commit)
via 941eceae0a54d023dce0c43757b0104b8adbcc9c (commit)
via 79143dc457f23670d860a2fa134b13eb62db490b (commit)
via 80dd433545aecf82aa178365dbc6e0650e12907b (commit)
via d867ca0fabdb5398d6a964aa393fadf678af2bbf (commit)
via 3eb58c78cacf7686435e963d423c6c035a737bc0 (commit)
via e90d2063e0bd98767fdcd38962ad5be6f2eda68e (commit)
via 0699e756aead6ec1b3a80f5e044d8c3cb35e3280 (commit)
via 384501f85cc9e66a686a96e349241442af29a56b (commit)
via 707e700d4861b2c47235183ce6e98d985819dd2d (commit)
via 67a88d3fd748cc42730e142cbfa79d0b7fb7a813 (commit)
via 42177c3c2dabc2b46adb7133374a4c2da46b1f9e (commit)
via 910caee23c188c5b9575d87bf479d9caa3ab8d07 (commit)
via e1bf2fce994c07e02466016d94007e509f2fe478 (commit)
via 179a2105dc0a9c9341d4347b711b62c2bbb9ccfb (commit)
via f7a8e38c733ab5695bda72b7b69a803c8d98c80b (commit)
via c1ebc31d07e2c04c0158fbd3e7289db650b41c1d (commit)
via ad3f4a5e40390f14762648986dae8430760202c2 (commit)
via 62318bb5e15a45164d9cba4a03c9e5583e0b6aa3 (commit)
via a9d549be7404552a13a95db041e7e1da64729341 (commit)
via dd3c0d1df47590362b21e7d582df513a98942a54 (commit)
via 3eb8c8e08c993b1458a6d79f434e0305936bcd14 (commit)
via e580e10deec55a34efd3fc2825bd80143af67d4a (commit)
via de063908e81bac570c2c485a9d91d496835fbbee (commit)
via 9723ecc53f8afd751d66d4e2db24f8fd05ffb467 (commit)
via 6fe98e3c2a669c9dc779980426a81fbe1ddcfff3 (commit)
via 576f337f42e5c72aec48d8ea1d36ab5059588301 (commit)
via d3a000535b506ae8af54119af3dde7d14509654e (commit)
via 2048643593b3e9c8f34af40bbd00342c2c0c1318 (commit)
via f3813c74f58f13c2b93859f0b4e07788af631960 (commit)
via e45768f4b916faff9ace1f1da25e423eba2ab430 (commit)
via 9652c9fd3f69836a928626ca9fc940e80e725279 (commit)
via a5572545c3b6106052898e016757398efd475dd8 (commit)
via b37a785ae2a03a7c84b316b67e50f77a827a81e8 (commit)
via 0b99dc120fc89081901be0b884f77e02530b8588 (commit)
via fde3b8cb6fc790249bfb918971a33a0cce7a4cf8 (commit)
via 3f6fb6a54d85ed33bb23aa3988934166b17261c9 (commit)
via 356ffa38fc98b8587499aa004eff6af103cfaa78 (commit)
via edfe1b966d53caf3ed9e17cd525b0d94beff0aaf (commit)
via bfd50c768ccf03b2e4f3d3ecbeb5fb344ff79129 (commit)
via fe244439fca78ce558073b856dac69290baf67c5 (commit)
via a6c21ad6d670896283a941b5bfb233ff5987c50b (commit)
via 30a20a04aae6b14749980d575cc69180998ca0c9 (commit)
via ae21ebb0f609f8a2aa8ffc3d4b84c465111ec2c3 (commit)
via 7cf66b7e44e389205ae4344764fbf136550854ce (commit)
via fd9334b7d856c4f748919d035b2a4ad3c85b545b (commit)
via 0c3b69c6e170bee7dd775090af2bdd1cae900080 (commit)
via 0fdc040591f07f5f876ff2a16ea363e9026346ae (commit)
via f5edd310465966137f0cd4e2109d90f7e5d5965f (commit)
via 46f82ccf0e1984c8b195a5492618f5d065682c3e (commit)
via d324bf65587ab92ffc8a5a6f94cefc2fc78a52f5 (commit)
via 49816e5010448c5c966ba3964934adf68a972400 (commit)
via 50f16fdb5e03054e470dca9aba6408062c198fd7 (commit)
via e59c215e14b5718f62699ec32514453b983ff603 (commit)
via 73ac6b09eeeebcdb03965076d4aa8a8a7a361ebe (commit)
via 463a593e465643f157fe806b3fe826b6ed593750 (commit)
via 6c92dafaffbb0c1ff83ffc8a1004cc40cd500310 (commit)
via 2826df63af5ce079b537fb8beaa09b0138b7c308 (commit)
via 3cf7f4bf3f7c6947bab97f269172236dea5b9d52 (commit)
via f28bffcf3e0369b89631df178865a2de00145f97 (commit)
via 2a81a3ad5a7fa790d2b03845b81380f395fb5ac8 (commit)
via c55ce18325b71ece62d213ef4ab60e6c95c07da8 (commit)
via 86a307f08882d02ad443e848e096a30ca14ec918 (commit)
via 9c2b48c4baccba83732e2586eff2b7ee64c63ac9 (commit)
via c13544e653c5ad96bcdc4ffb4fa13316fd4ac9b3 (commit)
via 1d4cd68ac7b1b5dbab042a1d5b7b82710d0a7769 (commit)
via 261450e93af0b0406178e9ef121f81e721e0855c (commit)
via 0bb032a0b6ce7ca30fb250a647b81394faeeb730 (commit)
via 99785047d78bc833643292c1b795ea24e7916641 (commit)
via 6135be8219f25c87c5a551354b889e8c4e55fbfa (commit)
via 49ae23f163b8e4ad45f7146f95235253691a0f4b (commit)
via 1ff545f50926224d095e3809636c642219ea9078 (commit)
via e47b0a39b7849489e6d9a167117ebb3be5eea4d4 (commit)
via 3e1fae0b23d4df32f239e5aa1350e40557d8ada4 (commit)
via 2636154f904f6123de434503ff72e65e4a27cbae (commit)
via be1bc7ac71892b98cf5449b9df388ba7de462e10 (commit)
via 4f714bac4547d0a025afd314c309ca5cb603e212 (commit)
via 6ff5cf12901e9322c4df72e5484b2ed9cdfe9a4f (commit)
via d4aeefff3273af7249ca459eb7e6bc4d358cecd5 (commit)
via e95a07de2e9eb88d461efd236664a7d12f204e74 (commit)
via 1302962baac0113c3a9ff10f1317271ca060a1af (commit)
via ac65c96b59824fabc738a300ad2a36332e4ff01d (commit)
via 3255c92714737bb461fb67012376788530f16e40 (commit)
via 01e08ad7fd14a1683b8eb21f4aeb5e8ca8e54a03 (commit)
via a8414d057fa2ea8406a8911492bf91fb4e6d8166 (commit)
via 91920024ba2897735eb1228a17a2cd2bd0f82c61 (commit)
via 5e0454234b2e1975d7ecd79f8e40a43e6782f968 (commit)
via 4ab86cf2cd1a3133f8a5e1dd529104cb641b2ac8 (commit)
via 9fa2a95177265905408c51d13c96e752b14a0824 (commit)
via cc5c0e24296cd58e1d6b04198da3e99af2c3071d (commit)
via 8e105c57dd7c082a36d4710e5ddf27bda118adb5 (commit)
via 679661cb33158ce088dc3b7a3b5cf2fc9a7dad29 (commit)
via 31a30f5485859fd3df2839fc309d836e3206546e (commit)
via ec6aba3f5e5ea3f3e9e5b9d70b162fe6e0f33df2 (commit)
via e2e2a6756e459bb49446c22b89efa07306c35f21 (commit)
via 18cebf0c6b77fb88486574e1fcfcc2b89d52b279 (commit)
via ef5f41fe04fc405c0db1aa7871fc90fa5110acf2 (commit)
via 9611300c4a297500c0130d6b372be3bd409c6aaa (commit)
via a739faa43b38c39ae49950f3abc79d66ac0ab545 (commit)
via 078162ae556edceb66899017b9a3e613f2886475 (commit)
via c589e8b9c1f1d3b4fd4959bae8bfae5be04ae8fa (commit)
via 6997749029cc8d634eba96d8fe8d222cb35ed41a (commit)
via 2e386bd4b400b73103900bcccb8b5166258448a6 (commit)
via dd46abf659db56f51b981ff675ef02ea92d70ed5 (commit)
via cbe4f685dbfcef07a17c6cef56d35abf12fd677b (commit)
via a4b89470c575878fbd0d3e1bddd45ed2a3289e6b (commit)
via 0c8063199fe37278da7fe03adb5723deb4263f82 (commit)
via e08c212ea82bf00c90eec566b1058862b82b78bf (commit)
via 4fd41636ae9c11a96b3893cbc939fd4854b64b56 (commit)
via a149f9e04c33ce6012a4a3e837c98db6d3074a44 (commit)
via 4e18854902a306ea6ad390eb90a9f43f49c8222b (commit)
via e77d93ac97216b2b67124a605649487a767e8d09 (commit)
via 63f62558f6490de53d57504ac48076165f18b9e8 (commit)
via 631651ce7025329af4264f8c576ef77ec3339288 (commit)
via 754c4ab6e60afe6f90ce7afda9670efe3debe13d (commit)
via 461fc3cb6ebabc9f3fa5213749956467a14ebfd4 (commit)
via 52cc6e6c2a7d61f93e17525b161b7c33ea4ade98 (commit)
via 88119987e9f33fa12243eacee75bd9ec4c99f936 (commit)
via 25cb92eaa2253745fd27c957e8d576cd90b7b244 (commit)
via 73487153601d7ad71f04e6762cabd3b3712c860a (commit)
via ec4d7f29e1fade106ce33ebbbe0147330584a784 (commit)
via c9abaf3a9e3e78a875e9722649615339dfb75047 (commit)
via 8c6a400b68d28b6c9b37b186a1313eece27b8222 (commit)
via bf384859fca46c07f2e43c376ccaf9111bd3b1b9 (commit)
via 83e33ec80c0c6485d8b116b13045b3488071770f (commit)
via 542a15d604018ea73a5a28f26b30b92fb668e399 (commit)
via e4543dee376bae0e459f9008a2061f2a2529dcee (commit)
via acf47441057d4ce66bcb84b67038e1e99e822c72 (commit)
via 70d184ad5dac430b1591166893689ecb5b774777 (commit)
via a215fa328b934f3d140e433725b2d7a540b826f5 (commit)
via 795329663bef70bf69fddd14267daff57abfadd4 (commit)
via 8a83c13c2037e69e64474131916b6c53ececef34 (commit)
via cd4ffa3493b2e1f3c53e83b60dd4f7d03dbb518a (commit)
via 717534f5a10d288081a0d8b40644794cc1b38861 (commit)
via a9d0e238a2ddfc35c772e2a72a3c73ced70538c4 (commit)
via a86e015d7c5fd0b0d8286523aa5d7e3b036f8589 (commit)
via 92113f50b12c221ca1db5dbccd51d762be5d4f6e (commit)
via 16717b1380deba97032b3ec698c051cbcb032263 (commit)
via 364d07e849b394da94b715450814ebf559ca9cb5 (commit)
via 5638c66218f33efb7fbf7e3700d49ff7b6b0f090 (commit)
via 1ef6e04be1902dfad65d7df155756ab5a73aa843 (commit)
via db8fc3e3a28dc8681dabafc868f2b38763876e7f (commit)
via 5705a62ebb133531c81ff820edae5f92774dfa43 (commit)
via 7855203ec77dd6165607297f90d0be20734fe692 (commit)
via e006dc3e92ac9a8bc8792bdf9c26fb18f064f4f8 (commit)
via bddb6473013e4cc91d4a76f182702d0bd1856f2c (commit)
via 2e5251ee15359eebf7d081c265948713222ef850 (commit)
via 3b2040e2af2f8139c1c319a2cbc429035d93f217 (commit)
via c9ec49aaf765eef5ba3450ad0b60ae031fc571c6 (commit)
via 77ac9b0e168fd353a8d4826e4aa46fcf98b377cf (commit)
via 7f8b0c9ad5ad478b0ed0df4a8a9138449b6f63b4 (commit)
via 2d1592a9e5aa5d212336af266e07ed20e8b56b06 (commit)
via 0a6fabd5e27753d3cb3827f05c3d279ba31cb1c2 (commit)
via 7385c5e065fe7ab4dfec519d97eea2788306b00c (commit)
via 0ac3a688424c0f5da2e0bd7086dfacf846c47970 (commit)
via 78fa7f965cb3bdb319a88028960e69fbf8773977 (commit)
via e245b3b40b9508a4579e0c9506df0cb3f8bc208a (commit)
via e4d3b7c6dc04c8dc443610bc45bafef86519c92c (commit)
via 182328dfa329c085df1f334a32d68ad925dab476 (commit)
via 5f20e1ed0833a7dc5816cee885235c42652cb517 (commit)
via 21b0e9d23d1c72940fb01f3dc9ad6c7975abaf5d (commit)
via 824f8da33c64169ea6e768f899ba7501a3536823 (commit)
via ff69cf49ecc375485209da7caabea778b5ff1ca2 (commit)
via 38b3546867425bd64dbc5920111a843a3330646b (commit)
via ec4ebeb8ecfae6b0e3cc6fedabb7e2f84509c930 (commit)
via 48e10c2530fe52c9bde6197db07674a851aa0f5d (commit)
via c4a9bf2722c9650c1c0f4290670ff33c0b7af87c (commit)
via 479cab63fda13cde6707a71dd16d9bb4c1f93b4a (commit)
via 17494c7a2f58e91cdcaa8b6e28bb275d49f25437 (commit)
via 7c4d8ab93f853f7abee812976340008554f25fb6 (commit)
via 684a395b40dd02ef04d3a914b7f90ea96e808f5c (commit)
via 4e058c0c696584bac4b299f241ca5e74d7121b06 (commit)
via d22ff00401ed90df31342dcce12bb4ceb493f232 (commit)
via a61bc91e4109168327481710f17affe728af80d5 (commit)
via c58ee77ef68b2cf49db6af85ede886580ad2b6bc (commit)
via 601d7f0170b1f9f929acadd3c37d60c5876ca7be (commit)
via ef8b3f326fe9ab9aab0050071c6f8975b8ecd354 (commit)
via 51f6bda14d754aa6aea474300c8c51f15f32f0be (commit)
via 711a621387d48993712031be9164510de7b27054 (commit)
via 4948469dafe0a5ff130706ff2fb13676c417a538 (commit)
via 2626464bbbb35e29f01a7e5a532d06da8feef837 (commit)
via 83a82bdac7711760eed682b91f6be4435606a0dd (commit)
via 616019706c0101316835f85843c59439e20a1c8e (commit)
via cb6be8450ae82e705dc4c65ca4415b1ed77abd6b (commit)
via a705f09ca7cf67fdf8947716d27e49f1f9c58366 (commit)
via 9f8b63669172d090e653b68b18b162539428663a (commit)
via ddcbd508353cff40985a2e40334a82d91bf95341 (commit)
via 560e7210fa3006c98c711e96cdbac6f9a0b391a0 (commit)
via 9500689fabae67565bee456d33953ae7b139a209 (commit)
via 505839e902948071f5e7876d274db4345b28b49d (commit)
via 78def66fe5de6f50a555b24d3134d5ec0dc32021 (commit)
via db96ce78efca85a1953fcb14d4ba24b7989044cc (commit)
via e2f1f9c450d7cd9e46bd23cef80be7db829d44c5 (commit)
via 56e8727ef471d54b56a88f340d19e2ee05d898bf (commit)
via 137e70ef17439961ac3ce8fa288675502cd26178 (commit)
via a9a606dd7a35d81932f970e00cdf38b466eee2d4 (commit)
via e848d7f24f16319fa07ded7a76af27187a8348ee (commit)
via db21e8ab55f132828542f1a34642d98bbaf6d8ee (commit)
via 31d3f525dc01638aecae460cb4bc2040c9e4df10 (commit)
via 489ef1ad9701548d0987c45cab34d33d8dcb73fa (commit)
via 95f7b6ea18efc84f160053bec169ab8a736cbb1a (commit)
via c24dd6f877460227e6eed02d0cca0956366748cb (commit)
via 2f9c3c1f2f692d16e55022f90333abbf7e3f13cf (commit)
via 4d4d0ee24097af2ec942ca78025c95908063a4b3 (commit)
via 14153d9340c8220b4cc4f0088a14f6260b3af548 (commit)
via e402f8d476894b34d7bce97fbc961123e4775b4f (commit)
via 9b93c0778662b587f0a526583fb011ebdf9788d2 (commit)
via e219a9fb24d6efe15066526ded67093f1a59c8ad (commit)
via 99a107f4ab33c0791df351408c76c07f5820cde5 (commit)
via f843346fdfc748cb5b67d079958202c8d4e32e0c (commit)
via 54c201f10077610d7e09a7787d89d17264761f5d (commit)
via 5637b1961aafcdc6950d22aa7a3637221a57e99a (commit)
via 39238e406f2e0cdd63a71f4b4b8700988e1dac9e (commit)
via 1a32f61cafb9f735815bf8f4abb0d8e1019269c3 (commit)
via f3395938a1ead66ef30ab1d261e3a38fea16faa4 (commit)
via 36e95aa1164d24b5b2c46d64958c709a9a02fd51 (commit)
via 0692e63e77ba5f6e1dd24619f215c06a53a48845 (commit)
via c9d69c549110808b6314bac44b357b4f0fb2b699 (commit)
via fd99c6922384541b484e5e3ac4422d8472011b42 (commit)
via 432a1a86cea043993348f31d14628548e267e3b8 (commit)
via fe5e7003544e4e8f18efa7b466a65f336d8c8e4d (commit)
via 475624c79f83adc6befd4d631eeaf83d83fe89f9 (commit)
via 3d05a83e9466e6075dec6e4b7a1502989d9205e9 (commit)
via acfbd3b209c50aeff46fc09f9d792a457f51722d (commit)
via 9e48d735ae41dd79b6ff856ea4e5ba79112f440c (commit)
via cfa31a99243065c40dc3f5a6393bb8ec923d2bd2 (commit)
via 5178a891fb3713609ade921d010bed453498f355 (commit)
via 67727b623a4889ee0e38bd0ad9aaba710e722e4f (commit)
via b31501e6a7d8cb5c1b3714f289d9c55753447d83 (commit)
via e9975a7383a299789fd18b184880a00dfb0e15ed (commit)
via c8cf64492fbb3329d10b15ff660c9f262e46ae1f (commit)
via 1ccec996459f9cea1bc32b6d736692fd087aae0e (commit)
via 4f06531cf0b40476bbfd7d6a4312bacb1d958ffe (commit)
via c1d2bae691446b1fb44d3281092a7529f2f4f648 (commit)
via 5e6de42e41584afd09a579b08d25902c9db88927 (commit)
via b39ac098a37803588a3a801d91b85170ddaa7137 (commit)
via a5dd2311e2083f2c32fc55b5339b1e064e8b7de3 (commit)
via e57fcdbaee0fe7562168a363c1f3273a92fa595a (commit)
via dc44fdda4e0705993f44275d22b2daa45d4bae6f (commit)
via 160519b6e3c5da340011b454e034a188529a4cc8 (commit)
via 5538d85d751405c573cfc0491b1e663bb7ce19f5 (commit)
via e3a81c18ff3f55727c1308bb894a8eb03e8f48b1 (commit)
via d6b120403a24c9c233fa70ff8fca4a5da39f209e (commit)
via e156ec53472f22bbb890ebc76a1acdd9a9eda70c (commit)
via f80bba65ac947cdc2f99253623e976c815b63140 (commit)
via d694e5ded809ef2842eb5101e4d03e6e657e4671 (commit)
via 5d33e8c43d18b6aef9461086c8fbd5ee493b55df (commit)
via fae2d0d5854141a9af7c1177c663a0910ab18ad0 (commit)
via 14a1dcf8c6d299b7b7d91b27c4bd73b06e1fd61d (commit)
via ce11fa4222196986805f54ab5b67b77dd5fe8e47 (commit)
via 74402ae4cf3274fb20bc3183941c4099dc672b89 (commit)
via c4412dfaa0bb8c6e95cb4476e80b648cfa9288f2 (commit)
via b2c418d1d6aed9e2eb599941218e1f8c0d13a445 (commit)
via 48fab55d59089444ac9dbd81ea3c6978f55f9474 (commit)
via efb25130e7c98335f72741cb62c9b3cf4ab076b3 (commit)
via e37672ef8e814456d53772220d885c91941db7bd (commit)
via a3edee3da65d09f2b00256277ed8d8a608c9c627 (commit)
via b8855cafa94a12af7e087a4866367f2336e10bdb (commit)
via 682108d6e371706f340cfe8575d8e00d1dab09cc (commit)
via 6ea285d4f7cf6a9fd18bd9a3811944fc02d0e34c (commit)
via 1aa773d84cd6431aa1483eb34a7f4204949a610f (commit)
via 5519b6cb34e655c96dca4ec8be2a3eabda941f3c (commit)
via ec6446047b5007290ca4d6d31e67239c424f33bb (commit)
via de552586600ecd4123b904e65d09c3ef0717fc2a (commit)
via 58ec390c5c70c0c7625e3f52caf4c4f20b3bffa3 (commit)
via 34ebe17e3d731f19c6c44a8777b994241b5de7ee (commit)
via 67c6c4489b2daeb6001cf2f462cc189f2c841b5a (commit)
via 41139644452bb11a162254b442ab611644ceb603 (commit)
via c84b3ecae4d2d0b532c64c958857f8104dbae923 (commit)
via 27ba6b2117e3ab88aedaa904292707871e131393 (commit)
via 76ae47d1f6061a09f4a8e20852a9874ea28a4e19 (commit)
via bfaafb81eac3c4ea98cb63e2c0bdb8fc02105b6c (commit)
via 6a9f0125c633e6203e5fda37e6220ea862038df6 (commit)
via 990a0cff9891fce08b3e163720dfa08fffdede5f (commit)
via 54f86d7796c55289518befaefdcdd7d84ebefa88 (commit)
via e66cc9c2e0eca69c62a3a61133fc4ba220274bc7 (commit)
via c37ebedf94c5dbbed3c685272a0cdc4bea67fb04 (commit)
via 24fd003fcc458c673803b953ca857a96bfa5183c (commit)
via a9d323bf4df6aeb6333b057514b748d05febc1a9 (commit)
via e1df41b8ba21d04560ad4b65837bf7bb32fbdf34 (commit)
via 2af8dc8ec9301a935228568f80f8ba18531a3ffc (commit)
via b0dfec1a5000ad22ef4dfa9c21c89e90b0e68673 (commit)
via 862e13c5c852b8ea55c1b53a67803105d473a342 (commit)
via ee0e702fa85ebecb95ac89aeba5959b111ba5d57 (commit)
via 0dcde0e52b99cade8f584751b2d2756c20e78625 (commit)
via f1213a3d4df71a4009a3f9a09a9aab002b42ce35 (commit)
via f39c8aea4a0f9dc4b910c003b336e124149ab88b (commit)
via 5dcd9589aff3bc2a91ad2dbef98cd9012ed9b637 (commit)
via 857abf9ee880aecc9039b993e73683f1a6019cf6 (commit)
via 5db2d64ddcd9f4eec0db6bbb6a160c95ac81cc6e (commit)
via 37ded0b31a0a0dcadb93da7bd85248b90d9af57f (commit)
via 50dd99f81efde9267569def411a470e082ac312b (commit)
via 496a4378cfcc8430dd1155bb3ae5448f27ebfdfa (commit)
via 6e6eda44e9b1b7236e3baccc25e008eb674440aa (commit)
via 27e122bfdd9edcc04e39cbe8a033d89ef36207d3 (commit)
via 9cc499cb83497dcf89dcaac8c7a59e84a0b4a0e1 (commit)
via d04505cee4d9df2cb9cf9a2909a948589be4ff09 (commit)
via 2273e3e71f5879cdf51f956c19a153f4280d9531 (commit)
via 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1 (commit)
via 82ea1075a4ae1ef8f6c0ba3cca5d6a1968a77f3f (commit)
via c926e1e8bf4eedc6c999c6ad1a1f0fff96783154 (commit)
via 358e2303c565cb387aca660c2e55967b683792ff (commit)
via 2ddb97d633c1721d74615b84d2c6ebd360b9bda3 (commit)
via 74567fe1e92c8f10da3a639d235cdbc4c4d5cc9d (commit)
via 192aefd97c825f7636e0885eecef9a5834e53d05 (commit)
via cc3ccc1f474407507118ae0c53af307d0dd7bdf2 (commit)
via 41521402ab646288ff1a4a12c5110d52dddeb4b1 (commit)
via 5cb80378e05cc0ec208adfbe09e15efd04431267 (commit)
via 0c7670c04135ba2ade5ffe241b66094aeb891f16 (commit)
via f0645fdb5d7be3c7d7090159847853f5a06ed7e4 (commit)
via df354bdb4031c1f67c03d71527105da4e35d1a6e (commit)
via b30c6c0843ae18a275f65e7ad5ab86bd78c82678 (commit)
via 30d350e8cae28631a083c04a9ccf8d5bcaf69adb (commit)
via 6c86102687c491bdac96577cabf98a0640ad8a60 (commit)
via 37035a073be57c9f6d00f2558f74ce69b7067295 (commit)
via 765bfaf3ccf1a96e981210c429d5beacb229b4ce (commit)
via 938eb7ec8ac6bc670fcdc95a2189f4b13390e9cb (commit)
via 50c678a05ecc181f4a63fd930bd345bce72dfc68 (commit)
via b5dc1d86b2a763f98d8414589a545fcbda09a36c (commit)
via 67d9442a23eb4fab4dff3f4d1ab142948e21e880 (commit)
via 5f74b5f0820d7bf4aa50d0341c9316e6a915738e (commit)
via bf4d941c69ed13ec03a2e179009780eb1ad11359 (commit)
via 71ff1ab09693f1a90e46ba74cd5bbadf449161ea (commit)
via 030c77386b5018d30282cda2fb6aabe620a0f15b (commit)
via a206dbd5432240440b6bbc63ff3165ad579d5efb (commit)
via 6342eda010ae512cb972e70f3824ca0de638c293 (commit)
via ff008c094a577e61f619cc39df2eab858dee0fe5 (commit)
via d2ee006cd5d5f3cf5ff56594fbf3d8955685d8a1 (commit)
via 74a60943da1619dc598bbf4b440b78914ebe98a9 (commit)
via 71d57323f77270278f75a57f63499c82f8f4ddd0 (commit)
via 4fcd0d3125dd7d8441f33c3ebf8b63e5b2093a68 (commit)
via 6f8d74e83a1c8073dada7288be92c2976c638e27 (commit)
via 0034051f94a5e0e91e2574ec07f9b5374d39d6cd (commit)
via 0346449357648e45f5b68f792cbf6dc914b16d9c (commit)
via 89e3ffaa1fb56bcc76f626a64afcc25e506d8b54 (commit)
via fc97ca7de4ecd731aa6e470a7c68f5b26cbda7b0 (commit)
via 78502c021478d97672232015b7df06a7d52e531b (commit)
via 71eb80242fd144bddc06c98b3bdaa91341a65f26 (commit)
via 0555ab65d0e43d03b2d40c95d833dd050eea6c23 (commit)
via 23d63caf0cc09f19a01794458bf2457a67caac05 (commit)
via 031eda633fe45496f47fad9d4b656a0100144f75 (commit)
via 77def12e8d0d957d9fe587816b27b6df4f88ce90 (commit)
via 080db7e6245ebf63f0b3104e92b99142c87fe291 (commit)
via b50e03318eeb6464f7143eb524eb7b8ed2a5d02a (commit)
via 88504d121c5e08fff947b92e698a54d24d14c375 (commit)
via ede93598806f8ddb21d193eb002fad8218416e40 (commit)
via 35ac625edfbc78dde6ccf78f8d577f3d8edadbce (commit)
via 4bd053d4e1a4165c7b4b5d91a6f674e40250301a (commit)
via a2936cc155f8b5ce2afaa82820fa377a037f2be3 (commit)
via 94cfeebfec6574350fb4980c2b0cc6a7d84ba4f7 (commit)
via 47e5578077fa332f8476a13aa4ad6ba29e003a1a (commit)
via 830c0ba6c96b009d1c9c4fa31dea7cf4f6de4e37 (commit)
via a8307030f7af9fc88e3e66b6eefcc89f6b6e15c5 (commit)
via 07a778c5592002042269785ec41bf4d93fc7db91 (commit)
via 9cc0c06cac86d3460ee1f5b5e2c8669d9709663e (commit)
via 9e84764e4911a4ecf9b82df3beb6cd289eb68ada (commit)
via b8da54961c78a690c6bf02618d4c28fb9d320177 (commit)
via 28465ec39050a2779fe98503c690ed4df2711e98 (commit)
via bcea2b3da4bca69194a2154b92f9f734edbe8322 (commit)
via a251c4f239e7b42856314412142cd9777f91dbf1 (commit)
via 2360fc72c24eb39d32b0afd6a389cfe8f10cb2f9 (commit)
via fc9c42d22de8c2c5555573a1a3e29b2d30146a29 (commit)
via 3303478b6f9943fd5514268bb1c0c42a638d107a (commit)
via d158640b970e5d8f0e5d4f8c6c278f03ee0e47e7 (commit)
via d098ac2ee97872ebcd8366cb700d7d2a5e668b8c (commit)
via d11b5e89203a5340d4e5ca51c4c02db17c33dc1f (commit)
via 7a1c4cc8e1baaf51a2058edd0a4179bd586345f3 (commit)
via de0694d2f4c7dba909eb922e3fa1a1269d3cbc78 (commit)
via bc0688b0e7846ec9bb38e4e014ed23be84876948 (commit)
via b8a7bf58c974c9ba4518fd894963cd66a19baf7e (commit)
via 3a54f64afc94e9394a527872b957609d14002ff1 (commit)
via af788f1fef2cbdb63b05f14106f907dff87fd6bf (commit)
via 7c390c1149baab7bd33e741afcdefb827275d5cd (commit)
via a94c70e333e3082a55f8a82ff026aae9b35964ad (commit)
via b6982ea32af206b6ef661b492eea7b274af97bd2 (commit)
via b5673b0b0860921b516d9a0e514891e78e444578 (commit)
via 4cea30b55073337c32d03bbd9dc813eb92795c2e (commit)
via f0c1ac2d7613802bbbc9545684ba0a9346bfdc0f (commit)
via 5d422dbf6640bae02dc13ad7c9406fe13f42a1ef (commit)
via 072ba32545ec255923699c0c181345910f86a208 (commit)
via 62168918aceee04418765089cdc5fdfb34bf66a3 (commit)
via 929a9cae2b351e67ac1953514a63b0c54095361c (commit)
via 04ea273e2988cc405ae3ee7555a0f028258c17a5 (commit)
via 58a5aabf65705e4107cc59ad25c76bbbcedc52bf (commit)
via 43f5888605c770c012421a420766b01e1ad8a96b (commit)
via d1b5154a7c17058ff49aa67f389f52496228e4b6 (commit)
via 5b495e880e559c413a80d0dcc741a5076f3f7eb4 (commit)
via 141bf465a7b13f1e4c92a76ca2df208c8d375385 (commit)
via 7a87432e627715d9062367e2321427a4510d5822 (commit)
via ac2e283bd92dcc5af494938d6cfed82e4074abde (commit)
via 03fd43339b3ffd2537a1621d628d504cee13c9d5 (commit)
via 6ed8870ee7ac32e786030403de4423b8d7647679 (commit)
via 7a6f36fc9073def2a531a4090b97d223d5a5c69d (commit)
via 7c576f2e3d986b0f58883776822323ff57535a3c (commit)
via 1f91575ae48846e3a97a0c3921dd808470ba4427 (commit)
via 523ab3a9278385abc26e1438803499a66e260f43 (commit)
via 8e4fcdf22e1b5d14f740276e3a6c34a90bf3117c (commit)
via 201bae76d11706e9fe9c09491ed216c225a02e9d (commit)
via be4334aa0b66fd01f9b3f859c309545459dfab00 (commit)
via b02c482b25d489b723927f45c4cfe3fedd89f5b7 (commit)
via 99240dbca4848d3aa76b34f5ff0d6da4d6bf911c (commit)
via 1440e71559ad3565dff46e9cac31f31e6748d8f2 (commit)
via a407cb3d58c78b93afe294be13e7b360b11fd542 (commit)
via 1cccbc6845d795f8d8e2b0c7ba637fb28e179e71 (commit)
via 0c81e90348e53e673a05e92e150cec0f598a2d4e (commit)
via 882cc4d6b2b9f391e72fdc0bf8eb82bdb846ca61 (commit)
via 081891b38f05f9a186814ab7d1cd5c572b8f777f (commit)
via 4472e3c7cbfb5fd33ce575bd2666036d363b2ce2 (commit)
via 04c0fbf9fa780b4a281c5982ec33e5632852cfa1 (commit)
via cd79c2fae07f7b1a8d2e2f501488de7a2d11eac5 (commit)
via 76039741c19fa58e404879b334475b9ae01cd8dd (commit)
via dc19181b46dec42bc5db83861731656a5b45b899 (commit)
via 224c7b9aeb3000e11790a2c667f0ee45c9481f17 (commit)
via 94d43a69237b5d2bf671e384ff8b2b9a5ce445b4 (commit)
via c3e1e45419104bdb01dd385b22eae85bb8799611 (commit)
via 946b527467c19236814cae6e35191ce19db3284a (commit)
via 02e2f3a3c2ae669824d595bc9b42f37d9624b22c (commit)
via 54210ba456b4e0822c5e333fd1f996bb35c6bee9 (commit)
via 7a52a9a3618fd19ea9779eb0cef1a3e4f1c3d444 (commit)
via 98f5e0e604e60fead409c28645b064510b8fc8de (commit)
via a1a58a7382e82256f3f6785b7bebfa4643cced67 (commit)
via 928a439496c6040392cc03615c38aa3de45bfe87 (commit)
via 75340e4d2906762ecd088180087c9229a253e4ed (commit)
via 324cf344f3e0cd9e35e50076911fe7801a7d4690 (commit)
via 71315bc901882a8bbb35a95c19781528560fcb82 (commit)
via 3f27e2dc3c0ce961a95e4791604e3cb12fd43dfa (commit)
via 50ca3176a95f7ca760c0749d7a92634e2526369d (commit)
via 08878f6bfb271301564ad307339d2599bc6d951c (commit)
via 3bb68553bb39502b749265542c5b6d36ad80e32d (commit)
via ade0b1a890f1fe21c075d4fef332e3e35407f086 (commit)
via ff20711233b8bd6a5df5a896c0d2855222291a9e (commit)
via 3e8ef1595dd2c7095540b22cca77e69991ad8ee1 (commit)
via c6827475447b07375cfdd2902c08519cd1cc9dde (commit)
via a1420dabc6ace739cb21044184654717a32604a5 (commit)
via cd689c463a59fa5fee72d3f977835e0369eb3650 (commit)
via c3f769ce6f0e3be367f7e0079a97a11e3f344761 (commit)
via fd9855894957b318876a9cb9a0dbe2b4cbbdd4b6 (commit)
via 0f07b7c15231aa778e693e0f2b36d32e1023c431 (commit)
via a28db69a6e8e3548e8e41f62999fff18dbd33bec (commit)
via e3ab39868457157166a8b7b2f1753555409426b9 (commit)
via cba6db1c8d894a7f30ae820a49aea3d0ff8c18be (commit)
via d4a2c864bf1952f1bcccead59159462519c58e10 (commit)
via ab5868a8dc4c6859f772219daddb7775848d3dc6 (commit)
via fd51a1883a332c305fa4015a6210971d3956fc12 (commit)
via 04611df9f5e34b2a8d6949b5b00d25a065c7c920 (commit)
via 9eddc07cd32918f3b8e9ebd114d9c8f8f39a359b (commit)
via 76a1d7e547e9eecdc4aecfa3cf6cc4ba940ad725 (commit)
via ee53746391bcfdaa75bba0db87add3dc7becb84d (commit)
via 92c0c95057cd98a5b8cf8099367a122684cf329b (commit)
via 296ec6be00c208aa9ab8cbaa20376749e8b6ab49 (commit)
via 82ead9ea724a5482c44e8a6235bcd4634eacce2c (commit)
via 7deb4955ef8816885376e558e7f0eaf65e22b4d4 (commit)
via 4a78ffbf8a2b9d1171415d7f0af1b7adc4e53481 (commit)
via b916d130e0799f457634cfba2b06fc1250db54a0 (commit)
via 1ea71dc16928433c1243375bd9210d5fceb28fe6 (commit)
via e7a49bee8af14cf5d4e16e14cb5bdd3ba83385b9 (commit)
via ed62a7f4dde0264cd60e4b739c41be9c98bbe3e4 (commit)
via 38932d7e76c4e35aca7382640e9e86360a389545 (commit)
via 55b3152ebb03fbe946986a52732f79a68d926163 (commit)
via fe59cd140491dbc685932bd22440e28c703c1053 (commit)
via e93f054a2d0cf1c21d00112ccaa93d5d2432a677 (commit)
via 9c4b079aca67cffe9385e54671f8eb9ed232e1e5 (commit)
via b1b486e59d86633e7b5e17aeedf0f78688ecef05 (commit)
via 45fb91884dc0741384a14e7f52633b0566779dff (commit)
via c166c0c96a476eea511fe363b90df3d78fae7506 (commit)
via 0e1eaada6f38f23e7de9404a94b2cd5dd2794879 (commit)
via 16f90854f8957d12e2861bc77303f186402a8805 (commit)
via 9c3195a542df8d7e747e28d49d7abdc707781632 (commit)
via efea92bd3f50b23ed5d551cd7f140abe47959bfd (commit)
from fd08a0dc40846f58aaa8a7df7726ac83e5e4c038 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
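The same range of new revisions can also be listed locally; the following is a minimal sketch only, assuming a local clone of the bind10 repository with this branch fetched, and using OLD/NEW as placeholder names for the old tip ("from" above) and the new tip (first "via" above):

    # Minimal sketch: list the OLD..NEW commit range with plain git,
    # run from inside a local clone that has the trac833 branch fetched.
    import subprocess

    OLD = "fd08a0dc40846f58aaa8a7df7726ac83e5e4c038"  # old branch tip ("from" above)
    NEW = "01c6801b65e167ba2cf635143b988bf4bcbbdc68"  # new branch tip (first "via" above)

    # "git log OLD..NEW" prints the commits reachable from NEW but not from OLD,
    # i.e. the revisions this notification enumerates.
    result = subprocess.run(
        ["git", "log", "--oneline", OLD + ".." + NEW],
        check=True, capture_output=True, text=True,
    )
    print(result.stdout)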
- Log -----------------------------------------------------------------
commit 01c6801b65e167ba2cf635143b988bf4bcbbdc68
Merge: 0a3592efda9bd97cf251163cf9a30f38122cb7c2 31d5a4f66b18cca838ca1182b9f13034066427a7
Author: Tomek Mrugalski <tomasz at isc.org>
Date:   Wed Nov 9 14:18:38 2011 +0100

    Merge branch 'trac1228'

    Conflicts:
        ChangeLog

commit 0a3592efda9bd97cf251163cf9a30f38122cb7c2
Author: Jelte Jansen <jelte at isc.org>
Date:   Wed Nov 9 09:58:32 2011 +0100

    [master] update changelog

commit 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2
Merge: e6a596fe8f57103c735d8e135f855d46c248844c c03e6df1521a378fa3cb9eab4a11db93e6e34969
Author: Jelte Jansen <jelte at isc.org>
Date:   Wed Nov 9 09:58:15 2011 +0100

    [master] Merge branch 'trac1298'

    Conflicts:
        src/bin/xfrin/tests/xfrin_test.py
        src/bin/xfrin/xfrin.py.in

commit e6a596fe8f57103c735d8e135f855d46c248844c
Author: JINMEI Tatuya <jinmei at isc.org>
Date:   Tue Nov 8 10:11:43 2011 -0800

    [master] tabbing consistency

commit f8cea54b5bb8f870a01beebbdcde5eb90dd7d8b4
Author: JINMEI Tatuya <jinmei at isc.org>
Date:   Tue Nov 8 10:08:00 2011 -0800

    [master] added changelog entry for #1329 (also made some cosmetic changes
    to an existing entry)

commit 137a61f2afcd6d16ea20c3a4436046d783a5babf
Author: Jelte Jansen <jelte at isc.org>
Date:   Tue Nov 8 17:53:28 2011 +0100

    [master] update changelog

commit 6b75c128bcdcefd85c18ccb6def59e9acedd4437
Merge: 8cea64b69af8d5ef21497d2f1c9812968ce5d8f7 1a5bd80bbe01abbb2a5932bc43fab8e7a287dcf5
Author: Jelte Jansen <jelte at isc.org>
Date:   Tue Nov 8 14:42:53 2011 +0100

    Merge branch 'trac1290'
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 542 ++-
Makefile.am | 7 +
README | 224 +-
src/bin/stats/tests/http/__init__.py => TODO | 0
configure.ac | 279 +-
doc/Doxyfile | 14 +-
doc/guide/Makefile.am | 23 +-
doc/guide/bind10-guide.html | 632 +++-
doc/guide/bind10-guide.txt | 1201 +++++
doc/guide/bind10-guide.xml | 1024 ++++-
doc/guide/bind10-messages.html | 2081 ++++++++
doc/guide/bind10-messages.xml | 5026 ++++++++++++++++++++
ext/asio/asio/impl/error_code.ipp | 3 +
src/bin/Makefile.am | 2 +-
src/bin/auth/Makefile.am | 23 +-
src/bin/auth/auth.spec.pre.in | 18 +
src/bin/auth/auth_config.cc | 31 +-
src/bin/auth/auth_log.cc | 26 +
src/bin/auth/auth_log.h | 54 +
src/bin/auth/auth_messages.mes | 262 +
src/bin/auth/auth_srv.cc | 293 +-
src/bin/auth/auth_srv.h | 58 +-
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/benchmarks/Makefile.am | 11 +
src/bin/auth/command.cc | 58 +-
src/bin/auth/common.cc | 9 +-
src/bin/auth/main.cc | 67 +-
src/bin/auth/query.cc | 224 +-
src/bin/auth/query.h | 87 +-
src/bin/auth/spec_config.h.pre.in | 32 +-
src/bin/auth/statistics.cc | 67 +-
src/bin/auth/statistics.h | 27 +-
src/bin/auth/tests/Makefile.am | 13 +
src/bin/auth/tests/auth_srv_unittest.cc | 157 +-
src/bin/auth/tests/command_unittest.cc | 65 +-
src/bin/auth/tests/config_unittest.cc | 53 +-
src/bin/auth/tests/query_unittest.cc | 662 +++-
src/bin/auth/tests/run_unittests.cc | 5 +-
src/bin/auth/tests/statistics_unittest.cc | 77 +-
src/bin/auth/tests/testdata/Makefile.am | 2 +-
src/bin/bind10/Makefile.am | 25 +-
src/bin/bind10/bind10.8 | 32 +-
src/bin/bind10/bind10.py.in | 1039 ----
src/bin/bind10/bind10.xml | 42 +-
src/bin/bind10/bind10_messages.mes | 235 +
src/bin/bind10/bind10_src.py.in | 1168 +++++
src/bin/bind10/bob.spec | 11 +
src/bin/bind10/creatorapi.txt | 123 +
src/bin/bind10/run_bind10.sh.in | 9 +-
src/bin/bind10/tests/Makefile.am | 13 +-
src/bin/bind10/tests/bind10_test.py.in | 122 +-
src/bin/bindctl/Makefile.am | 7 +
src/bin/bindctl/bindcmd.py | 157 +-
src/bin/bindctl/bindctl_main.py.in | 22 +-
src/bin/bindctl/run_bindctl.sh.in | 10 +-
src/bin/bindctl/tests/Makefile.am | 10 +-
src/bin/bindctl/tests/bindctl_test.py | 126 +-
src/bin/cfgmgr/Makefile.am | 5 +
src/bin/cfgmgr/b10-cfgmgr.py.in | 12 +-
src/bin/cfgmgr/plugins/Makefile.am | 14 +-
src/bin/cfgmgr/plugins/b10logging.py | 109 +
src/bin/cfgmgr/plugins/logging.spec | 81 +
src/bin/cfgmgr/plugins/tests/Makefile.am | 14 +-
src/bin/cfgmgr/plugins/tests/logging_test.py | 135 +
src/bin/cfgmgr/plugins/tests/tsig_keys_test.py | 2 +-
src/bin/cfgmgr/tests/Makefile.am | 21 +-
src/bin/cmdctl/Makefile.am | 25 +-
src/bin/cmdctl/cmdctl.py.in | 140 +-
src/bin/cmdctl/cmdctl_messages.mes | 84 +
src/bin/cmdctl/run_b10-cmdctl.sh.in | 10 +-
src/bin/cmdctl/tests/Makefile.am | 10 +-
src/bin/cmdctl/tests/cmdctl_test.py | 6 +-
src/bin/dhcp6/.gitignore | 9 +
src/bin/dhcp6/Makefile.am | 46 +
src/bin/dhcp6/b10-dhcp6.8 | 51 +
src/bin/dhcp6/b10-dhcp6.xml | 98 +
src/bin/dhcp6/dhcp6.spec | 14 +
src/bin/dhcp6/dhcp6_srv.cc | 231 +
src/bin/dhcp6/dhcp6_srv.h | 156 +
src/bin/dhcp6/iface_mgr.cc | 542 +++
src/bin/dhcp6/iface_mgr.h | 229 +
src/bin/dhcp6/interfaces.txt | 10 +
src/bin/dhcp6/main.cc | 112 +
src/bin/dhcp6/spec_config.h.pre.in | 15 +
src/bin/dhcp6/tests/Makefile.am | 64 +
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 148 +
src/bin/dhcp6/tests/dhcp6_test.py | 65 +
src/bin/dhcp6/tests/dhcp6_unittests.cc | 28 +
src/bin/dhcp6/tests/iface_mgr_unittest.cc | 367 ++
src/bin/host/Makefile.am | 1 +
src/bin/host/b10-host.1 | 4 -
src/bin/host/b10-host.xml | 5 -
src/bin/loadzone/Makefile.am | 1 +
src/bin/loadzone/run_loadzone.sh.in | 10 +-
src/bin/loadzone/tests/correct/Makefile.am | 11 +-
src/bin/loadzone/tests/correct/correct_test.sh.in | 2 +-
src/bin/loadzone/tests/error/Makefile.am | 11 +-
src/bin/loadzone/tests/error/error_test.sh.in | 2 +-
src/bin/msgq/Makefile.am | 7 +-
src/bin/msgq/msgq.py.in | 33 +-
src/bin/msgq/tests/Makefile.am | 10 +-
src/bin/msgq/tests/msgq_test.py | 2 +-
src/bin/resolver/Makefile.am | 21 +-
src/bin/resolver/b10-resolver.8 | 30 +-
src/bin/resolver/b10-resolver.xml | 32 +-
src/bin/resolver/main.cc | 47 +-
src/bin/resolver/resolver.cc | 287 +-
src/bin/resolver/resolver.h | 22 +
src/bin/resolver/resolver.spec.pre.in | 35 +
src/bin/resolver/resolver_log.cc | 19 +
src/bin/resolver/resolver_log.h | 49 +
src/bin/resolver/resolver_messages.mes | 248 +
src/bin/resolver/tests/Makefile.am | 19 +-
src/bin/resolver/tests/resolver_config_unittest.cc | 230 +-
src/bin/resolver/tests/resolver_unittest.cc | 61 +-
.../resolver/tests/response_scrubber_unittest.cc | 6 +
src/bin/resolver/tests/run_unittests.cc | 5 +-
src/bin/sockcreator/README | 2 +-
src/bin/sockcreator/tests/Makefile.am | 5 +-
src/bin/sockcreator/tests/run_unittests.cc | 3 +-
src/bin/stats/Makefile.am | 30 +-
src/bin/stats/b10-stats-httpd.8 | 6 +-
src/bin/stats/b10-stats-httpd.xml | 10 +-
src/bin/stats/b10-stats.8 | 103 +-
src/bin/stats/b10-stats.xml | 130 +-
...{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} | 0
...{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} | 0
src/bin/stats/stats-httpd-xsl.tpl | 57 +
src/bin/stats/stats-httpd-xsl.tpl.in | 56 -
.../{stats-httpd.spec.in => stats-httpd.spec} | 0
src/bin/stats/stats-schema.spec.in | 87 -
src/bin/stats/stats.py.in | 621 ++--
src/bin/stats/stats.spec | 125 +
src/bin/stats/stats.spec.in | 61 -
src/bin/stats/stats_httpd.py.in | 369 +-
src/bin/stats/stats_httpd_messages.mes | 92 +
src/bin/stats/stats_messages.mes | 76 +
src/bin/stats/tests/Makefile.am | 24 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 785 ++--
src/bin/stats/tests/b10-stats_test.py | 1196 +++---
src/bin/stats/tests/fake_select.py | 43 -
src/bin/stats/tests/fake_socket.py | 70 -
src/bin/stats/tests/fake_time.py | 47 -
src/bin/stats/tests/http/Makefile.am | 2 -
src/bin/stats/tests/http/server.py | 96 -
src/bin/stats/tests/isc/Makefile.am | 3 -
src/bin/stats/tests/isc/cc/Makefile.am | 2 -
src/bin/stats/tests/isc/cc/__init__.py | 1 -
src/bin/stats/tests/isc/cc/session.py | 148 -
src/bin/stats/tests/isc/config/Makefile.am | 2 -
src/bin/stats/tests/isc/config/__init__.py | 1 -
src/bin/stats/tests/isc/config/ccsession.py | 160 -
src/bin/stats/tests/isc/util/Makefile.am | 2 -
src/bin/stats/tests/isc/util/process.py | 21 -
src/bin/stats/tests/test_utils.py | 367 ++
src/bin/stats/tests/testdata/Makefile.am | 1 -
src/bin/stats/tests/testdata/stats_test.spec | 19 -
src/bin/tests/Makefile.am | 14 +-
src/bin/tests/process_rename_test.py.in | 9 +-
src/bin/xfrin/Makefile.am | 21 +-
src/bin/xfrin/b10-xfrin.8 | 68 +-
src/bin/xfrin/b10-xfrin.xml | 65 +-
src/bin/xfrin/tests/Makefile.am | 11 +-
src/bin/xfrin/tests/testdata/Makefile.am | 2 +
src/bin/xfrin/tests/testdata/example.com | 17 +
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 0 -> 11264 bytes
src/bin/xfrin/tests/xfrin_test.py | 2116 ++++++++-
src/bin/xfrin/xfrin.py.in | 1101 ++++-
src/bin/xfrin/xfrin.spec | 55 +-
src/bin/xfrin/xfrin_messages.mes | 148 +
src/bin/xfrout/Makefile.am | 20 +-
src/bin/xfrout/b10-xfrout.xml | 8 +
src/bin/xfrout/tests/Makefile.am | 12 +-
src/bin/xfrout/tests/xfrout_test.py.in | 510 ++-
src/bin/xfrout/xfrout.py.in | 386 ++-
src/bin/xfrout/xfrout.spec.pre.in | 77 +-
src/bin/xfrout/xfrout_messages.mes | 162 +
src/bin/zonemgr/Makefile.am | 21 +-
src/bin/zonemgr/b10-zonemgr.8 | 37 +-
src/bin/zonemgr/b10-zonemgr.xml | 66 +-
src/bin/zonemgr/tests/Makefile.am | 10 +-
src/bin/zonemgr/tests/zonemgr_test.py | 104 +-
src/bin/zonemgr/zonemgr.py.in | 181 +-
src/bin/zonemgr/zonemgr_messages.mes | 145 +
src/cppcheck-suppress.lst | 10 +-
src/lib/Makefile.am | 6 +-
src/lib/acl/Makefile.am | 27 +
src/lib/acl/acl.h | 143 +
src/lib/acl/check.h | 195 +
src/lib/acl/dns.cc | 140 +
src/lib/acl/dns.h | 154 +
src/lib/acl/dnsname_check.h | 83 +
src/lib/acl/ip_check.cc | 141 +
src/lib/acl/ip_check.h | 417 ++
src/lib/acl/loader.cc | 46 +
src/lib/acl/loader.h | 479 ++
src/lib/acl/logic_check.h | 286 ++
src/lib/acl/tests/Makefile.am | 40 +
src/lib/acl/tests/acl_test.cc | 90 +
src/lib/acl/tests/check_test.cc | 70 +
src/lib/acl/tests/creators.h | 158 +
src/lib/acl/tests/dns_test.cc | 271 ++
src/lib/acl/tests/dnsname_check_unittest.cc | 59 +
src/lib/acl/tests/ip_check_unittest.cc | 617 +++
src/lib/acl/tests/loader_test.cc | 383 ++
src/lib/acl/tests/logcheck.h | 94 +
src/lib/acl/tests/logic_check_test.cc | 291 ++
src/lib/acl/tests/run_unittests.cc | 24 +
src/lib/acl/tests/sockaddr.h | 69 +
src/lib/asiodns/Makefile.am | 12 +-
src/lib/asiodns/asiodef.mes | 56 -
src/lib/asiodns/asiodns_messages.mes | 56 +
src/lib/asiodns/io_fetch.cc | 53 +-
src/lib/asiodns/tests/Makefile.am | 4 +-
src/lib/asiodns/tests/io_fetch_unittest.cc | 3 +
src/lib/asiodns/tests/run_unittests.cc | 7 +-
src/lib/asiolink/Makefile.am | 9 +-
src/lib/asiolink/README | 7 +
src/lib/asiolink/dummy_io_cb.h | 7 +-
src/lib/asiolink/interval_timer.cc | 63 +-
src/lib/asiolink/interval_timer.h | 10 +-
src/lib/asiolink/io_address.cc | 38 +-
src/lib/asiolink/io_address.h | 42 +
src/lib/asiolink/io_asio_socket.h | 20 +-
src/lib/asiolink/io_endpoint.h | 44 +
src/lib/asiolink/tcp_endpoint.h | 8 +
src/lib/asiolink/tests/Makefile.am | 11 +-
src/lib/asiolink/tests/interval_timer_unittest.cc | 59 +-
src/lib/asiolink/tests/io_address_unittest.cc | 38 +
src/lib/asiolink/tests/io_endpoint_unittest.cc | 204 +-
src/lib/asiolink/tests/run_unittests.cc | 10 +-
src/lib/asiolink/udp_endpoint.h | 8 +
src/lib/bench/Makefile.am | 2 +-
src/lib/bench/tests/Makefile.am | 5 +-
src/lib/bench/tests/run_unittests.cc | 3 +-
src/lib/cache/Makefile.am | 11 +-
src/lib/cache/TODO | 5 +-
src/lib/cache/cache_messages.mes | 148 +
src/lib/cache/local_zone_data.cc | 4 +
src/lib/cache/logger.cc | 23 +
src/lib/cache/logger.h | 43 +
src/lib/cache/message_cache.cc | 38 +-
src/lib/cache/message_cache.h | 20 +-
src/lib/cache/message_entry.cc | 5 +-
src/lib/cache/resolver_cache.cc | 42 +-
src/lib/cache/resolver_cache.h | 24 +-
src/lib/cache/rrset_cache.cc | 45 +-
src/lib/cache/rrset_cache.h | 25 +-
src/lib/cache/rrset_entry.h | 4 +-
src/lib/cache/tests/Makefile.am | 28 +-
src/lib/cache/tests/run_unittests.cc | 7 +-
src/lib/cc/Makefile.am | 12 +-
src/lib/cc/cc_messages.mes | 108 +
src/lib/cc/data.cc | 13 +-
src/lib/cc/data.h | 2 +-
src/lib/cc/logger.cc | 23 +
src/lib/cc/logger.h | 46 +
src/lib/cc/session.cc | 35 +-
src/lib/cc/tests/Makefile.am | 2 +
src/lib/cc/tests/data_unittests.cc | 15 +
src/lib/cc/tests/run_unittests.cc | 7 +-
src/lib/config/Makefile.am | 12 +-
src/lib/config/ccsession.cc | 363 ++-
src/lib/config/ccsession.h | 131 +-
src/lib/config/config_data.cc | 136 +-
src/lib/config/config_data.h | 10 +
src/lib/config/config_log.h | 7 +-
src/lib/config/config_messages.mes | 84 +
src/lib/config/configdef.mes | 50 -
src/lib/config/module_spec.cc | 115 +-
src/lib/config/module_spec.h | 23 +-
src/lib/config/tests/Makefile.am | 7 +-
src/lib/config/tests/ccsession_unittests.cc | 281 ++-
src/lib/config/tests/config_data_unittests.cc | 20 +-
.../config/tests/data_def_unittests_config.h.in | 1 +
src/lib/config/tests/fake_session.cc | 22 +-
src/lib/config/tests/fake_session.h | 9 +
src/lib/config/tests/module_spec_unittests.cc | 167 +-
src/lib/config/tests/run_unittests.cc | 10 +-
src/lib/config/tests/testdata/Makefile.am | 14 +
src/lib/config/tests/testdata/data32_1.data | 3 +
src/lib/config/tests/testdata/data32_2.data | 3 +
src/lib/config/tests/testdata/data32_3.data | 3 +
src/lib/config/tests/testdata/data33_1.data | 7 +
src/lib/config/tests/testdata/data33_2.data | 7 +
src/lib/config/tests/testdata/spec2.spec | 11 +
src/lib/config/tests/testdata/spec30.spec | 45 +
src/lib/config/tests/testdata/spec31.spec | 63 +
src/lib/config/tests/testdata/spec32.spec | 40 +
src/lib/config/tests/testdata/spec33.spec | 50 +
src/lib/config/tests/testdata/spec34.spec | 14 +
src/lib/config/tests/testdata/spec35.spec | 15 +
src/lib/config/tests/testdata/spec36.spec | 17 +
src/lib/config/tests/testdata/spec37.spec | 7 +
src/lib/config/tests/testdata/spec38.spec | 17 +
src/lib/cryptolink/crypto_hmac.cc | 47 +-
src/lib/cryptolink/cryptolink.h | 12 +-
src/lib/cryptolink/tests/Makefile.am | 5 +-
src/lib/cryptolink/tests/crypto_unittests.cc | 229 +-
src/lib/cryptolink/tests/run_unittests.cc | 3 +-
src/lib/datasrc/Makefile.am | 30 +-
src/lib/datasrc/cache.cc | 20 +-
src/lib/datasrc/client.h | 292 ++
src/lib/datasrc/data_source.cc | 6 +-
src/lib/datasrc/data_source.h | 12 +-
src/lib/datasrc/database.cc | 990 ++++
src/lib/datasrc/database.h | 892 ++++
src/lib/datasrc/datasrc_messages.mes | 632 +++
src/lib/datasrc/factory.cc | 95 +
src/lib/datasrc/factory.h | 170 +
src/lib/datasrc/iterator.h | 105 +
src/lib/datasrc/logger.h | 20 +-
src/lib/datasrc/memory_datasrc.cc | 386 ++-
src/lib/datasrc/memory_datasrc.h | 223 +-
src/lib/datasrc/messagedef.mes | 498 --
src/lib/datasrc/rbtree.h | 12 +-
src/lib/datasrc/sqlite3_accessor.cc | 895 ++++
src/lib/datasrc/sqlite3_accessor.h | 231 +
src/lib/datasrc/sqlite3_datasrc.cc | 100 +-
src/lib/datasrc/static_datasrc.cc | 3 +-
src/lib/datasrc/tests/Makefile.am | 34 +-
src/lib/datasrc/tests/cache_unittest.cc | 6 +-
src/lib/datasrc/tests/client_unittest.cc | 50 +
src/lib/datasrc/tests/database_unittest.cc | 2630 ++++++++++
src/lib/datasrc/tests/factory_unittest.cc | 175 +
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 675 ++--
src/lib/datasrc/tests/run_unittests.cc | 6 +-
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 1115 +++++
src/lib/datasrc/tests/static_unittest.cc | 1 +
src/lib/datasrc/tests/testdata/Makefile.am | 6 +
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 0 -> 11264 bytes
src/lib/datasrc/tests/testdata/test.sqlite3 | Bin 43008 -> 44032 bytes
.../{test.sqlite3 => test.sqlite3.nodiffs} | Bin 43008 -> 43008 bytes
src/lib/datasrc/tests/zonetable_unittest.cc | 36 +-
src/lib/datasrc/zone.h | 441 ++-
src/lib/datasrc/zonetable.cc | 12 +-
src/lib/datasrc/zonetable.h | 6 +-
src/lib/dhcp/Makefile.am | 26 +
src/lib/dhcp/README | 11 +
src/lib/dhcp/dhcp4.h | 191 +
src/lib/dhcp/dhcp6.h | 184 +
src/lib/dhcp/libdhcp.cc | 170 +
src/lib/dhcp/libdhcp.h | 103 +
src/lib/dhcp/option.cc | 333 ++
src/lib/dhcp/option.h | 331 ++
src/lib/dhcp/option6_addrlst.cc | 134 +
src/lib/dhcp/option6_addrlst.h | 127 +
src/lib/dhcp/option6_ia.cc | 136 +
src/lib/dhcp/option6_ia.h | 137 +
src/lib/dhcp/option6_iaaddr.cc | 132 +
src/lib/dhcp/option6_iaaddr.h | 146 +
src/lib/dhcp/pkt4.cc | 255 +
src/lib/dhcp/pkt4.h | 409 ++
src/lib/dhcp/pkt6.cc | 232 +
src/lib/dhcp/pkt6.h | 234 +
src/lib/dhcp/tests/Makefile.am | 41 +
src/lib/dhcp/tests/libdhcp_unittest.cc | 234 +
src/lib/dhcp/tests/option6_addrlst_unittest.cc | 232 +
src/lib/dhcp/tests/option6_ia_unittest.cc | 266 ++
src/lib/dhcp/tests/option6_iaaddr_unittest.cc | 105 +
src/lib/dhcp/tests/option_unittest.cc | 419 ++
src/lib/dhcp/tests/pkt4_unittest.cc | 562 +++
src/lib/dhcp/tests/pkt6_unittest.cc | 207 +
src/lib/dhcp/tests/run_unittests.cc | 27 +
src/lib/dns/Makefile.am | 21 +
src/lib/dns/benchmarks/Makefile.am | 1 +
src/lib/dns/character_string.cc | 140 +
src/lib/dns/character_string.h | 57 +
src/lib/dns/gen-rdatacode.py.in | 17 +-
src/lib/dns/message.cc | 92 +-
src/lib/dns/message.h | 70 +-
src/lib/dns/messagerenderer.cc | 2 -
src/lib/dns/name.cc | 2 +-
src/lib/dns/python/Makefile.am | 51 +-
src/lib/dns/python/edns_python.cc | 262 +-
src/lib/dns/python/edns_python.h | 64 +
src/lib/dns/python/message_python.cc | 647 ++--
src/lib/dns/python/message_python.h | 40 +
src/lib/dns/python/message_python_inc.cc | 41 +
src/lib/dns/python/messagerenderer_python.cc | 259 +-
src/lib/dns/python/messagerenderer_python.h | 57 +
src/lib/dns/python/name_python.cc | 593 ++--
src/lib/dns/python/name_python.h | 81 +
src/lib/dns/python/opcode_python.cc | 231 +-
src/lib/dns/python/opcode_python.h | 64 +
src/lib/dns/python/pydnspp.cc | 751 +++-
src/lib/dns/python/pydnspp_common.cc | 54 +-
src/lib/dns/python/pydnspp_common.h | 27 +-
src/lib/dns/python/pydnspp_towire.h | 127 +
src/lib/dns/python/question_python.cc | 271 +-
src/lib/dns/python/question_python.h | 66 +
src/lib/dns/python/rcode_python.cc | 238 +-
src/lib/dns/python/rcode_python.h | 64 +
src/lib/dns/python/rdata_python.cc | 289 +-
src/lib/dns/python/rdata_python.h | 68 +
src/lib/dns/python/rrclass_python.cc | 303 +-
src/lib/dns/python/rrclass_python.h | 68 +
src/lib/dns/python/rrset_python.cc | 505 ++-
src/lib/dns/python/rrset_python.h | 78 +
src/lib/dns/python/rrttl_python.cc | 281 +-
src/lib/dns/python/rrttl_python.h | 67 +
src/lib/dns/python/rrtype_python.cc | 348 +-
src/lib/dns/python/rrtype_python.h | 68 +
src/lib/dns/python/tests/Makefile.am | 12 +-
src/lib/dns/python/tests/message_python_test.py | 239 +-
src/lib/dns/python/tests/name_python_test.py | 9 +
src/lib/dns/python/tests/question_python_test.py | 10 +-
src/lib/dns/python/tests/rrset_python_test.py | 7 +
src/lib/dns/python/tests/tsig_python_test.py | 535 +++-
src/lib/dns/python/tests/tsig_rdata_python_test.py | 30 +
src/lib/dns/python/tests/tsigerror_python_test.py | 97 +
src/lib/dns/python/tests/tsigkey_python_test.py | 3 +
src/lib/dns/python/tests/tsigrecord_python_test.py | 44 +
src/lib/dns/python/tsig_python.cc | 288 +-
src/lib/dns/python/tsig_python.h | 59 +
src/lib/dns/python/tsig_rdata_python.cc | 367 ++
src/lib/dns/python/tsig_rdata_python.h | 68 +
src/lib/dns/python/tsigerror_python.cc | 291 ++
src/lib/dns/python/tsigerror_python.h | 44 +
src/lib/dns/python/tsigerror_python_inc.cc | 83 +
src/lib/dns/python/tsigkey_python.cc | 429 +-
src/lib/dns/python/tsigkey_python.h | 75 +
src/lib/dns/python/tsigrecord_python.cc | 293 ++
src/lib/dns/python/tsigrecord_python.h | 65 +
src/lib/dns/question.cc | 9 +
src/lib/dns/question.h | 16 +-
src/lib/dns/rdata/any_255/tsig_250.cc | 127 +-
src/lib/dns/rdata/generic/afsdb_18.cc | 171 +
src/lib/dns/rdata/generic/afsdb_18.h | 74 +
src/lib/dns/rdata/generic/detail/ds_like.h | 225 +
src/lib/dns/rdata/generic/detail/txt_like.h | 224 +
src/lib/dns/rdata/generic/dlv_32769.cc | 121 +
src/lib/dns/rdata/generic/dlv_32769.h | 77 +
src/lib/dns/rdata/generic/ds_43.cc | 109 +-
src/lib/dns/rdata/generic/ds_43.h | 33 +-
src/lib/dns/rdata/generic/hinfo_13.cc | 129 +
src/lib/dns/rdata/generic/hinfo_13.h | 77 +
src/lib/dns/rdata/generic/minfo_14.cc | 156 +
src/lib/dns/rdata/generic/minfo_14.h | 82 +
src/lib/dns/rdata/generic/naptr_35.cc | 220 +
src/lib/dns/rdata/generic/naptr_35.h | 83 +
src/lib/dns/rdata/generic/nsec_47.cc | 5 +
src/lib/dns/rdata/generic/nsec_47.h | 10 +
src/lib/dns/rdata/generic/rp_17.cc | 1 +
src/lib/dns/rdata/generic/rrsig_46.cc | 5 +
src/lib/dns/rdata/generic/rrsig_46.h | 3 +
src/lib/dns/rdata/generic/spf_99.cc | 131 +
src/lib/dns/rdata/generic/spf_99.h | 78 +
src/lib/dns/rdata/generic/txt_16.cc | 121 +-
src/lib/dns/rdata/generic/txt_16.h | 11 +-
src/lib/dns/rdata/in_1/dhcid_49.cc | 145 +
src/lib/dns/rdata/in_1/dhcid_49.h | 58 +
src/lib/dns/rdata/in_1/srv_33.cc | 245 +
src/lib/dns/rdata/in_1/srv_33.h | 93 +
src/lib/dns/rdata/template.cc | 1 +
src/lib/dns/rdatafields.h | 2 +-
src/lib/dns/rrset.h | 2 +-
src/lib/dns/rrtype-placeholder.h | 5 +
src/lib/dns/tests/Makefile.am | 19 +-
src/lib/dns/tests/character_string_unittest.cc | 92 +
src/lib/dns/tests/message_unittest.cc | 355 ++-
src/lib/dns/tests/question_unittest.cc | 16 +
src/lib/dns/tests/rdata_afsdb_unittest.cc | 210 +
src/lib/dns/tests/rdata_dhcid_unittest.cc | 111 +
src/lib/dns/tests/rdata_ds_like_unittest.cc | 171 +
src/lib/dns/tests/rdata_ds_unittest.cc | 99 -
src/lib/dns/tests/rdata_hinfo_unittest.cc | 115 +
src/lib/dns/tests/rdata_minfo_unittest.cc | 184 +
src/lib/dns/tests/rdata_naptr_unittest.cc | 178 +
src/lib/dns/tests/rdata_nsec_unittest.cc | 6 +
src/lib/dns/tests/rdata_rrsig_unittest.cc | 2 +-
src/lib/dns/tests/rdata_srv_unittest.cc | 173 +
src/lib/dns/tests/rdata_txt_like_unittest.cc | 261 +
src/lib/dns/tests/rdata_txt_unittest.cc | 166 -
src/lib/dns/tests/run_unittests.cc | 3 +-
src/lib/dns/tests/testdata/Makefile.am | 37 +-
src/lib/dns/tests/testdata/gen-wiredata.py.in | 612 ---
src/lib/dns/tests/testdata/message_fromWire17.spec | 22 +
src/lib/dns/tests/testdata/message_fromWire18.spec | 23 +
src/lib/dns/tests/testdata/message_fromWire19.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire20.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire21.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire22.spec | 14 +
src/lib/dns/tests/testdata/message_toWire4.spec | 27 +
src/lib/dns/tests/testdata/message_toWire5.spec | 36 +
.../dns/tests/testdata/rdata_afsdb_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_afsdb_fromWire2.spec | 6 +
.../dns/tests/testdata/rdata_afsdb_fromWire3.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire4.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire5.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire1.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire2.spec | 8 +
src/lib/dns/tests/testdata/rdata_dhcid_fromWire | 12 +
src/lib/dns/tests/testdata/rdata_dhcid_toWire | 7 +
.../dns/tests/testdata/rdata_minfo_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_minfo_fromWire2.spec | 7 +
.../dns/tests/testdata/rdata_minfo_fromWire3.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire4.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire5.spec | 5 +
.../dns/tests/testdata/rdata_minfo_fromWire6.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire1.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire2.spec | 6 +
.../testdata/rdata_minfo_toWireUncompressed1.spec | 7 +
.../testdata/rdata_minfo_toWireUncompressed2.spec | 8 +
src/lib/dns/tests/testdata/rdata_srv_fromWire | 36 +
src/lib/dns/tests/tsig_unittest.cc | 95 +
src/lib/dns/tests/tsigkey_unittest.cc | 12 +
src/lib/dns/tsig.cc | 103 +-
src/lib/dns/tsig.h | 21 +
src/lib/dns/tsigerror.h | 8 +-
src/lib/dns/tsigkey.cc | 28 +
src/lib/dns/tsigkey.h | 16 +-
src/lib/exceptions/exceptions.h | 23 +
src/lib/exceptions/tests/run_unittests.cc | 3 +
src/lib/log/Makefile.am | 31 +-
src/lib/log/README | 464 ++-
src/lib/log/compiler/Makefile.am | 8 +-
src/lib/log/compiler/message.cc | 348 +-
src/lib/log/debug_levels.h | 29 -
src/lib/log/log_dbglevels.h | 93 +
src/lib/log/log_formatter.h | 103 +-
src/lib/log/log_messages.cc | 63 +
src/lib/log/log_messages.h | 35 +
src/lib/log/log_messages.mes | 146 +
src/lib/log/logger.cc | 47 +-
src/lib/log/logger.h | 168 +-
src/lib/log/logger_impl.cc | 202 +-
src/lib/log/logger_impl.h | 144 +-
src/lib/log/logger_impl_log4cxx.cc | 242 -
src/lib/log/logger_impl_log4cxx.h | 315 --
src/lib/log/logger_level.cc | 48 +
src/lib/log/logger_level.h | 76 +
src/lib/log/logger_level_impl.cc | 217 +
src/lib/log/logger_level_impl.h | 127 +
src/lib/log/logger_levels.h | 42 -
src/lib/log/logger_manager.cc | 184 +
src/lib/log/logger_manager.h | 141 +
src/lib/log/logger_manager_impl.cc | 228 +
src/lib/log/logger_manager_impl.h | 169 +
src/lib/log/logger_name.cc | 59 +
src/lib/log/logger_name.h | 57 +
src/lib/log/logger_specification.h | 156 +
src/lib/log/logger_support.cc | 195 +-
src/lib/log/logger_support.h | 64 +-
src/lib/log/logger_unittest_support.cc | 175 +
src/lib/log/logger_unittest_support.h | 126 +
src/lib/log/logimpl_messages.cc | 29 +
src/lib/log/logimpl_messages.h | 18 +
src/lib/log/logimpl_messages.mes | 43 +
src/lib/log/macros.h | 1 +
src/lib/log/message_dictionary.h | 2 +-
src/lib/log/message_reader.cc | 28 +-
src/lib/log/messagedef.cc | 57 -
src/lib/log/messagedef.h | 32 -
src/lib/log/messagedef.mes | 119 -
src/lib/log/output_option.cc | 55 +
src/lib/log/output_option.h | 85 +
src/lib/log/root_logger_name.cc | 44 -
src/lib/log/root_logger_name.h | 46 -
src/lib/log/tests/Makefile.am | 67 +-
src/lib/log/tests/console_test.sh.in | 67 +
src/lib/log/tests/destination_test.sh.in | 91 +
src/lib/log/tests/init_logger_test.cc | 42 +
src/lib/log/tests/init_logger_test.sh.in | 110 +
src/lib/log/tests/local_file_test.sh.in | 83 +
src/lib/log/tests/log_formatter_unittest.cc | 37 +-
src/lib/log/tests/logger_example.cc | 305 ++
src/lib/log/tests/logger_impl_log4cxx_unittest.cc | 91 -
src/lib/log/tests/logger_level_impl_unittest.cc | 174 +
src/lib/log/tests/logger_level_unittest.cc | 84 +
src/lib/log/tests/logger_manager_unittest.cc | 321 ++
src/lib/log/tests/logger_name_unittest.cc | 77 +
src/lib/log/tests/logger_specification_unittest.cc | 96 +
src/lib/log/tests/logger_support_test.cc | 106 -
src/lib/log/tests/logger_support_unittest.cc | 83 +
src/lib/log/tests/logger_unittest.cc | 113 +-
src/lib/log/tests/message_dictionary_unittest.cc | 4 +-
src/lib/log/tests/message_reader_unittest.cc | 24 +-
src/lib/log/tests/output_option_unittest.cc | 66 +
src/lib/log/tests/root_logger_name_unittest.cc | 50 -
src/lib/log/tests/run_time_init_test.sh.in | 90 -
src/lib/log/tests/run_unittests.cc | 6 +-
src/lib/log/tests/severity_test.sh.in | 89 +
src/lib/log/tests/tempdir.h.in | 29 +
src/lib/log/tests/xdebuglevel_unittest.cc | 203 -
src/lib/log/xdebuglevel.cc | 146 -
src/lib/log/xdebuglevel.h | 162 -
src/lib/nsas/Makefile.am | 14 +-
src/lib/nsas/nameserver_address_store.cc | 5 +-
src/lib/nsas/nameserver_address_store.h | 5 +-
src/lib/nsas/nameserver_entry.cc | 12 +-
src/lib/nsas/nsas_log.h | 8 +-
src/lib/nsas/nsas_messages.mes | 69 +
src/lib/nsas/nsasdef.mes | 61 -
src/lib/nsas/tests/Makefile.am | 5 +-
src/lib/nsas/tests/run_unittests.cc | 15 +-
src/lib/nsas/zone_entry.h | 2 +-
src/lib/python/Makefile.am | 5 +
src/lib/python/bind10_config.py.in | 5 +-
src/lib/python/isc/Makefile.am | 8 +-
src/lib/python/isc/__init__.py | 8 +-
src/lib/python/isc/acl/Makefile.am | 45 +
src/lib/python/isc/acl/__init__.py | 11 +
src/lib/python/isc/acl/_dns.py | 29 +
src/lib/python/isc/acl/acl.cc | 80 +
src/lib/python/isc/acl/acl.py | 29 +
src/lib/python/isc/acl/acl_inc.cc | 16 +
src/lib/python/isc/acl/dns.cc | 135 +
src/lib/python/isc/acl/dns.h | 52 +
src/lib/python/isc/acl/dns.py | 73 +
src/lib/python/isc/acl/dns_requestacl_inc.cc | 33 +
src/lib/python/isc/acl/dns_requestacl_python.cc | 184 +
src/lib/python/isc/acl/dns_requestacl_python.h | 53 +
src/lib/python/isc/acl/dns_requestcontext_inc.cc | 33 +
.../python/isc/acl/dns_requestcontext_python.cc | 382 ++
src/lib/python/isc/acl/dns_requestcontext_python.h | 54 +
src/lib/python/isc/acl/dns_requestloader_inc.cc | 87 +
src/lib/python/isc/acl/dns_requestloader_python.cc | 270 ++
src/lib/python/isc/acl/dns_requestloader_python.h | 46 +
src/lib/python/isc/acl/dnsacl_inc.cc | 17 +
src/lib/python/isc/acl/tests/Makefile.am | 30 +
src/lib/python/isc/acl/tests/acl_test.py | 29 +
src/lib/python/isc/acl/tests/dns_test.py | 357 ++
src/lib/python/isc/bind10/Makefile.am | 4 +
.../isc => lib/python/isc/bind10}/__init__.py | 0
src/lib/python/isc/bind10/sockcreator.py | 228 +
src/lib/python/isc/bind10/tests/Makefile.am | 29 +
.../python/isc/bind10/tests/sockcreator_test.py | 327 ++
src/lib/python/isc/cc/Makefile.am | 5 +
src/lib/python/isc/cc/data.py | 18 +-
src/lib/python/isc/cc/message.py | 2 +-
src/lib/python/isc/cc/session.py | 37 +-
src/lib/python/isc/cc/tests/Makefile.am | 10 +-
src/lib/python/isc/cc/tests/message_test.py | 5 +
src/lib/python/isc/cc/tests/session_test.py | 10 +
src/lib/python/isc/config/Makefile.am | 28 +-
src/lib/python/isc/config/ccsession.py | 239 +-
src/lib/python/isc/config/cfgmgr.py | 76 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 57 +
src/lib/python/isc/config/config_data.py | 231 +-
src/lib/python/isc/config/config_messages.mes | 33 +
src/lib/python/isc/config/module_spec.py | 131 +-
src/lib/python/isc/config/tests/Makefile.am | 16 +-
src/lib/python/isc/config/tests/ccsession_test.py | 160 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 45 +-
.../python/isc/config/tests/config_data_test.py | 105 +-
.../python/isc/config/tests/module_spec_test.py | 112 +
src/lib/python/isc/datasrc/Makefile.am | 35 +-
src/lib/python/isc/datasrc/__init__.py | 35 +-
src/lib/python/isc/datasrc/client_inc.cc | 170 +
src/lib/python/isc/datasrc/client_python.cc | 277 ++
src/lib/python/isc/datasrc/client_python.h | 35 +
src/lib/python/isc/datasrc/datasrc.cc | 256 +
src/lib/python/isc/datasrc/datasrc.h | 50 +
src/lib/python/isc/datasrc/finder_inc.cc | 133 +
src/lib/python/isc/datasrc/finder_python.cc | 286 ++
src/lib/python/isc/datasrc/finder_python.h | 44 +
src/lib/python/isc/datasrc/iterator_inc.cc | 67 +
src/lib/python/isc/datasrc/iterator_python.cc | 242 +
src/lib/python/isc/datasrc/iterator_python.h | 46 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 124 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 21 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 542 +++
.../python/isc/datasrc/tests/sqlite3_ds_test.py | 106 +-
src/lib/python/isc/datasrc/updater_inc.cc | 181 +
src/lib/python/isc/datasrc/updater_python.cc | 288 ++
src/lib/python/isc/datasrc/updater_python.h | 47 +
src/lib/python/isc/dns/Makefile.am | 8 +
src/lib/python/isc/log/Makefile.am | 37 +-
src/lib/python/isc/log/__init__.py | 34 +-
src/lib/python/isc/log/log.cc | 764 +++
src/lib/python/isc/log/log.py | 280 --
src/lib/python/isc/log/tests/Makefile.am | 34 +-
src/lib/python/isc/log/tests/check_output.sh | 3 +
src/lib/python/isc/log/tests/console.out | 4 +
src/lib/python/isc/log/tests/log_console.py.in | 15 +
src/lib/python/isc/log/tests/log_test.in | 26 -
src/lib/python/isc/log/tests/log_test.py | 360 +-
src/lib/python/isc/log_messages/Makefile.am | 32 +
src/lib/python/isc/log_messages/README | 68 +
src/lib/python/isc/log_messages/__init__.py | 3 +
src/lib/python/isc/log_messages/bind10_messages.py | 1 +
src/lib/python/isc/log_messages/cfgmgr_messages.py | 1 +
src/lib/python/isc/log_messages/cmdctl_messages.py | 1 +
src/lib/python/isc/log_messages/config_messages.py | 1 +
src/lib/python/isc/log_messages/gen-forwarder.sh | 14 +
.../python/isc/log_messages/libxfrin_messages.py | 1 +
.../python/isc/log_messages/notify_out_messages.py | 1 +
.../isc/log_messages/stats_httpd_messages.py | 1 +
src/lib/python/isc/log_messages/stats_messages.py | 1 +
src/lib/python/isc/log_messages/work/Makefile.am | 12 +
.../python/isc/log_messages/work/__init__.py.in | 3 +
src/lib/python/isc/log_messages/xfrin_messages.py | 1 +
src/lib/python/isc/log_messages/xfrout_messages.py | 1 +
.../python/isc/log_messages/zonemgr_messages.py | 1 +
src/lib/python/isc/net/Makefile.am | 5 +
src/lib/python/isc/net/tests/Makefile.am | 10 +-
src/lib/python/isc/notify/Makefile.am | 19 +-
src/lib/python/isc/notify/notify_out.py | 193 +-
src/lib/python/isc/notify/notify_out_messages.mes | 83 +
src/lib/python/isc/notify/tests/Makefile.am | 4 +-
src/lib/python/isc/notify/tests/notify_out_test.py | 155 +-
src/lib/python/isc/testutils/Makefile.am | 7 +-
src/lib/python/isc/testutils/tsigctx_mock.py | 53 +
src/lib/python/isc/util/Makefile.am | 5 +
src/lib/python/isc/util/tests/Makefile.am | 10 +-
src/lib/python/isc/xfrin/Makefile.am | 23 +
.../isc/util => lib/python/isc/xfrin}/__init__.py | 0
src/lib/python/isc/xfrin/diff.py | 237 +
src/lib/python/isc/xfrin/libxfrin_messages.mes | 21 +
src/lib/python/isc/xfrin/tests/Makefile.am | 24 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 446 ++
src/lib/resolve/Makefile.am | 22 +-
src/lib/resolve/recursive_query.cc | 173 +-
src/lib/resolve/recursive_query.h | 16 +-
src/lib/resolve/resolve.h | 1 -
src/lib/resolve/resolve_log.cc | 26 +
src/lib/resolve/resolve_log.h | 53 +
src/lib/resolve/resolve_messages.mes | 154 +
src/lib/resolve/tests/Makefile.am | 10 +-
src/lib/resolve/tests/run_unittests.cc | 5 +-
src/lib/server_common/Makefile.am | 17 +-
src/lib/server_common/client.cc | 68 +
src/lib/server_common/client.h | 154 +
src/lib/server_common/keyring.cc | 71 +
src/lib/server_common/keyring.h | 102 +
src/lib/server_common/logger.cc | 23 +
src/lib/server_common/logger.h | 43 +
src/lib/server_common/portconfig.cc | 21 +-
src/lib/server_common/server_common_messages.mes | 73 +
src/lib/server_common/tests/Makefile.am | 15 +-
src/lib/server_common/tests/client_unittest.cc | 103 +
src/lib/server_common/tests/data_path.h.in | 16 +
src/lib/server_common/tests/keyring_test.cc | 150 +
src/lib/server_common/tests/run_unittests.cc | 6 +-
src/lib/server_common/tests/testdata/spec.spec | 6 +
src/lib/testutils/Makefile.am | 2 +-
src/lib/testutils/dnsmessage_test.h | 28 +-
src/lib/testutils/srv_test.cc | 8 +-
src/lib/testutils/srv_test.h | 3 +-
src/lib/testutils/testdata/Makefile.am | 2 +-
src/lib/util/Makefile.am | 6 +-
src/lib/util/buffer.h | 22 +-
src/lib/util/encode/base_n.cc | 35 +-
src/lib/util/filename.cc | 18 +
src/lib/util/filename.h | 12 +
src/lib/util/io/Makefile.am | 6 +-
src/lib/util/io/tests/Makefile.am | 25 -
src/lib/util/io/tests/fd_share_tests.cc | 74 -
src/lib/util/io/tests/fd_tests.cc | 66 -
src/lib/util/io/tests/run_unittests.cc | 22 -
src/lib/util/io_utilities.h | 45 +-
src/lib/util/python/Makefile.am | 1 +
src/lib/util/python/gen_wiredata.py.in | 1232 +++++
src/lib/util/python/mkpywrapper.py.in | 100 +
src/lib/util/python/pycppwrapper_util.h | 335 ++
src/lib/util/python/wrapper_template.cc | 309 ++
src/lib/util/python/wrapper_template.h | 59 +
src/lib/util/pyunittests/Makefile.am | 22 +
src/lib/util/pyunittests/pyunittests_util.cc | 84 +
src/lib/util/strutil.cc | 11 +
src/lib/util/strutil.h | 62 +
src/lib/util/tests/Makefile.am | 24 +-
src/lib/util/tests/base32hex_unittest.cc | 7 +-
src/lib/util/tests/base64_unittest.cc | 8 +-
src/lib/util/tests/buffer_unittest.cc | 32 +
src/lib/util/tests/fd_share_tests.cc | 74 +
src/lib/util/tests/fd_tests.cc | 66 +
src/lib/util/tests/filename_unittest.cc | 52 +
src/lib/util/tests/io_utilities_unittest.cc | 46 +
src/lib/util/tests/run_unittests.cc | 4 +-
src/lib/util/tests/strutil_unittest.cc | 80 +-
src/lib/util/unittests/Makefile.am | 14 +-
src/lib/util/unittests/run_all.cc | 95 +
src/lib/util/unittests/run_all.h | 52 +
tests/lettuce/README | 127 +
tests/lettuce/README.tutorial | 157 +
.../lettuce/configurations/example.org.config.orig | 17 +
tests/lettuce/configurations/example2.org.config | 18 +
tests/lettuce/configurations/no_db_file.config | 10 +
tests/lettuce/data/empty_db.sqlite3 | Bin 0 -> 11264 bytes
.../lettuce/data}/example.org.sqlite3 | Bin 14336 -> 14336 bytes
tests/lettuce/features/example.feature | 142 +
tests/lettuce/features/terrain/bind10_control.py | 108 +
tests/lettuce/features/terrain/querying.py | 279 ++
tests/lettuce/features/terrain/steps.py | 73 +
tests/lettuce/features/terrain/terrain.py | 360 ++
tests/lettuce/setup_intree_bind10.sh.in | 46 +
tests/system/README | 53 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/cleanall.sh | 5 +-
tests/system/common/rndc.conf | 25 +
tests/system/common/rndc.key | 22 +
tests/system/conf.sh.in | 45 +-
tests/system/ixfr/README | 86 +
tests/system/ixfr/b10-config.db.in | 23 +
tests/system/ixfr/clean_ns.sh | 28 +
tests/system/ixfr/common_tests.sh.in | 78 +
tests/system/ixfr/db.example.common | 1556 ++++++
tests/system/ixfr/db.example.n0.in | 29 +
tests/system/ixfr/db.example.n2.in | 28 +
tests/system/ixfr/db.example.n2.refresh.in | 28 +
tests/system/ixfr/db.example.n4.in | 31 +
tests/system/ixfr/db.example.n6.in | 29 +
tests/system/ixfr/in-1/clean.sh | 1 +
tests/system/ixfr/in-1/ns1/README | 3 +
tests/system/ixfr/in-1/nsx2/README | 3 +
tests/system/ixfr/in-1/setup.sh.in | 30 +
tests/system/ixfr/in-1/tests.sh | 37 +
tests/system/ixfr/in-2/clean.sh | 1 +
tests/system/ixfr/in-2/ns1/README | 3 +
tests/system/ixfr/in-2/nsx2/README | 3 +
tests/system/ixfr/in-2/setup.sh.in | 29 +
tests/system/ixfr/in-2/tests.sh | 81 +
tests/system/ixfr/in-3/clean.sh | 1 +
tests/system/ixfr/in-3/ns1/README | 3 +
tests/system/ixfr/in-3/nsx2/README | 3 +
tests/system/ixfr/in-3/setup.sh.in | 29 +
tests/system/ixfr/in-3/tests.sh | 66 +
tests/system/ixfr/in-4/clean.sh | 1 +
tests/system/ixfr/in-4/ns1/README | 3 +
tests/system/ixfr/in-4/nsx2/README | 3 +
tests/system/ixfr/in-4/setup.sh.in | 30 +
tests/system/ixfr/in-4/tests.sh | 53 +
tests/system/ixfr/ixfr_init.sh.in | 330 ++
tests/system/ixfr/named_noixfr.conf | 42 +
tests/system/ixfr/named_nonotify.conf | 40 +
tests/system/ixfr/named_notify.conf | 41 +
tests/system/run.sh | 125 -
tests/system/run.sh.in | 125 +
tests/system/start.pl | 4 +-
tests/tools/badpacket/Makefile.am | 2 +-
tests/tools/badpacket/tests/Makefile.am | 8 +-
tests/tools/badpacket/tests/run_unittests.cc | 3 +-
tools/system_messages.py | 419 ++
836 files changed, 83262 insertions(+), 14992 deletions(-)
rename src/bin/stats/tests/http/__init__.py => TODO (100%)
create mode 100644 doc/guide/bind10-guide.txt
create mode 100644 doc/guide/bind10-messages.html
create mode 100644 doc/guide/bind10-messages.xml
create mode 100644 src/bin/auth/auth_log.cc
create mode 100644 src/bin/auth/auth_log.h
create mode 100644 src/bin/auth/auth_messages.mes
delete mode 100755 src/bin/bind10/bind10.py.in
create mode 100644 src/bin/bind10/bind10_messages.mes
create mode 100755 src/bin/bind10/bind10_src.py.in
create mode 100644 src/bin/bind10/creatorapi.txt
mode change 100644 => 100755 src/bin/bindctl/run_bindctl.sh.in
create mode 100644 src/bin/cfgmgr/plugins/b10logging.py
create mode 100644 src/bin/cfgmgr/plugins/logging.spec
create mode 100644 src/bin/cfgmgr/plugins/tests/logging_test.py
create mode 100644 src/bin/cmdctl/cmdctl_messages.mes
create mode 100644 src/bin/dhcp6/.gitignore
create mode 100644 src/bin/dhcp6/Makefile.am
create mode 100644 src/bin/dhcp6/b10-dhcp6.8
create mode 100644 src/bin/dhcp6/b10-dhcp6.xml
create mode 100644 src/bin/dhcp6/dhcp6.spec
create mode 100644 src/bin/dhcp6/dhcp6_srv.cc
create mode 100644 src/bin/dhcp6/dhcp6_srv.h
create mode 100644 src/bin/dhcp6/iface_mgr.cc
create mode 100644 src/bin/dhcp6/iface_mgr.h
create mode 100644 src/bin/dhcp6/interfaces.txt
create mode 100644 src/bin/dhcp6/main.cc
create mode 100644 src/bin/dhcp6/spec_config.h.pre.in
create mode 100644 src/bin/dhcp6/tests/Makefile.am
create mode 100644 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
create mode 100644 src/bin/dhcp6/tests/dhcp6_test.py
create mode 100644 src/bin/dhcp6/tests/dhcp6_unittests.cc
create mode 100644 src/bin/dhcp6/tests/iface_mgr_unittest.cc
mode change 100644 => 100755 src/bin/loadzone/run_loadzone.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/correct/correct_test.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/error/error_test.sh.in
create mode 100644 src/bin/resolver/resolver_log.cc
create mode 100644 src/bin/resolver/resolver_log.h
create mode 100644 src/bin/resolver/resolver_messages.mes
rename src/bin/stats/{stats-httpd-xml.tpl.in => stats-httpd-xml.tpl} (100%)
rename src/bin/stats/{stats-httpd-xsd.tpl.in => stats-httpd-xsd.tpl} (100%)
create mode 100644 src/bin/stats/stats-httpd-xsl.tpl
delete mode 100644 src/bin/stats/stats-httpd-xsl.tpl.in
rename src/bin/stats/{stats-httpd.spec.in => stats-httpd.spec} (100%)
delete mode 100644 src/bin/stats/stats-schema.spec.in
mode change 100644 => 100755 src/bin/stats/stats.py.in
create mode 100644 src/bin/stats/stats.spec
delete mode 100644 src/bin/stats/stats.spec.in
create mode 100644 src/bin/stats/stats_httpd_messages.mes
create mode 100644 src/bin/stats/stats_messages.mes
delete mode 100644 src/bin/stats/tests/fake_select.py
delete mode 100644 src/bin/stats/tests/fake_socket.py
delete mode 100644 src/bin/stats/tests/fake_time.py
delete mode 100644 src/bin/stats/tests/http/Makefile.am
delete mode 100644 src/bin/stats/tests/http/server.py
delete mode 100644 src/bin/stats/tests/isc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/session.py
delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/util/process.py
create mode 100644 src/bin/stats/tests/test_utils.py
delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
create mode 100644 src/bin/xfrin/tests/testdata/Makefile.am
create mode 100644 src/bin/xfrin/tests/testdata/example.com
create mode 100644 src/bin/xfrin/tests/testdata/example.com.sqlite3
create mode 100644 src/bin/xfrin/xfrin_messages.mes
create mode 100644 src/bin/xfrout/xfrout_messages.mes
create mode 100644 src/bin/zonemgr/zonemgr_messages.mes
create mode 100644 src/lib/acl/Makefile.am
create mode 100644 src/lib/acl/acl.h
create mode 100644 src/lib/acl/check.h
create mode 100644 src/lib/acl/dns.cc
create mode 100644 src/lib/acl/dns.h
create mode 100644 src/lib/acl/dnsname_check.h
create mode 100644 src/lib/acl/ip_check.cc
create mode 100644 src/lib/acl/ip_check.h
create mode 100644 src/lib/acl/loader.cc
create mode 100644 src/lib/acl/loader.h
create mode 100644 src/lib/acl/logic_check.h
create mode 100644 src/lib/acl/tests/Makefile.am
create mode 100644 src/lib/acl/tests/acl_test.cc
create mode 100644 src/lib/acl/tests/check_test.cc
create mode 100644 src/lib/acl/tests/creators.h
create mode 100644 src/lib/acl/tests/dns_test.cc
create mode 100644 src/lib/acl/tests/dnsname_check_unittest.cc
create mode 100644 src/lib/acl/tests/ip_check_unittest.cc
create mode 100644 src/lib/acl/tests/loader_test.cc
create mode 100644 src/lib/acl/tests/logcheck.h
create mode 100644 src/lib/acl/tests/logic_check_test.cc
create mode 100644 src/lib/acl/tests/run_unittests.cc
create mode 100644 src/lib/acl/tests/sockaddr.h
delete mode 100644 src/lib/asiodns/asiodef.mes
create mode 100644 src/lib/asiodns/asiodns_messages.mes
create mode 100644 src/lib/cache/cache_messages.mes
create mode 100644 src/lib/cache/logger.cc
create mode 100644 src/lib/cache/logger.h
create mode 100644 src/lib/cc/cc_messages.mes
create mode 100644 src/lib/cc/logger.cc
create mode 100644 src/lib/cc/logger.h
create mode 100644 src/lib/config/config_messages.mes
delete mode 100644 src/lib/config/configdef.mes
create mode 100644 src/lib/config/tests/testdata/data32_1.data
create mode 100644 src/lib/config/tests/testdata/data32_2.data
create mode 100644 src/lib/config/tests/testdata/data32_3.data
create mode 100644 src/lib/config/tests/testdata/data33_1.data
create mode 100644 src/lib/config/tests/testdata/data33_2.data
create mode 100644 src/lib/config/tests/testdata/spec30.spec
create mode 100644 src/lib/config/tests/testdata/spec31.spec
create mode 100644 src/lib/config/tests/testdata/spec32.spec
create mode 100644 src/lib/config/tests/testdata/spec33.spec
create mode 100644 src/lib/config/tests/testdata/spec34.spec
create mode 100644 src/lib/config/tests/testdata/spec35.spec
create mode 100644 src/lib/config/tests/testdata/spec36.spec
create mode 100644 src/lib/config/tests/testdata/spec37.spec
create mode 100644 src/lib/config/tests/testdata/spec38.spec
create mode 100644 src/lib/datasrc/client.h
create mode 100644 src/lib/datasrc/database.cc
create mode 100644 src/lib/datasrc/database.h
create mode 100644 src/lib/datasrc/datasrc_messages.mes
create mode 100644 src/lib/datasrc/factory.cc
create mode 100644 src/lib/datasrc/factory.h
create mode 100644 src/lib/datasrc/iterator.h
delete mode 100644 src/lib/datasrc/messagedef.mes
create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
create mode 100644 src/lib/datasrc/sqlite3_accessor.h
create mode 100644 src/lib/datasrc/tests/client_unittest.cc
create mode 100644 src/lib/datasrc/tests/database_unittest.cc
create mode 100644 src/lib/datasrc/tests/factory_unittest.cc
create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
create mode 100644 src/lib/datasrc/tests/testdata/Makefile.am
create mode 100644 src/lib/datasrc/tests/testdata/rwtest.sqlite3
copy src/lib/datasrc/tests/testdata/{test.sqlite3 => test.sqlite3.nodiffs} (100%)
create mode 100644 src/lib/dhcp/Makefile.am
create mode 100644 src/lib/dhcp/README
create mode 100644 src/lib/dhcp/dhcp4.h
create mode 100644 src/lib/dhcp/dhcp6.h
create mode 100644 src/lib/dhcp/libdhcp.cc
create mode 100644 src/lib/dhcp/libdhcp.h
create mode 100644 src/lib/dhcp/option.cc
create mode 100644 src/lib/dhcp/option.h
create mode 100644 src/lib/dhcp/option6_addrlst.cc
create mode 100644 src/lib/dhcp/option6_addrlst.h
create mode 100644 src/lib/dhcp/option6_ia.cc
create mode 100644 src/lib/dhcp/option6_ia.h
create mode 100644 src/lib/dhcp/option6_iaaddr.cc
create mode 100644 src/lib/dhcp/option6_iaaddr.h
create mode 100644 src/lib/dhcp/pkt4.cc
create mode 100644 src/lib/dhcp/pkt4.h
create mode 100644 src/lib/dhcp/pkt6.cc
create mode 100644 src/lib/dhcp/pkt6.h
create mode 100644 src/lib/dhcp/tests/Makefile.am
create mode 100644 src/lib/dhcp/tests/libdhcp_unittest.cc
create mode 100644 src/lib/dhcp/tests/option6_addrlst_unittest.cc
create mode 100644 src/lib/dhcp/tests/option6_ia_unittest.cc
create mode 100644 src/lib/dhcp/tests/option6_iaaddr_unittest.cc
create mode 100644 src/lib/dhcp/tests/option_unittest.cc
create mode 100644 src/lib/dhcp/tests/pkt4_unittest.cc
create mode 100644 src/lib/dhcp/tests/pkt6_unittest.cc
create mode 100644 src/lib/dhcp/tests/run_unittests.cc
create mode 100644 src/lib/dns/character_string.cc
create mode 100644 src/lib/dns/character_string.h
create mode 100644 src/lib/dns/python/edns_python.h
create mode 100644 src/lib/dns/python/message_python.h
create mode 100644 src/lib/dns/python/message_python_inc.cc
create mode 100644 src/lib/dns/python/messagerenderer_python.h
create mode 100644 src/lib/dns/python/name_python.h
create mode 100644 src/lib/dns/python/opcode_python.h
create mode 100644 src/lib/dns/python/pydnspp_towire.h
create mode 100644 src/lib/dns/python/question_python.h
create mode 100644 src/lib/dns/python/rcode_python.h
create mode 100644 src/lib/dns/python/rdata_python.h
create mode 100644 src/lib/dns/python/rrclass_python.h
create mode 100644 src/lib/dns/python/rrset_python.h
create mode 100644 src/lib/dns/python/rrttl_python.h
create mode 100644 src/lib/dns/python/rrtype_python.h
create mode 100644 src/lib/dns/python/tests/tsig_rdata_python_test.py
create mode 100644 src/lib/dns/python/tests/tsigerror_python_test.py
create mode 100644 src/lib/dns/python/tests/tsigrecord_python_test.py
create mode 100644 src/lib/dns/python/tsig_python.h
create mode 100644 src/lib/dns/python/tsig_rdata_python.cc
create mode 100644 src/lib/dns/python/tsig_rdata_python.h
create mode 100644 src/lib/dns/python/tsigerror_python.cc
create mode 100644 src/lib/dns/python/tsigerror_python.h
create mode 100644 src/lib/dns/python/tsigerror_python_inc.cc
create mode 100644 src/lib/dns/python/tsigkey_python.h
create mode 100644 src/lib/dns/python/tsigrecord_python.cc
create mode 100644 src/lib/dns/python/tsigrecord_python.h
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
create mode 100644 src/lib/dns/rdata/generic/detail/ds_like.h
create mode 100644 src/lib/dns/rdata/generic/detail/txt_like.h
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.cc
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.h
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.cc
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.h
create mode 100644 src/lib/dns/rdata/generic/minfo_14.cc
create mode 100644 src/lib/dns/rdata/generic/minfo_14.h
create mode 100644 src/lib/dns/rdata/generic/naptr_35.cc
create mode 100644 src/lib/dns/rdata/generic/naptr_35.h
create mode 100644 src/lib/dns/rdata/generic/spf_99.cc
create mode 100644 src/lib/dns/rdata/generic/spf_99.h
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.cc
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.h
create mode 100644 src/lib/dns/rdata/in_1/srv_33.cc
create mode 100644 src/lib/dns/rdata/in_1/srv_33.h
create mode 100644 src/lib/dns/tests/character_string_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_dhcid_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_ds_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_ds_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_hinfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_minfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_naptr_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_srv_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_txt_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_txt_unittest.cc
delete mode 100755 src/lib/dns/tests/testdata/gen-wiredata.py.in
create mode 100644 src/lib/dns/tests/testdata/message_fromWire17.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire18.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire19.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire20.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire21.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire22.spec
create mode 100644 src/lib/dns/tests/testdata/message_toWire4.spec
create mode 100644 src/lib/dns/tests/testdata/message_toWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_fromWire
create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_toWire
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_srv_fromWire
delete mode 100644 src/lib/log/debug_levels.h
create mode 100644 src/lib/log/log_dbglevels.h
create mode 100644 src/lib/log/log_messages.cc
create mode 100644 src/lib/log/log_messages.h
create mode 100644 src/lib/log/log_messages.mes
delete mode 100644 src/lib/log/logger_impl_log4cxx.cc
delete mode 100644 src/lib/log/logger_impl_log4cxx.h
create mode 100644 src/lib/log/logger_level.cc
create mode 100644 src/lib/log/logger_level.h
create mode 100644 src/lib/log/logger_level_impl.cc
create mode 100644 src/lib/log/logger_level_impl.h
delete mode 100644 src/lib/log/logger_levels.h
create mode 100644 src/lib/log/logger_manager.cc
create mode 100644 src/lib/log/logger_manager.h
create mode 100644 src/lib/log/logger_manager_impl.cc
create mode 100644 src/lib/log/logger_manager_impl.h
create mode 100644 src/lib/log/logger_name.cc
create mode 100644 src/lib/log/logger_name.h
create mode 100644 src/lib/log/logger_specification.h
create mode 100644 src/lib/log/logger_unittest_support.cc
create mode 100644 src/lib/log/logger_unittest_support.h
create mode 100644 src/lib/log/logimpl_messages.cc
create mode 100644 src/lib/log/logimpl_messages.h
create mode 100644 src/lib/log/logimpl_messages.mes
delete mode 100644 src/lib/log/messagedef.cc
delete mode 100644 src/lib/log/messagedef.h
delete mode 100644 src/lib/log/messagedef.mes
create mode 100644 src/lib/log/output_option.cc
create mode 100644 src/lib/log/output_option.h
delete mode 100644 src/lib/log/root_logger_name.cc
delete mode 100644 src/lib/log/root_logger_name.h
create mode 100755 src/lib/log/tests/console_test.sh.in
create mode 100755 src/lib/log/tests/destination_test.sh.in
create mode 100644 src/lib/log/tests/init_logger_test.cc
create mode 100755 src/lib/log/tests/init_logger_test.sh.in
create mode 100755 src/lib/log/tests/local_file_test.sh.in
create mode 100644 src/lib/log/tests/logger_example.cc
delete mode 100644 src/lib/log/tests/logger_impl_log4cxx_unittest.cc
create mode 100644 src/lib/log/tests/logger_level_impl_unittest.cc
create mode 100644 src/lib/log/tests/logger_level_unittest.cc
create mode 100644 src/lib/log/tests/logger_manager_unittest.cc
create mode 100644 src/lib/log/tests/logger_name_unittest.cc
create mode 100644 src/lib/log/tests/logger_specification_unittest.cc
delete mode 100644 src/lib/log/tests/logger_support_test.cc
create mode 100644 src/lib/log/tests/logger_support_unittest.cc
create mode 100644 src/lib/log/tests/output_option_unittest.cc
delete mode 100644 src/lib/log/tests/root_logger_name_unittest.cc
delete mode 100755 src/lib/log/tests/run_time_init_test.sh.in
create mode 100755 src/lib/log/tests/severity_test.sh.in
create mode 100644 src/lib/log/tests/tempdir.h.in
delete mode 100644 src/lib/log/tests/xdebuglevel_unittest.cc
delete mode 100644 src/lib/log/xdebuglevel.cc
delete mode 100644 src/lib/log/xdebuglevel.h
create mode 100644 src/lib/nsas/nsas_messages.mes
delete mode 100644 src/lib/nsas/nsasdef.mes
create mode 100644 src/lib/python/isc/acl/Makefile.am
create mode 100644 src/lib/python/isc/acl/__init__.py
create mode 100644 src/lib/python/isc/acl/_dns.py
create mode 100644 src/lib/python/isc/acl/acl.cc
create mode 100644 src/lib/python/isc/acl/acl.py
create mode 100644 src/lib/python/isc/acl/acl_inc.cc
create mode 100644 src/lib/python/isc/acl/dns.cc
create mode 100644 src/lib/python/isc/acl/dns.h
create mode 100644 src/lib/python/isc/acl/dns.py
create mode 100644 src/lib/python/isc/acl/dns_requestacl_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestacl_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestacl_python.h
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_python.h
create mode 100644 src/lib/python/isc/acl/dns_requestloader_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestloader_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestloader_python.h
create mode 100644 src/lib/python/isc/acl/dnsacl_inc.cc
create mode 100644 src/lib/python/isc/acl/tests/Makefile.am
create mode 100644 src/lib/python/isc/acl/tests/acl_test.py
create mode 100644 src/lib/python/isc/acl/tests/dns_test.py
create mode 100644 src/lib/python/isc/bind10/Makefile.am
rename src/{bin/stats/tests/isc => lib/python/isc/bind10}/__init__.py (100%)
create mode 100644 src/lib/python/isc/bind10/sockcreator.py
create mode 100644 src/lib/python/isc/bind10/tests/Makefile.am
create mode 100644 src/lib/python/isc/bind10/tests/sockcreator_test.py
create mode 100644 src/lib/python/isc/config/cfgmgr_messages.mes
create mode 100644 src/lib/python/isc/config/config_messages.mes
create mode 100644 src/lib/python/isc/datasrc/client_inc.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.h
create mode 100644 src/lib/python/isc/datasrc/datasrc.cc
create mode 100644 src/lib/python/isc/datasrc/datasrc.h
create mode 100644 src/lib/python/isc/datasrc/finder_inc.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.h
create mode 100644 src/lib/python/isc/datasrc/iterator_inc.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.h
create mode 100644 src/lib/python/isc/datasrc/tests/datasrc_test.py
create mode 100644 src/lib/python/isc/datasrc/updater_inc.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.h
create mode 100644 src/lib/python/isc/dns/Makefile.am
create mode 100644 src/lib/python/isc/log/log.cc
delete mode 100644 src/lib/python/isc/log/log.py
create mode 100755 src/lib/python/isc/log/tests/check_output.sh
create mode 100644 src/lib/python/isc/log/tests/console.out
create mode 100755 src/lib/python/isc/log/tests/log_console.py.in
delete mode 100644 src/lib/python/isc/log/tests/log_test.in
create mode 100644 src/lib/python/isc/log_messages/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/README
create mode 100644 src/lib/python/isc/log_messages/__init__.py
create mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
create mode 100644 src/lib/python/isc/log_messages/cfgmgr_messages.py
create mode 100644 src/lib/python/isc/log_messages/cmdctl_messages.py
create mode 100644 src/lib/python/isc/log_messages/config_messages.py
create mode 100755 src/lib/python/isc/log_messages/gen-forwarder.sh
create mode 100644 src/lib/python/isc/log_messages/libxfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/notify_out_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_messages.py
create mode 100644 src/lib/python/isc/log_messages/work/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/work/__init__.py.in
create mode 100644 src/lib/python/isc/log_messages/xfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/xfrout_messages.py
create mode 100644 src/lib/python/isc/log_messages/zonemgr_messages.py
create mode 100644 src/lib/python/isc/notify/notify_out_messages.mes
create mode 100644 src/lib/python/isc/testutils/tsigctx_mock.py
create mode 100644 src/lib/python/isc/xfrin/Makefile.am
rename src/{bin/stats/tests/isc/util => lib/python/isc/xfrin}/__init__.py (100%)
create mode 100644 src/lib/python/isc/xfrin/diff.py
create mode 100644 src/lib/python/isc/xfrin/libxfrin_messages.mes
create mode 100644 src/lib/python/isc/xfrin/tests/Makefile.am
create mode 100644 src/lib/python/isc/xfrin/tests/diff_tests.py
create mode 100644 src/lib/resolve/resolve_log.cc
create mode 100644 src/lib/resolve/resolve_log.h
create mode 100644 src/lib/resolve/resolve_messages.mes
create mode 100644 src/lib/server_common/client.cc
create mode 100644 src/lib/server_common/client.h
create mode 100644 src/lib/server_common/keyring.cc
create mode 100644 src/lib/server_common/keyring.h
create mode 100644 src/lib/server_common/logger.cc
create mode 100644 src/lib/server_common/logger.h
create mode 100644 src/lib/server_common/server_common_messages.mes
create mode 100644 src/lib/server_common/tests/client_unittest.cc
create mode 100644 src/lib/server_common/tests/data_path.h.in
create mode 100644 src/lib/server_common/tests/keyring_test.cc
create mode 100644 src/lib/server_common/tests/testdata/spec.spec
delete mode 100644 src/lib/util/io/tests/Makefile.am
delete mode 100644 src/lib/util/io/tests/fd_share_tests.cc
delete mode 100644 src/lib/util/io/tests/fd_tests.cc
delete mode 100644 src/lib/util/io/tests/run_unittests.cc
create mode 100644 src/lib/util/python/Makefile.am
create mode 100755 src/lib/util/python/gen_wiredata.py.in
create mode 100755 src/lib/util/python/mkpywrapper.py.in
create mode 100644 src/lib/util/python/pycppwrapper_util.h
create mode 100644 src/lib/util/python/wrapper_template.cc
create mode 100644 src/lib/util/python/wrapper_template.h
create mode 100644 src/lib/util/pyunittests/Makefile.am
create mode 100644 src/lib/util/pyunittests/pyunittests_util.cc
create mode 100644 src/lib/util/tests/fd_share_tests.cc
create mode 100644 src/lib/util/tests/fd_tests.cc
create mode 100644 src/lib/util/unittests/run_all.cc
create mode 100644 src/lib/util/unittests/run_all.h
create mode 100644 tests/lettuce/README
create mode 100644 tests/lettuce/README.tutorial
create mode 100644 tests/lettuce/configurations/example.org.config.orig
create mode 100644 tests/lettuce/configurations/example2.org.config
create mode 100644 tests/lettuce/configurations/no_db_file.config
create mode 100644 tests/lettuce/data/empty_db.sqlite3
copy {src/lib/datasrc/tests/testdata => tests/lettuce/data}/example.org.sqlite3 (100%)
create mode 100644 tests/lettuce/features/example.feature
create mode 100644 tests/lettuce/features/terrain/bind10_control.py
create mode 100644 tests/lettuce/features/terrain/querying.py
create mode 100644 tests/lettuce/features/terrain/steps.py
create mode 100644 tests/lettuce/features/terrain/terrain.py
create mode 100755 tests/lettuce/setup_intree_bind10.sh.in
create mode 100644 tests/system/common/rndc.conf
create mode 100644 tests/system/common/rndc.key
create mode 100644 tests/system/ixfr/README
create mode 100644 tests/system/ixfr/b10-config.db.in
create mode 100644 tests/system/ixfr/clean_ns.sh
create mode 100644 tests/system/ixfr/common_tests.sh.in
create mode 100644 tests/system/ixfr/db.example.common
create mode 100644 tests/system/ixfr/db.example.n0.in
create mode 100644 tests/system/ixfr/db.example.n2.in
create mode 100644 tests/system/ixfr/db.example.n2.refresh.in
create mode 100644 tests/system/ixfr/db.example.n4.in
create mode 100644 tests/system/ixfr/db.example.n6.in
create mode 120000 tests/system/ixfr/in-1/clean.sh
create mode 100644 tests/system/ixfr/in-1/ns1/README
create mode 100644 tests/system/ixfr/in-1/nsx2/README
create mode 100644 tests/system/ixfr/in-1/setup.sh.in
create mode 100644 tests/system/ixfr/in-1/tests.sh
create mode 120000 tests/system/ixfr/in-2/clean.sh
create mode 100644 tests/system/ixfr/in-2/ns1/README
create mode 100644 tests/system/ixfr/in-2/nsx2/README
create mode 100644 tests/system/ixfr/in-2/setup.sh.in
create mode 100644 tests/system/ixfr/in-2/tests.sh
create mode 120000 tests/system/ixfr/in-3/clean.sh
create mode 100644 tests/system/ixfr/in-3/ns1/README
create mode 100644 tests/system/ixfr/in-3/nsx2/README
create mode 100644 tests/system/ixfr/in-3/setup.sh.in
create mode 100644 tests/system/ixfr/in-3/tests.sh
create mode 120000 tests/system/ixfr/in-4/clean.sh
create mode 100644 tests/system/ixfr/in-4/ns1/README
create mode 100644 tests/system/ixfr/in-4/nsx2/README
create mode 100644 tests/system/ixfr/in-4/setup.sh.in
create mode 100644 tests/system/ixfr/in-4/tests.sh
create mode 100644 tests/system/ixfr/ixfr_init.sh.in
create mode 100644 tests/system/ixfr/named_noixfr.conf
create mode 100644 tests/system/ixfr/named_nonotify.conf
create mode 100644 tests/system/ixfr/named_notify.conf
delete mode 100755 tests/system/run.sh
create mode 100755 tests/system/run.sh.in
create mode 100644 tools/system_messages.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 679542c..45671b7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,517 @@
+ 315. [func] tomek
+ libdhcp: Support for DHCPv4 packet manipulation is now implemented.
+ All fixed fields are now supported. Generic support for DHCPv4
+ options is available (both parsing and assembly). There is no code
+ that uses this new functionality yet, so it is not usable directly
+	at this time. This code will be used by the upcoming b10-dhcp4 daemon.
+ (Trac #1228, git 31d5a4f66b18cca838ca1182b9f13034066427a7)
+
+314. [bug] jelte
+ b10-xfrin would previously initiate incoming transfers upon
+ receiving NOTIFY messages from any address (if the zone was
+ known to b10-xfrin, and using the configured address). It now
+ only starts a transfer if the source address from the NOTIFY
+ packet matches the configured master address and port. This was
+ really already fixed in release bind10-devel-20111014, but there
+ were some deferred cleanups to add.
+ (Trac #1298, git 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2)
+
+313. [func] jinmei
+ datasrc: Added C++ API for adding zone differences to database
+ based data sources. It's intended to be used for the support for
+ IXFR-in and dynamic update (so they can subsequently be retrieved
+ for IXFR-out). The addRecordDiff method of the DatabaseAccessor
+ defines the interface, and a concrete implementation for SQLite3
+ was provided.
+ (Trac #1329, git 1aa233fab1d74dc776899df61181806679d14013)
+
+312. [func] jelte
+ Added an initial framework for doing system tests using the
+ cucumber-based BDD tool Lettuce. A number of general steps are
+ included, for instance running bind10 with specific
+ configurations, sending queries, and inspecting query answers. A
+ few very basic tests are included as well.
+ (Trac #1290, git 6b75c128bcdcefd85c18ccb6def59e9acedd4437)
+
+311. [bug] jelte
+ Fixed a bug in bindctl where tab-completion for names that
+ contain a hyphen resulted in unexpected behaviour, such as
+ appending the already-typed part again.
+ (Trac #1345, git f80ab7879cc29f875c40dde6b44e3796ac98d6da)
+
+310. [bug] jelte
+ Fixed a bug where bindctl could not set a value that is optional
+ and has no default, resulting in the error that the setting
+ itself was unknown. bindctl now correctly sees the setting and
+ is able to set it.
+ (Trac #1344, git 0e776c32330aee466073771600390ce74b959b38)
+
+309. [bug] jelte
+ Fixed a bug in bindctl where the removal of elements from a set
+ with default values was not stored, unless the set had been
+ modified in another way already.
+ (Trac #1343, git 25c802dd1c30580b94345e83eeb6a168ab329a33)
+
+308. [build] jelte
+ The configure script will now use pkg-config for finding
+ information about the Botan library. If pkg-config is unavailable,
+ or unaware of Botan, it will fall back to botan-config. It will
+ also use botan-config when a specific botan library directory is
+	given using the '--with-botan=' flag.
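+	For example, to point the build at a specific Botan install
+	(the path below is only illustrative):
+	./configure --with-botan=/usr/local/botan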
+ (Trac #1194, git dc491833cf75ac1481ba1475795b0f266545013d)
+
+307. [func] vorner
+	When an incoming zone transfer fails with IXFR, it is retried
+	with AXFR automatically.
+ (Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
+
+306. [bug] Stephen
+ Boss process now waits for the configuration manager to initialize
+ itself before continuing with startup. This fixes a race condition
+ whereby the Boss could start the configuration manager and then
+ immediately start components that depended on that component being
+ fully initialized.
+ (Trac #1271, git 607cbae949553adac7e2a684fa25bda804658f61)
+
+305. [bug] jinmei
+ Python isc.dns, isc.datasrc, xfrin, xfrout: fixed reference leak
+ in Message.get_question(), Message.get_section(),
+ RRset.get_rdata(), and DataSourceClient.get_updater().
+ The leak caused severe memory leak in b10-xfrin, and (although no
+	The leak caused a severe memory leak in b10-xfrin, and (although
+	no one reported it) should have caused a less visible leak in
+ (Trac #1028, git a72886e643864bb6f86ab47b115a55e0c7f7fcad)
+
+304. [bug] jelte
+ The run_bind10.sh test script now no longer runs processes from
+ an installed version of BIND 10, but will correctly use the
+ build tree paths.
+ (Trac #1246, git 1d43b46ab58077daaaf5cae3c6aa3e0eb76eb5d8)
+
+303. [bug] jinmei
+	Changed the installation path for the UNIX domain socket file used
+ for the communication between b10-auth and b10-xfrout to a
+ "@PACKAGE@" subdirectory (e.g. from /usr/local/var to
+ /usr/local/var/bind10-devel). This should be transparent change
+	/usr/local/var/bind10-devel). This should be a transparent change
+ if the old file somehow remains, it can now be safely removed.
+ (Trac #869, git 96e22f4284307b1d5f15e03837559711bb4f580c)
+
+302. [bug] jelte
+ msgq no longer crashes if the remote end is closed while msgq
+ tries to send data. It will now simply drop the message and close
+ the connection itself.
+ (Trac #1180, git 6e68b97b050e40e073f736d84b62b3e193dd870a)
+
+301. [func] stephen
+ Add system test for IXFR over TCP.
+ (Trac #1213, git 68ee3818bcbecebf3e6789e81ea79d551a4ff3e8)
+
+300. [func]* tomek
+ libdhcp: DHCP packet library was implemented. Currently it handles
+ packet reception, option parsing, option generation and output
+ packet building. Generic and specialized classes for several
+	DHCPv6 options (IA_NA, IAADDR, address-list) are available. A
+	simple program that leverages libdhcp was added: a skeleton
+ DHCPv6 server. It receives incoming SOLICIT and REQUEST messages
+ and responds with proper ADVERTISE and REPLY. Note that since
+ LeaseManager is not implemented, server assigns the same
+	LeaseManager is not implemented, the server assigns the same
+	hardcoded lease to every client. This change removes the existing
+	DHCPv6 echo server, as it was only proof-of-concept code.
+
+299. [build] jreed
+ Do not install the libfake_session, libtestutils, or libbench
+ libraries. They are used by tests within the source tree.
+ Convert all test-related makefiles to build test code at
+ regular make time to better work with test-driven development.
+	This reverts some of #1901. (The tests are run using "make
+	check".)
+ (Trac #1286, git cee641fd3d12341d6bfce5a6fbd913e3aebc1e8e)
+
+bind10-devel-20111014 released on October 14, 2011
+
+298. [doc] jreed
+ Shorten README. Include plain text format of the Guide.
+ (git d1897d3, git 337198f)
+
+297. [func] dvv
+ Implement the SPF rrtype according to RFC4408.
+ (Trac #1140, git 146934075349f94ee27f23bf9ff01711b94e369e)
+
+296. [build] jreed
+ Do not install the unittest libraries. At this time, they
+ are not useful without source tree (and they may or may
+ not have googletest support). Also, convert several makefiles
+ to build tests at "check" time and not build time.
+ (Trac #1091, git 2adf4a90ad79754d52126e7988769580d20501c3)
+
+295. [bug] jinmei
+ __init__.py for isc.dns was installed in the wrong directory,
+ which would now make xfrin fail to start. It was also bad
+	in that it replaced any existing __init__.py in the public
+	site-packages directory. After applying this fix you may want to
+ check if the wrong init file is in the wrong place, in which
+ case it should be removed.
+ (Trac #1285, git af3b17472694f58b3d6a56d0baf64601b0f6a6a1)
+
+294. [func] jelte, jinmei, vorner
+ b10-xfrin now supports incoming IXFR. See BIND 10 Guide for
+ how to configure it and operational notes.
+ (Trac #1212, multiple git merges)
+
+293. [func]* tomek
+ b10-dhcp6: Implemented DHCPv6 echo server. It joins DHCPv6
+ multicast groups and listens to incoming DHCPv6 client messages.
+ Received messages are then echoed back to clients. This
+ functionality is limited, but it can be used to test out client
+ resiliency to unexpected messages. Note that network interface
+	detection routines are not implemented yet, so the interface name
+	and its address must be specified in interfaces.txt.
+ (Trac #878, git 3b1a604abf5709bfda7271fa94213f7d823de69d)
+
+292. [func] dvv
+ Implement the DLV rrtype according to RFC4431.
+ (Trac #1144, git d267c0511a07c41cd92e3b0b9ee9bf693743a7cf)
+
+291. [func] naokikambe
+ Statistics items are specified by each module's spec file.
+ Stats module can read these through the config manager. Stats
+ module and stats httpd report statistics data and statistics
+ schema by each module via both bindctl and HTTP/XML.
+ (Trac #928,#929,#930,#1175,
+ git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
+290. [func] jinmei
+ libdns++/pydnspp: added an option parameter to the "from wire"
+ methods of the Message class. One option is defined,
+	PRESERVE_ORDER, which tells the parser to handle each RR
+	separately, preserving the order, and to construct RRsets in the
+	message sections so that each RRset contains only one RR.
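+	A minimal pydnspp sketch (it is assumed here that the option
+	constant lives on the Message class; wire_data holds a raw DNS
+	message):
+	>>> from pydnspp import Message
+	>>> msg = Message(Message.PARSE)
+	>>> msg.from_wire(wire_data, Message.PRESERVE_ORDER)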
+ (Trac #1258, git c874cb056e2a5e656165f3c160e1b34ccfe8b302)
+
+289. [func]* jinmei
+ b10-xfrout: ACLs for xfrout can now be configured per zone basis.
+ A per zone ACL is part of a more general zone configuration. A
+ quick example for configuring an ACL for zone "example.com" that
+ rejects any transfer request for that zone is as follows:
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config add Xfrout/zone_config[0]/transfer_acl
+ > config set Xfrout/zone_config[0]/transfer_acl[0] {"action": "REJECT"}
+ The previous global ACL (query_acl) was renamed to transfer_acl,
+ which now works as the default ACL. Note: backward compatibility
+ is not provided, so an existing configuration using query_acl
+ needs to be updated by hand.
+ Note: the per zone configuration framework is a temporary
+ workaround. It will eventually be redesigned as a system wide
+ configuration.
+ (Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
+
+288. [bug] stephen
+ Fixed problem whereby the order in which component files appeared in
+ rdataclass.cc was system dependent, leading to problems on some
+ systems where data types were used before the header file in which
+ they were declared was included.
+ (Trac #1202, git 4a605525cda67bea8c43ca8b3eae6e6749797450)
+
+287. [bug]* jinmei
+ Python script files for log messages (xxx_messages.py) should have
+ been installed under the "isc" package. This fix itself should
+ be a transparent change without affecting existing configurations
+ or other operational practices, but you may want to clean up the
+	python files from the common directory (such as "site-packages").
+ (Trac #1101, git 0eb576518f81c3758c7dbaa2522bd8302b1836b3)
+
+286. [func] ocean
+ libdns++: Implement the HINFO rrtype support according to RFC1034,
+ and RFC1035.
+ (Trac #1112, git 12d62d54d33fbb1572a1aa3089b0d547d02924aa)
+
+285. [bug] jelte
+ sqlite3 data source: fixed a race condition on initial startup,
+ when the database has not been initialized yet, and multiple
+ processes are trying to do so, resulting in one of them failing.
+ (Trac #326, git 5de6f9658f745e05361242042afd518b444d7466)
+
+284. [bug] jerry
+ b10-zonemgr: zonemgr will not terminate on empty zones, it will
+	b10-zonemgr: zonemgr will not terminate on empty zones; it will
+	log a warning and try to do a zone transfer for them.
+
+283. [bug] zhanglikun
+	Make the stats and boss processes wait for answer messages from
+	each other in blocking mode to avoid orphan answer messages, and
+	add an internal command "getstats" to the boss process for getting
+	statistics data from boss.
+ (Trac #519, git 67d8e93028e014f644868fede3570abb28e5fb43)
+
+282. [func] ocean
+ libdns++: Implement the NAPTR rrtype according to RFC2915,
+ RFC2168 and RFC3403.
+ (Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
+
+bind10-devel-20110819 released on August 19, 2011
+
+281. [func] jelte
+ Added a new type for configuration data: "named set". This allows for
+	configuration similar to the current "list" type, but with strings
+ instead of indices as identifiers. The intended use is for instance
+ /foo/zones/example.org/bar instead of /foo/zones[2]/bar. Currently
+ this new type is not in use yet.
+ (Trac #926, git 06aeefc4787c82db7f5443651f099c5af47bd4d6)
+
+280. [func] jerry
+ libdns++: Implement the MINFO rrtype according to RFC1035.
+ (Trac #1113, git 7a9a19d6431df02d48a7bc9de44f08d9450d3a37)
+
+279. [func] jerry
+ libdns++: Implement the AFSDB rrtype according to RFC1183.
+ (Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
+278. [doc] jelte
+ Add logging configuration documentation to the guide.
+ (Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
+
+277. [func] jerry
+ libdns++: Implement the SRV rrtype according to RFC2782.
+ (Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
+
+276. [func] stephen
+ Although the top-level loggers are named after the program (e.g.
+ b10-auth, b10-resolver), allow the logger configuration to omit the
+ "b10-" prefix and use just the module name.
+ (Trac #1003, git a01cd4ac5a68a1749593600c0f338620511cae2d)
+
+275. [func] jinmei
+ Added support for TSIG key matching in ACLs. The xfrout ACL can
+ now refer to TSIG key names using the "key" attribute. For
+ example, the following specifies an ACL that allows zone transfer
+ if and only if the request is signed with a TSIG of a key name
+ "key.example":
+ > config set Xfrout/query_acl[0] {"action": "ACCEPT", \
+ "key": "key.example"}
+ (Trac #1104, git 9b2e89cabb6191db86f88ee717f7abc4171fa979)
+
+274. [bug] naokikambe
+	Add unit tests for the functions xml_handler, xsd_handler and
+	xsl_handler to make sure their behaviors are correct, regardless
+	of whether the type that xml.etree.ElementTree.tostring() returns
+	after Python 3.2 is str or bytes.
+ (Trac #1021, git 486bf91e0ecc5fbecfe637e1e75ebe373d42509b)
+
+273. [func] vorner
+ It is possible to specify ACL for the xfrout module. It is in the ACL
+ configuration key and has the usual ACL syntax. It currently supports
+ only the source address. Default ACL accepts everything.
+ (Trac #772, git 50070c824270d5da1db0b716db73b726d458e9f7)
+
+272. [func] jinmei
+ libdns++/pydnspp: TSIG signing now handles truncated DNS messages
+ (i.e. with TC bit on) with TSIG correctly.
+	(Trac #910, git 8e00f359e81c3cb03c5075710ead0f87f87e3220)
+
+271. [func] stephen
+ Default logging for unit tests changed to severity DEBUG (level 99)
+ with the output routed to /dev/null. This can be altered by setting
+ the B10_LOGGER_XXX environment variables.
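+	For example (the specific variable names here are assumed members
+	of the B10_LOGGER_XXX family; run_unittests stands for any unit
+	test binary):
+	B10_LOGGER_SEVERITY=INFO B10_LOGGER_DESTINATION=stdout ./run_unittests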
+ (Trac #1024, git 72a0beb8dfe85b303f546d09986461886fe7a3d8)
+
+270. [func] jinmei
+ Added python bindings for ACLs using the DNS request as the
+ context. They are accessible via the isc.acl.dns module.
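+	A sketch of intended use (REQUEST_LOADER and its load() method
+	are assumed names, not confirmed by this entry):
+	>>> from isc.acl.dns import REQUEST_LOADER
+	>>> acl = REQUEST_LOADER.load('[{"action": "ACCEPT", "from": "127.0.0.1"}]')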
+ (Trac #983, git c24553e21fe01121a42e2136d0a1230d75812b27)
+
+269. [bug] y-aharen
+ Modified IntervalTimerTest not to rely on the accuracy of the timer.
+ This fix addresses occasional failure of build tests.
+ (Trac #1016, git 090c4c5abac33b2b28d7bdcf3039005a014f9c5b)
+
+268. [func] stephen
+ Add environment variable to allow redirection of logging output during
+ unit tests.
+ (Trac #1071, git 05164f9d61006869233b498d248486b4307ea8b6)
+
+bind10-devel-20110705 released on July 05, 2011
+
+267. [func] tomek
+ Added a dummy module for DHCP6. This module does not actually
+ do anything at this point, and BIND 10 has no option for
+ starting it yet. It is included as a base for further
+ development.
+ (Trac #990, git 4a590df96a1b1d373e87f1f56edaceccb95f267d)
+
+266. [func] Multiple developers
+ Convert various error messages, debugging and other output
+ to the new logging interface, including for b10-resolver,
+ the resolver library, the CC library, b10-auth, b10-cfgmgr,
+ b10-xfrin, and b10-xfrout. This includes a lot of new
+ documentation describing the new log messages.
+ (Trac #738, #739, #742, #746, #759, #761, #762)
+
+265. [func]* jinmei
+ b10-resolver: Introduced ACL on incoming queries. By default the
+ resolver accepts queries from ::1 and 127.0.0.1 and rejects all
+ others. The ACL can be configured with bindctl via the
+ "Resolver/query_acl" parameter. For example, to accept queries
+ from 192.0.2.0/24 (in addition to the default list), do this:
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[2]/action "ACCEPT"
+ > config set Resolver/query_acl[2]/from "192.0.2.0/24"
+ > config commit
+ (Trac #999, git e0744372924442ec75809d3964e917680c57a2ce,
+ also based on other ACL related work done by stephen and vorner)
+
+264. [bug] jerry
+ b10-xfrout: fixed a busy loop in its notify-out subthread. Due to
+ the loop, the thread previously woke up every 0.5 seconds throughout
+ most of the lifetime of b10-xfrout, wasting the corresponding CPU
+ time.
+ (Trac #1001, git fb993ba8c52dca4a3a261e319ed095e5af8db15a)
+
+263. [func] jelte
+ Logging configuration can now also accept a * as a first-level
+ name (e.g. '*', or '*.cache'), indicating that every module
+ should use that configuration, unless overridden by an explicit
+	logging configuration for that module.
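+	For example, to apply a single default to every module (assuming
+	the Logging/loggers list structure):
+	> config add Logging/loggers
+	> config set Logging/loggers[0]/name "*"
+	> config set Logging/loggers[0]/severity "INFO"
+	> config commit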
+ (Trac #1004, git 0fad7d4a8557741f953eda9fed1d351a3d9dc5ef)
+
+262. [func] stephen
+ Add some initial documentation about the logging framework.
+	Provide the BIND 10 Messages Manual in HTML and DocBook XML formats.
+ This provides all the log message descriptions in a single document.
+ A developer tool, tools/system_messages.py (available in git repo),
+ was written to generate this.
+ (Trac #1012, git 502100d7b9cd9d2300e78826a3bddd024ef38a74)
+
+261. [func] stephen
+ Add new-style logging messages to b10-auth.
+ (Trac #738, git c021505a1a0d6ecb15a8fd1592b94baff6d115f4)
+
+260. [func] stephen
+ Remove comma between message identification and the message
+ text in the new-style logging messages.
+ (Trac #1031, git 1c7930a7ba19706d388e4f8dcf2a55a886b74cd2)
+
+259. [bug] stephen
+ Logging now correctly initialized in b10-auth. Also, fixed
+ bug whereby querying for "version.bind txt ch" would cause
+ b10-auth to crash if BIND 10 was started with the "-v" switch.
+ (Trac #1022,#1023, git 926a65fa08617be677a93e9e388df0f229b01067)
+
+258. [build] jelte
+ Now builds and runs with Python 3.2
+ (Trac #710, git dae1d2e24f993e1eef9ab429326652f40a006dfb)
+
+257. [bug] y-aharen
+	Fixed a bug where an instance of IntervalTimerImpl may be destructed
+ while deadline_timer is holding the handler. This fix addresses
+ occasional failure of IntervalTimerTest.destructIntervalTimer.
+ (Trac #957, git e59c215e14b5718f62699ec32514453b983ff603)
+
+256. [bug] jerry
+	src/bin/xfrin: update xfrin to check TSIG before other parts of
+	the incoming message.
+ (Trac #955, git 261450e93af0b0406178e9ef121f81e721e0855c)
+
+255. [func] zhang likun
+ src/lib/cache: remove empty code in lib/cache and the corresponding
+ suppression rule in src/cppcheck-suppress.lst.
+ (Trac #639, git 4f714bac4547d0a025afd314c309ca5cb603e212)
+
+254. [bug] jinmei
+ b10-xfrout: failed to send notifies over IPv6 correctly.
+ (Trac #964, git 3255c92714737bb461fb67012376788530f16e40)
+
+253. [func] jelte
+ Add configuration options for logging through the virtual module
+ Logging.
+ (Trac #736, git 9fa2a95177265905408c51d13c96e752b14a0824)
+
+252. [func] stephen
+ Add syslog as destination for logging.
+ (Trac #976, git 31a30f5485859fd3df2839fc309d836e3206546e)
+
+251. [bug]* jinmei
+	Make sure bindctl private files are not readable by anyone except
+	the owner or users in the same group. Note that if BIND 10 is run
+	as a different user, this change means that the file owner or
+ group will have to be adjusted. Also note that this change is
+ only effective for a fresh install; if these files already exist,
+ their permissions must be adjusted by hand (if necessary).
+ (Trac #870, git 461fc3cb6ebabc9f3fa5213749956467a14ebfd4)
+
+250. [bug] ocean
+	src/lib/util/encode: in some conditions, the DecodeNormalizer's
+	iterator may reach end() and, when later dereferenced,
+	cause a crash on some platforms.
+ (Trac #838, git 83e33ec80c0c6485d8b116b13045b3488071770f)
+
+249. [func] jerry
+ xfrout: add support for TSIG verification.
+ (Trac #816, git 3b2040e2af2f8139c1c319a2cbc429035d93f217)
+
+248. [func] stephen
+ Add file and stderr as destinations for logging.
+ (Trac #555, git 38b3546867425bd64dbc5920111a843a3330646b)
+
+247. [func] jelte
+ Upstream queries from the resolver now set EDNS0 buffer size.
+ (Trac #834, git 48e10c2530fe52c9bde6197db07674a851aa0f5d)
+
+246. [func] stephen
+ Implement logging using log4cplus (http://log4cplus.sourceforge.net)
+ (Trac #899, git 31d3f525dc01638aecae460cb4bc2040c9e4df10)
+
+245. [func] vorner
+ Authoritative server can now sign the answers using TSIG
+	(configured in tsig_keys/keys, a list of strings like
+ "name:<base64-secret>:sha1-hmac"). It doesn't use them for
+ ACL yet, only verifies them and signs if the request is signed.
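+	For example (the key name and secret are placeholders):
+	> config set tsig_keys/keys ["example.key:<base64-secret>:sha1-hmac"]
+	> config commit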
+ (Trac #875, git fe5e7003544e4e8f18efa7b466a65f336d8c8e4d)
+
+244. [func] stephen
+ In unit tests, allow the choice of whether unhandled exceptions are
+ caught in the unit test program (and details printed) or allowed to
+ propagate to the default exception handler. See the bind10-dev thread
+ https://lists.isc.org/pipermail/bind10-dev/2011-January/001867.html
+ for more details.
+ (Trac #542, git 1aa773d84cd6431aa1483eb34a7f4204949a610f)
+
+243. [func]* feng
+	Add optional HMAC algorithms SHA224/384/512.
+ (Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
+
+bind10-devel-20110519 released on May 19, 2011
+
+242. [func] jinmei
+ xfrin: added support for TSIG verify. This change completes TSIG
+ support in b10-xfrin.
+ (Trac #914, git 78502c021478d97672232015b7df06a7d52e531b)
+
+241. [func] jinmei
+ pydnspp: added python extension for the TSIG API introduced in
+ change 235.
+ (Trac #905, git 081891b38f05f9a186814ab7d1cd5c572b8f777f)
+ (Trac #915, git 0555ab65d0e43d03b2d40c95d833dd050eea6c23)
+
+240. [func]* jelte
+ Updated configuration options to Xfrin, so that you can specify
+ a master address, port, and TSIG key per zone. Still only one per
+ zone at this point, and TSIG keys are (currently) only specified
+ by their full string representation. This replaces the
+ Xfrin/master_addr, Xfrin/master_port, and short-lived
+ Xfrin/tsig_key configurations with a Xfrin/zones list.
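+	A minimal example (the per-zone item names below are assumed for
+	illustration):
+	> config add Xfrin/zones
+	> config set Xfrin/zones[0]/name "example.com."
+	> config set Xfrin/zones[0]/master_addr "192.0.2.1"
+	> config commit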
+ (Trac #811, git 88504d121c5e08fff947b92e698a54d24d14c375)
+
+239. [bug] jerry
+	src/bin/xfrout: If a zone doesn't have notify slaves (only has
+	one apex NS record - the primary master name server), b10-xfrout
+	would use 100% of the CPU.
+ (Trac #684, git d11b5e89203a5340d4e5ca51c4c02db17c33dc1f)
+
238. [func] zhang likun
- Implement the simplest forwarder, which pass everything throught
+	Implement the simplest forwarder, which passes everything through
except QID, port number. The response will not be cached.
(Trac #598_new, git 8e28187a582820857ef2dae9b13637a3881f13ba)
@@ -15,7 +527,7 @@
stats module and stats-httpd module, and maybe with other
statistical modules in future. "stats.spec" has own configuration
and commands of stats module, if it requires.
- (Trac#719, git a234b20dc6617392deb8a1e00eb0eed0ff353c0a)
+ (Trac #719, git a234b20dc6617392deb8a1e00eb0eed0ff353c0a)
236. [func] jelte
C++ client side of configuration now uses BIND10 logging system.
@@ -52,19 +564,19 @@
(Trac #900, git b395258c708b49a5da8d0cffcb48d83294354ba3)
231. [func]* vorner
- The logging interface changed slightly. We use
+ The logging interface changed slightly. We use
logger.foo(MESSAGE_ID).arg(bar); instead of logger.foo(MESSAGE_ID,
bar); internally. The message definitions use '%1,%2,...'
instead of '%s,%d', which allows us to cope better with
mismatched placeholders and allows reordering of them in
case of translation.
- (Trac901, git 4903410e45670b30d7283f5d69dc28c2069237d6)
+ (Trac #901, git 4903410e45670b30d7283f5d69dc28c2069237d6)
230. [bug] naokikambe
Removed too repeated verbose messages in two cases of:
- when auth sends statistics data to stats
- when stats receives statistics data from other modules
- (Trac#620, git 0ecb807011196eac01f281d40bc7c9d44565b364)
+ (Trac #620, git 0ecb807011196eac01f281d40bc7c9d44565b364)
229. [doc] jreed
Add manual page for b10-host.
@@ -94,14 +606,14 @@
(Trac #781, git 9df42279a47eb617f586144dce8cce680598558a)
225. [func] naokikambe
- Added the HTTP/XML interface(b10-stats-httpd) to the
+ Added the HTTP/XML interface (b10-stats-httpd) to the
statistics feature in BIND 10. b10-stats-httpd is a standalone
HTTP server and it requests statistics data to the stats
- daemon(b10-stats) and sends it to HTTP clients in XML
+ daemon (b10-stats) and sends it to HTTP clients in XML
format. Items of the data collected via b10-stats-httpd
are almost equivalent to ones which are collected via
- bindctl. Since it also can send XSL(Extensible Stylessheet
- Language) document and XSD(XML Schema definition) document,
+ bindctl. Since it also can send XSL (Extensible Stylesheet
+ Language) document and XSD (XML Schema definition) document,
XML document is human-friendly to view through web browsers
and its data types are strictly defined.
(Trac #547, git 1cbd51919237a6e65983be46e4f5a63d1877b1d3)
@@ -120,11 +632,13 @@
reconfigure them.
(Trac #775, git 572ac2cf62e18f7eb69d670b890e2a3443bfd6e7)
-222. [bug] jerry
- src/lib/zonemgr: Fix a bug that xfrin not checking for new copy of
- zone on startup. Imposes some random jitters to avoid many zones
- need to do refresh at the same time.
- (Trac #387, svn 9140fab9bab5f6502bd15d391fd51ac078b0b89b)
+222. [bug]* jerry
+ src/lib/zonemgr: Fix a bug that xfrin not checking for new
+ copy of zone on startup. Imposes some random jitters to
+ avoid many zones need to do refresh at the same time. This
+ removed the Zonemgr/jitter_scope setting and introduced
+ Zonemgr/refresh_jitter and Zonemgr/reload_jitter.
+ (Trac #387, git 1241ddcffa16285d0a7bb01d6a8526e19fbb70cb)
221. [func]* jerry
src/lib/util: Create C++ utility library.
diff --git a/Makefile.am b/Makefile.am
index bab6679..50aa6b9 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -2,12 +2,16 @@ SUBDIRS = doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
+DISTCHECK_GTEST_CONFIGURE_FLAG=@DISTCHECK_GTEST_CONFIGURE_FLAG@
DISTCLEANFILES = config.report
# When running distcheck target, do not install the configurations
DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations
+# Use same --with-gtest flag if set
+DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG)
+
clean-cpp-coverage:
@if [ $(USE_LCOV) = yes ] ; then \
$(LCOV) --directory . --zerocounters; \
@@ -38,8 +42,11 @@ report-cpp-coverage:
c++/4.4\*/ext/\* \
c++/4.4\*/\*-\*/bits/\* \
boost/\* \
+ botan/\* \
ext/asio/\* \
+ ext/coroutine/\* \
gtest/\* \
+ log4cplus/\* \
usr/include/\* \
tests/\* \
unittests/\* \
diff --git a/README b/README
index 5320a6e..99e2ece 100644
--- a/README
+++ b/README
@@ -1,3 +1,4 @@
+
This is the source for the development version of BIND 10.
BIND is the popular implementation of a DNS server, developer
@@ -8,10 +9,10 @@ for serving, maintaining, and developing DNS.
BIND10-devel is new development leading up to the production
BIND 10 release. It contains prototype code and experimental
interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
- https://bind10.isc.org/wiki/Year2Milestones
+ http://bind10.isc.org/wiki/Year3Goals
This release includes the bind10 master process, b10-msgq message
bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -19,12 +20,17 @@ backends), b10-resolver recursive or forwarding DNS server, b10-cmdctl
remote control daemon, b10-cfgmgr configuration manager, b10-xfrin
AXFR inbound service, b10-xfrout outgoing AXFR service, b10-zonemgr
secondary manager, b10-stats statistics collection and reporting
-daemon, and a new libdns++ library for C++ with a python wrapper.
+daemon, b10-stats-httpd for HTTP access to XML-formatted stats,
+b10-host DNS lookup utility, and a new libdns++ library for C++
+with a python wrapper. BIND 10 also provides an experimental DHCPv6
+echo server, b10-dhcp6.
-Documentation is included and also available via the BIND 10
-website at http://bind10.isc.org/
+Documentation is included with the source. See doc/guide/bind10-guide.txt
+(or bind10-guide.html) for installation instructions. The
+documentation is also available via the BIND 10 website at
+http://bind10.isc.org/
-The latest released source may be downloaded from:
+The latest released source tar file may be downloaded from:
ftp://ftp.isc.org/isc/bind10/
@@ -38,15 +44,11 @@ Bugs may be reported as tickets via the developers website:
http://bind10.isc.org/
-BUILDING
-
-See the Guide for detailed installation directions at
-doc/guide/bind10-guide.html.
-
-Simple build instructions:
+Simple build and installation instructions:
./configure
make
+ make install
If building from Git repository, run:
@@ -54,197 +56,11 @@ If building from Git repository, run:
before running ./configure
-Requires autoconf 2.59 or newer.
-
-Use automake-1.11 or better for working Python 3.1 tests.
-Alternatively, you could manually specify an absolute path to python
-executable by the --with-pythonpath option of the configure script,
-e.g.,
-% ./configure --with-pythonpath=/usr/local/bin/python3.1
-
-Operating-System specific tips:
-
-- FreeBSD
- You may need to install a python binding for sqlite3 by hand. A
- sample procedure is as follows:
- - add the following to /etc/make.conf
- PYTHON_VERSION=3.1
- - build and install the python binding from ports, assuming the top
- directory of the ports system is /usr/ports
- % cd /usr/ports/databases/py-sqlite3/
- % make
- % sudo make install
-
-INSTALLATION
+See the Guide for detailed installation directions at
+doc/guide/bind10-guide.txt.
-Install with:
+For operating system specific tips see the wiki at:
- make install
+ http://bind10.isc.org/wiki/SystemSpecificNotes
-TESTS
-
-The tests use the googletests framework for C++. It is available
-from http://code.google.com/p/googletest/. To enable the tests,
-configure BIND 10 with:
-
- ./configure --with-gtest
-
-Then run "make check" to run these tests.
-
-TEST COVERAGE
-
-Code coverage reports may be generated using make. These are
-based on running on the unit tests. The resulting reports are placed
-in coverage-cpp-html and coverage-python-html directories for C++
-and Python, respectively.
-
-The code coverage report for the C++ tests uses LCOV. It is available
-from http://ltp.sourceforge.net/. To generate the HTML report,
-first configure BIND 10 with:
-
- ./configure --with-lcov
-
-The code coverage report for the Python tests uses coverage.py (aka
-pycoverage). It is available from http://nedbatchelder.com/code/coverage/.
-To generate the HTML report, first configure BIND 10 with:
-
- ./configure --with-pycoverage
-
-Doing code coverage tests:
-
- make coverage
- Does the clean, perform, and report targets for C++ and Python.
-
- make clean-coverage
- Zeroes the code coverage counters and removes the HTML reports
- for C++ and Python.
-
- make perform-coverage
- Runs the C++ (using the googletests framework) and Python
- tests.
-
- make report-coverage
- Generates the coverage reports in HTML for C++ and Python.
-
- make clean-cpp-coverage
- Zeroes the code coverage counters and removes the HTML report
- for the C++ tests.
-
- make clean-python-coverage
- Zeroes the code coverage counters and removes the HTML report
- for the Python tests.
-
- make report-cpp-coverage
- Generates the coverage report in HTML for C++, excluding
- some unrelated headers. The HTML reports are placed in a
- directory called coverage-cpp-html/.
-
- make report-python-coverage
- Generates the coverage report in HTML for Python. The HTML
- reports are placed in a directory called coverage-python-html/.
-
-DEVELOPERS
-
-The generated run_*.sh scripts available in the src/bin directories
-are for running the code using the source tree.
-
-RUNNING
-
-You can start the BIND 10 processes by running bind10 which is
-installed to the sbin directory under the installation prefix.
-The default location is:
-
- /usr/local/sbin/bind10
-
-For development work, you can also run the bind10 services from the
-source tree:
-
- ./src/bin/bind10/run_bind10.sh
-
-(Which will use the modules and configurations also from the source
-tree.)
-
-CONFIGURATION
-
-Commands can be given through the bindctl tool.
-
-The server must be running for bindctl to work.
-
-The following configuration commands are available
-
-help: show the different command modules
-<module> help: show the commands for module
-<module> <command> help: show info for the command
-
-
-config show [identifier]: Show the currently set values. If no identifier is
- given, the current location is used. If a config
- option is a list or a map, the value is not
- shown directly, but must be requested separately.
-config go [identifier]: Go to the given location within the configuration.
-config set [identifier] <value>: Set a configuration value.
-config unset [identifier]: Remove a value (reverts to default if the option
- is mandatory).
-config add [identifier] <value>: add a value to a list
-config remove [identifier] <value>: remove a value from a list
-config revert: Revert all changes that have not been committed
-config commit: Commit all changes
-config diff: Show the changes that have not been committed yet
-
-
-EXAMPLE SESSION
-
-~> bindctl
-["login success "] login as root
-> help
-BindCtl, verstion 0.1
-usage: <module name> <command name> [param1 = value1 [, param2 = value2]]
-Type Tab character to get the hint of module/command/paramters.
-Type "help(? h)" for help on bindctl.
-Type "<module_name> help" for help on the specific module.
-Type "<module_name> <command_name> help" for help on the specific command.
-
-Available module names:
- help Get help for bindctl
- config Configuration commands
- Xfrin same here
- Auth same here
- Boss same here
-> config help
-Module config Configuration commands
-Available commands:
- help (Get help for module)
- show (Show configuration)
- add (Add entry to configuration list)
- remove (Remove entry from configuration list)
- set (Set a configuration value)
- unset (Unset a configuration value)
- diff (Show all local changes)
- revert (Revert all local changes)
- commit (Commit all local changes)
- go (Go to a specific configuration part)
-> config show
-Xfrin/ module
-Auth/ module
-Boss/ module
-> config show Xfrin
-transfers_in: 10 integer
-> config go Auth
-/Auth> config show
-database_file: None string
-/Auth> config set database_file /tmp/bind10_zones.db
-/Auth> config commit
-/Auth> config go /
-> config show Auth/
-database_file: /tmp/bind10_zones.db string
-> config diff
-{}
-> config set Auth/foobar
-Error: missing identifier or value
-> config set Auth/database_file foobar
-> config diff
-{'Auth': {'database_file': 'foobar'}}
-> config revert
-> config diff
-{}
-> quit
+Please see the wiki and the doc/ directory for various documentation.
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..e69de29
diff --git a/configure.ac b/configure.ac
index a8378ce..9723b8d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20110322, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20111021, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
AM_INIT_AUTOMAKE
AC_CONFIG_HEADERS([config.h])
@@ -12,6 +12,12 @@ AC_PROG_CXX
# Libtool configuration
#
+
+# libtool cannot handle spaces in paths, so exit early if there is one
+if [ test `echo $PWD | grep -c ' '` != "0" ]; then
+ AC_MSG_ERROR([BIND 10 cannot be built in a directory that contains spaces, because of libtool limitations. Please change the directory name, or use a symbolic link that does not contain spaces.])
+fi
+
# On FreeBSD (and probably some others), clang++ does not meet an autoconf
# assumption in identifying libtool configuration regarding shared library:
# the configure script will execute "$CC -shared $CFLAGS/$CXXFLAGS -v" and
@@ -139,6 +145,26 @@ else
AC_SUBST(pkgpyexecdir)
fi
+# We need to store the default pyexecdir in a separate variable so that
+# we can specify in Makefile.am the install directory of various BIND 10
+# python scripts and loadable modules; in Makefile.am we cannot replace
+# $(pyexecdir) using itself, e.g, this doesn't work:
+# pyexecdir = $(pyexecdir)/isc/some_module
+# The separate variable makes this setup possible as follows:
+# pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/some_module
+PYTHON_SITEPKG_DIR=${pyexecdir}
+AC_SUBST(PYTHON_SITEPKG_DIR)
+
+# This will be commonly used in various Makefile.am's that need to generate
+# python log messages.
+PYTHON_LOGMSGPKG_DIR="\$(top_builddir)/src/lib/python/isc/log_messages"
+AC_SUBST(PYTHON_LOGMSGPKG_DIR)
+
+# This is python package paths commonly used in python tests. See
+# README of log_messages for why it's included.
+COMMON_PYTHON_PATH="\$(abs_top_builddir)/src/lib/python/isc/log_messages:\$(abs_top_srcdir)/src/lib/python:\$(abs_top_builddir)/src/lib/python"
+AC_SUBST(COMMON_PYTHON_PATH)
+
# Check for python development environments
if test -x ${PYTHON}-config; then
PYTHON_INCLUDES=`${PYTHON}-config --includes`
@@ -260,6 +286,8 @@ B10_CXXFLAGS="-Wall -Wextra -Wwrite-strings -Woverloaded-virtual -Wno-sign-compa
case "$host" in
*-solaris*)
MULTITHREADING_FLAG=-pthreads
+ # In Solaris, IN6ADDR_ANY_INIT and IN6ADDR_LOOPBACK_INIT need -Wno-missing-braces
+ B10_CXXFLAGS="$B10_CXXFLAGS -Wno-missing-braces"
;;
*)
MULTITHREADING_FLAG=-pthread
@@ -280,6 +308,35 @@ namespace isc {class Bar {Foo foo_;};} ],,
[AC_MSG_RESULT(yes)])
CXXFLAGS="$CXXFLAGS_SAVED"
+# Python 3.2 has an unused parameter in one of its headers. This
+# has been reported, but not fixed as of yet, so we check if we need
+# to set -Wno-unused-parameter.
+if test $werror_ok = 1; then
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS=${PYTHON_INCLUDES}
+ CXXFLAGS_SAVED="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $B10_CXXFLAGS -Werror"
+ AC_MSG_CHECKING([whether we need -Wno-unused-parameter for python])
+ AC_TRY_COMPILE(
+ [#include <Python.h>],
+ [],
+ [AC_MSG_RESULT(no)],
+ [
+ CXXFLAGS="$CXXFLAGS -Wno-unused-parameter"
+ AC_TRY_COMPILE([#include <Python.h>],
+ [],
+ [AC_MSG_RESULT(yes)
+ PYTHON_CXXFLAGS="${PYTHON_CXXFLAGS} -Wno-unused-parameter"
+ AC_SUBST(PYTHON_CXXFLAGS)
+ ],
+ [AC_MSG_ERROR([Can't compile against Python.h])]
+ )
+ ]
+ )
+ CXXFLAGS="$CXXFLAGS_SAVED"
+ CPPFLAGS="$CPPFLAGS_SAVED"
+fi
+
fi dnl GXX = yes
AM_CONDITIONAL(GCC_WERROR_OK, test $werror_ok = 1)
@@ -380,7 +437,7 @@ AC_ARG_WITH([botan],
AC_HELP_STRING([--with-botan=PATH],
[specify exact directory of Botan library]),
[botan_path="$withval"])
-if test "${botan_path}" == "no" ; then
+if test "${botan_path}" = "no" ; then
AC_MSG_ERROR([Need botan for libcryptolink])
fi
if test "${botan_path}" != "yes" ; then
@@ -390,42 +447,65 @@ if test "${botan_path}" != "yes" ; then
AC_MSG_ERROR([${botan_path}/bin/botan-config not found])
fi
else
+ # First see if pkg-config knows of it.
+ # Unfortunately, the botan.pc files have their minor version in them
+ # too, so we need to try them one by one
+ BOTAN_CONFIG=""
+ AC_PATH_PROG([PKG_CONFIG], [pkg-config])
+ if test "$PKG_CONFIG" != "" ; then
+ BOTAN_VERSIONS="botan-1.10 botan-1.9 botan-1.8"
+ for version in $BOTAN_VERSIONS; do
+ AC_MSG_CHECKING([Checking botan version with pkg-config $version])
+
+ if [ $PKG_CONFIG --exists ${version} ]; then
+ AC_MSG_RESULT([found])
+ BOTAN_CONFIG="$PKG_CONFIG ${version}"
+ break
+ else
+ AC_MSG_RESULT([not found])
+ fi
+ done
+ fi
+ # If we had no pkg-config, or it didn't know about botan, use botan-config
+ if test "$BOTAN_CONFIG" = "" ; then
AC_PATH_PROG([BOTAN_CONFIG], [botan-config])
+ fi
fi
-if test -x "${BOTAN_CONFIG}" ; then
- BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
- # We expect botan-config --libs to contain -L<path_to_libbotan>, but
- # this is not always the case. As a heuristics workaround we add
- # -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
- # (but using include instead of lib) below.
+BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+
+# We expect botan-config --libs to contain -L<path_to_libbotan>, but
+# this is not always the case. As a heuristics workaround we add
+# -L`botan-config --prefix/lib` in this case. Same for BOTAN_INCLUDES
+# (but using include instead of lib) below.
+if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
- BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
- BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+ BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
- BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
- # See python_rpath for some info on why we do this
- if test $rpath_available = yes; then
- BOTAN_RPATH=
- for flag in ${BOTAN_LDFLAGS}; do
- BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
- done
- AC_SUBST(BOTAN_RPATH)
-
- # According to the libtool manual, it should be sufficient if we
- # specify the "-R libdir" in our wrapper library of botan (no other
- # programs will need libbotan directly); "libdir" should be added to
- # the program's binary image. But we've seen in our build environments
- # that (some versions of?) libtool doesn't propagate -R as documented,
- # and it caused a linker error at run time. To work around this, we
- # also add the rpath to the global LDFLAGS.
- LDFLAGS="$BOTAN_RPATH $LDFLAGS"
- fi
-
- AC_SUBST(BOTAN_LDFLAGS)
- AC_SUBST(BOTAN_INCLUDES)
+ BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
+fi
+# See python_rpath for some info on why we do this
+if test $rpath_available = yes; then
+ BOTAN_RPATH=
+ for flag in ${BOTAN_LDFLAGS}; do
+ BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
+ done
+AC_SUBST(BOTAN_RPATH)
+
+# According to the libtool manual, it should be sufficient if we
+# specify the "-R libdir" in our wrapper library of botan (no other
+# programs will need libbotan directly); "libdir" should be added to
+# the program's binary image. But we've seen in our build environments
+# that (some versions of?) libtool doesn't propagate -R as documented,
+# and it caused a linker error at run time. To work around this, we
+# also add the rpath to the global LDFLAGS.
+ LDFLAGS="$BOTAN_RPATH $LDFLAGS"
fi
+AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_INCLUDES)
+
CPPFLAGS_SAVED=$CPPFLAGS
CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
LDFLAGS_SAVED="$LDFLAGS"
@@ -447,6 +527,55 @@ AC_LINK_IFELSE(
CPPFLAGS=$CPPFLAGS_SAVED
LDFLAGS=$LDFLAGS_SAVED
+# Check for log4cplus
+log4cplus_path="yes"
+AC_ARG_WITH([log4cplus],
+ AC_HELP_STRING([--with-log4cplus=PATH],
+ [specify exact directory of log4cplus library and headers]),
+ [log4cplus_path="$withval"])
+if test "${log4cplus_path}" = "no" ; then
+ AC_MSG_ERROR([Need log4cplus])
+elif test "${log4cplus_path}" != "yes" ; then
+ LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
+ LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+else
+# If not specified, try some common paths.
+ log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
+ for d in $log4cplusdirs
+ do
+ if test -f $d/include/log4cplus/logger.h; then
+ LOG4CPLUS_INCLUDES="-I$d/include"
+ LOG4CPLUS_LDFLAGS="-L$d/lib"
+ break
+ fi
+ done
+fi
+
+LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+
+AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_INCLUDES)
+
+CPPFLAGS_SAVED=$CPPFLAGS
+CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
+LDFLAGS_SAVED="$LDFLAGS"
+LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+
+AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
+AC_LINK_IFELSE(
+ [AC_LANG_PROGRAM([#include <log4cplus/logger.h>
+ ],
+ [using namespace log4cplus;
+ Logger logger = Logger::getInstance("main");
+ ])],
+ [AC_MSG_RESULT([checking for log4cplus library... yes])],
+ [AC_MSG_RESULT([checking for log4cplus library... no])
+ AC_MSG_ERROR([Needs log4cplus library])]
+)
+
+CPPFLAGS=$CPPFLAGS_SAVED
+LDFLAGS=$LDFLAGS_SAVED
+
#
# Configure Boost header path
#
@@ -544,6 +673,7 @@ fi
#
if test "$gtest_path" != "no"
then
+ DISTCHECK_GTEST_CONFIGURE_FLAG="--with-gtest=\"$gtest_path\""
if test "$gtest_path" != "yes"; then
GTEST_PATHS=$gtest_path
if test -x "${gtest_path}/bin/gtest-config" ; then
@@ -584,8 +714,10 @@ else
GTEST_INCLUDES=
GTEST_LDFLAGS=
GTEST_LDADD=
+ DISTCHECK_GTEST_CONFIGURE_FLAG=
fi
AM_CONDITIONAL(HAVE_GTEST, test $gtest_path != "no")
+AC_SUBST(DISTCHECK_GTEST_CONFIGURE_FLAG)
AC_SUBST(GTEST_INCLUDES)
AC_SUBST(GTEST_LDFLAGS)
AC_SUBST(GTEST_LDADD)
@@ -661,6 +793,8 @@ fi
#
AC_PATH_PROGS(PERL, perl5 perl)
AC_SUBST(PERL)
+AC_PATH_PROGS(AWK, gawk awk)
+AC_SUBST(AWK)
AC_ARG_ENABLE(man, [AC_HELP_STRING([--enable-man],
[regenerate man pages [default=no]])], enable_man=yes, enable_man=no)
@@ -697,24 +831,21 @@ AC_CONFIG_FILES([Makefile
src/bin/auth/Makefile
src/bin/auth/tests/Makefile
src/bin/auth/benchmarks/Makefile
+ src/bin/dhcp6/Makefile
+ src/bin/dhcp6/tests/Makefile
src/bin/resolver/Makefile
src/bin/resolver/tests/Makefile
src/bin/sockcreator/Makefile
src/bin/sockcreator/tests/Makefile
src/bin/xfrin/Makefile
src/bin/xfrin/tests/Makefile
+ src/bin/xfrin/tests/testdata/Makefile
src/bin/xfrout/Makefile
src/bin/xfrout/tests/Makefile
src/bin/zonemgr/Makefile
src/bin/zonemgr/tests/Makefile
src/bin/stats/Makefile
src/bin/stats/tests/Makefile
- src/bin/stats/tests/isc/Makefile
- src/bin/stats/tests/isc/cc/Makefile
- src/bin/stats/tests/isc/config/Makefile
- src/bin/stats/tests/isc/util/Makefile
- src/bin/stats/tests/testdata/Makefile
- src/bin/stats/tests/http/Makefile
src/bin/usermgr/Makefile
src/bin/tests/Makefile
src/lib/Makefile
@@ -729,21 +860,30 @@ AC_CONFIG_FILES([Makefile
src/lib/cc/tests/Makefile
src/lib/python/Makefile
src/lib/python/isc/Makefile
+ src/lib/python/isc/acl/Makefile
+ src/lib/python/isc/acl/tests/Makefile
src/lib/python/isc/util/Makefile
src/lib/python/isc/util/tests/Makefile
src/lib/python/isc/datasrc/Makefile
src/lib/python/isc/datasrc/tests/Makefile
+ src/lib/python/isc/dns/Makefile
src/lib/python/isc/cc/Makefile
src/lib/python/isc/cc/tests/Makefile
src/lib/python/isc/config/Makefile
src/lib/python/isc/config/tests/Makefile
src/lib/python/isc/log/Makefile
src/lib/python/isc/log/tests/Makefile
+ src/lib/python/isc/log_messages/Makefile
+ src/lib/python/isc/log_messages/work/Makefile
src/lib/python/isc/net/Makefile
src/lib/python/isc/net/tests/Makefile
src/lib/python/isc/notify/Makefile
src/lib/python/isc/notify/tests/Makefile
src/lib/python/isc/testutils/Makefile
+ src/lib/python/isc/bind10/Makefile
+ src/lib/python/isc/bind10/tests/Makefile
+ src/lib/python/isc/xfrin/Makefile
+ src/lib/python/isc/xfrin/tests/Makefile
src/lib/config/Makefile
src/lib/config/tests/Makefile
src/lib/config/tests/testdata/Makefile
@@ -755,10 +895,13 @@ AC_CONFIG_FILES([Makefile
src/lib/dns/python/Makefile
src/lib/dns/python/tests/Makefile
src/lib/dns/benchmarks/Makefile
+ src/lib/dhcp/Makefile
+ src/lib/dhcp/tests/Makefile
src/lib/exceptions/Makefile
src/lib/exceptions/tests/Makefile
src/lib/datasrc/Makefile
src/lib/datasrc/tests/Makefile
+ src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
@@ -775,9 +918,12 @@ AC_CONFIG_FILES([Makefile
src/lib/server_common/tests/Makefile
src/lib/util/Makefile
src/lib/util/io/Makefile
- src/lib/util/io/tests/Makefile
src/lib/util/unittests/Makefile
+ src/lib/util/python/Makefile
+ src/lib/util/pyunittests/Makefile
src/lib/util/tests/Makefile
+ src/lib/acl/Makefile
+ src/lib/acl/tests/Makefile
tests/Makefile
tests/system/Makefile
tests/tools/Makefile
@@ -807,13 +953,7 @@ AC_OUTPUT([doc/version.ent
src/bin/zonemgr/run_b10-zonemgr.sh
src/bin/stats/stats.py
src/bin/stats/stats_httpd.py
- src/bin/stats/stats.spec
- src/bin/stats/stats-schema.spec
- src/bin/stats/stats-httpd.spec
- src/bin/stats/stats-httpd-xml.tpl
- src/bin/stats/stats-httpd-xsd.tpl
- src/bin/stats/stats-httpd-xsl.tpl
- src/bin/bind10/bind10.py
+ src/bin/bind10/bind10_src.py
src/bin/bind10/run_bind10.sh
src/bin/bind10/tests/bind10_test.py
src/bin/bindctl/run_bindctl.sh
@@ -830,22 +970,45 @@ AC_OUTPUT([doc/version.ent
src/bin/msgq/run_msgq.sh
src/bin/auth/auth.spec.pre
src/bin/auth/spec_config.h.pre
+ src/bin/dhcp6/spec_config.h.pre
src/bin/tests/process_rename_test.py
src/lib/config/tests/data_def_unittests_config.h
src/lib/python/isc/config/tests/config_test
src/lib/python/isc/cc/tests/cc_test
- src/lib/python/isc/log/tests/log_test
src/lib/python/isc/notify/tests/notify_out_test
+ src/lib/python/isc/log/tests/log_console.py
+ src/lib/python/isc/log_messages/work/__init__.py
src/lib/dns/gen-rdatacode.py
src/lib/python/bind10_config.py
- src/lib/dns/tests/testdata/gen-wiredata.py
src/lib/cc/session_config.h.pre
src/lib/cc/tests/session_unittests_config.h
- src/lib/log/tests/run_time_init_test.sh
+ src/lib/log/tests/console_test.sh
+ src/lib/log/tests/destination_test.sh
+ src/lib/log/tests/init_logger_test.sh
+ src/lib/log/tests/local_file_test.sh
+ src/lib/log/tests/severity_test.sh
+ src/lib/log/tests/tempdir.h
+ src/lib/util/python/mkpywrapper.py
+ src/lib/util/python/gen_wiredata.py
+ src/lib/server_common/tests/data_path.h
+ tests/lettuce/setup_intree_bind10.sh
tests/system/conf.sh
+ tests/system/run.sh
tests/system/glue/setup.sh
tests/system/glue/nsx1/b10-config.db
tests/system/bindctl/nsx1/b10-config.db.template
+ tests/system/ixfr/db.example.n0
+ tests/system/ixfr/db.example.n2
+ tests/system/ixfr/db.example.n2.refresh
+ tests/system/ixfr/db.example.n4
+ tests/system/ixfr/db.example.n6
+ tests/system/ixfr/ixfr_init.sh
+ tests/system/ixfr/b10-config.db
+ tests/system/ixfr/common_tests.sh
+ tests/system/ixfr/in-1/setup.sh
+ tests/system/ixfr/in-2/setup.sh
+ tests/system/ixfr/in-3/setup.sh
+ tests/system/ixfr/in-4/setup.sh
], [
chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
chmod +x src/bin/xfrin/run_b10-xfrin.sh
@@ -865,9 +1028,22 @@ AC_OUTPUT([doc/version.ent
chmod +x src/bin/msgq/run_msgq.sh
chmod +x src/bin/msgq/tests/msgq_test
chmod +x src/lib/dns/gen-rdatacode.py
- chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
- chmod +x src/lib/log/tests/run_time_init_test.sh
+ chmod +x src/lib/log/tests/console_test.sh
+ chmod +x src/lib/log/tests/destination_test.sh
+ chmod +x src/lib/log/tests/init_logger_test.sh
+ chmod +x src/lib/log/tests/local_file_test.sh
+ chmod +x src/lib/log/tests/severity_test.sh
+ chmod +x src/lib/util/python/mkpywrapper.py
+ chmod +x src/lib/util/python/gen_wiredata.py
+ chmod +x src/lib/python/isc/log/tests/log_console.py
chmod +x tests/system/conf.sh
+ chmod +x tests/system/run.sh
+ chmod +x tests/system/ixfr/ixfr_init.sh
+ chmod +x tests/system/ixfr/common_tests.sh
+ chmod +x tests/system/ixfr/in-1/setup.sh
+ chmod +x tests/system/ixfr/in-2/setup.sh
+ chmod +x tests/system/ixfr/in-3/setup.sh
+ chmod +x tests/system/ixfr/in-4/setup.sh
])
AC_OUTPUT
@@ -893,11 +1069,14 @@ Flags:
B10_CXXFLAGS: $B10_CXXFLAGS
dnl includes too
Python: ${PYTHON_INCLUDES}
+ ${PYTHON_CXXFLAGS}
${PYTHON_LDFLAGS}
${PYTHON_LIB}
Boost: ${BOOST_INCLUDES}
Botan: ${BOTAN_INCLUDES}
${BOTAN_LDFLAGS}
+ Log4cplus: ${LOG4CPLUS_INCLUDES}
+ ${LOG4CPLUS_LDFLAGS}
SQLite: $SQLITE_CFLAGS
$SQLITE_LIBS
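
The new log4cplus check above follows the same pattern as the existing Botan handling: an optional --with-log4cplus=PATH points configure at a non-standard install, and configure aborts if the headers or library cannot be found. A minimal sketch of a configure invocation exercising both options (the prefixes are placeholders, not required locations):

  $ ./configure --with-botan=/usr/local --with-log4cplus=/usr/local

Note that, because of the new libtool guard near the top of configure.ac, the build directory itself must not contain spaces.
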
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 783e63a..ee5aaf8 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -568,13 +568,13 @@ WARN_LOGFILE =
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
-INPUT = ../src/lib/cc ../src/lib/config \
- ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
- ../src/bin/auth ../src/bin/resolver ../src/lib/bench \
- ../src/lib/log ../src/lib/asiolink/ ../src/lib/nsas \
+INPUT = ../src/lib/exceptions ../src/lib/cc \
+ ../src/lib/config ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
+ ../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log \
+ ../src/lib/log/compiler ../src/lib/asiolink/ ../src/lib/nsas \
../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
- ../src/bin/sockcreator/ ../src/lib/util/
- ../src/lib/resolve
+ ../src/bin/sockcreator/ ../src/lib/util/ \
+ ../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6 ../src/lib/dhcp
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
@@ -1165,7 +1165,7 @@ XML_DTD =
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.
-XML_PROGRAMLISTING = YES
+XML_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
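
With the added INPUT directories the generated developer documentation also covers the exceptions, acl, dhcp, and dhcp6 code, while disabling XML_PROGRAMLISTING keeps the XML output from ballooning. A sketch of regenerating the API documentation by hand, assuming doxygen is installed and run from the doc/ directory:

  $ cd doc
  $ doxygen Doxyfile
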
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c790139..239f235 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,10 +1,12 @@
EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.html
-EXTRA_DIST += bind10-guide.xml
+EXTRA_DIST += bind10-guide.xml bind10-guide.html bind10-guide.txt
+EXTRA_DIST += bind10-messages.xml bind10-messages.html
# This is not a "man" manual, but reuse this for now for docbook.
if ENABLE_MAN
+.PHONY: bind10-messages.xml
+
bind10-guide.html: bind10-guide.xml
xsltproc --novalid --xinclude --nonet \
--path $(top_builddir)/doc \
@@ -13,4 +15,21 @@ bind10-guide.html: bind10-guide.xml
http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
$(srcdir)/bind10-guide.xml
+HTML2TXT = elinks -dump -no-numbering -no-references
+
+bind10-guide.txt: bind10-guide.html
+ $(HTML2TXT) $(srcdir)/bind10-guide.html > $@
+
+bind10-messages.html: bind10-messages.xml
+ xsltproc --novalid --xinclude --nonet \
+ --path $(top_builddir)/doc \
+ -o $@ \
+ --stringparam html.stylesheet $(srcdir)/bind10-guide.css \
+ http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
+ $(srcdir)/bind10-messages.xml
+
+# So many dependencies that it's easiest just to regenerate it every time
+bind10-messages.xml:
+ $(PYTHON) $(top_srcdir)/tools/system_messages.py -o $@ $(top_srcdir)
+
endif
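
These rules are only active when configure was run with --enable-man, and bind10-messages.xml is declared .PHONY so the messages manual is regenerated from the current message files on every build. A sketch of rebuilding the guide and the messages manual by hand, assuming xsltproc, elinks, and Python are available:

  $ ./configure --enable-man
  $ make -C doc/guide bind10-guide.txt bind10-messages.html
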
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index a631a9c..97ffb84 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,26 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110322. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
- 20110322.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
lass="releaseinfo">This is the reference guide for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110322.
- The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284842">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285021">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285041">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285101">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285198">Build</a></span></dt><dt><span class="section"><a href="#id1168230285214">Install</a></span></dt><dt><span class="section"><a href="#id1168230285238">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285812">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285877">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285908">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286296">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ This is the reference guide for BIND 10 version 20110809.
+ The most up-to-date version of this document (in PDF, HTML,
+ and plain text formats), along with other documents for
+ BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b
10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfe
rs</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></dd></dl><
/div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110322.
+ BIND 10 version 20110809.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451238"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,15 +30,21 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451265"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p><p>
+ BIND 10 uses the Botan crypto library for C++. It requires
+ at least Botan version 1.8.
+ </p><p>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
+ </p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
and <span class="command"><strong>b10-zonemgr</strong></span> modules require the
libpython3 library and the Python _sqlite3.so module.
- </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems do not provide these dependencies
in their default installation nor standard packages
collections.
@@ -132,7 +140,10 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284842">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285021">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285041">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285101">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285198">Build</a></span></dt><dt><span class="section"><a href="#id1168230285214">Install</a></span></dt><dt><span class="section"><a href="#id1168230285238">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284842"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229436567"></a>Building Requirements</h2></div></div></div><p>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -143,6 +154,11 @@
</p><p>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </p><p>
+
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -152,7 +168,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -188,14 +204,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285021"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436859"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285041"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436878"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -229,7 +245,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285101"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436939"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -238,7 +254,7 @@
Run <span class="command"><strong>./configure</strong></span> with the <code class="option">--help</code>
switch to view the different options. The commonly-used options are:
- </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the the installation location (the
+ </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the installation location (the
default is <code class="filename">/usr/local/</code>).
</dd><dt><span class="term">--with-boost-include</span></dt><dd>Define the path to find the Boost headers.
</dd><dt><span class="term">--with-pythonpath</span></dt><dd>Define the path to Python 3.1 if it is not in the
@@ -260,16 +276,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285198"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437037"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285214"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437052"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285238"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437076"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -300,14 +316,14 @@
data source and configuration databases.
</li></ul></div><p>
</p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
- BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
+ BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
will also restart processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
- <span class="command"><strong>bind10</strong></span> connects to it,
+ <span class="command"><strong>bind10</strong></span> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
@@ -330,7 +346,12 @@
To start the BIND 10 service, simply run <span class="command"><strong>bind10</strong></span>.
Run it with the <code class="option">--verbose</code> switch to
get additional debugging or diagnostic output.
- </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
+ This is not needed on some operating systems.
+ </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
@@ -486,12 +507,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285812">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285877">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285908">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285812"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437660"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -511,7 +532,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285877"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437725"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -525,7 +546,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285908"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437755"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -554,28 +575,69 @@ This may be a temporary setting until then.
If you reload a zone already existing in the database,
all records from that prior zone disappear and a whole new set
appears.
- </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><p>
+ </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
process which is started by <span class="command"><strong>bind10</strong></span>.
- When received, the zone is stored in the BIND 10
- data store, and its records can be served by
+ When received, the zone is stored in the corresponding BIND 10
+ data source, and its records can be served by
<span class="command"><strong>b10-auth</strong></span>.
In combination with <span class="command"><strong>b10-zonemgr</strong></span> (for
automated SOA checks), this allows the BIND 10 server to
provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
+ </p><p>
+ The <span class="command"><strong>b10-xfrin</strong></span> process supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and care should be taken to enable IXFR.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
-
+ In the current development release of BIND 10, incoming zone
+ transfers are only available for SQLite3-based data sources,
+ that is, they don't work for an in-memory data source.
+ </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437989"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+ In practice, you need to specify a list of secondary zones to
+ enable incoming zone transfers for these zones (you can still
+ trigger a zone transfer manually, without a prior configuration
+ (see below)).
+ </p><p>
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here),
+ run the following at the <span class="command"><strong>bindctl</strong></span> prompt:
+ </p><pre class="screen">> <strong class="userinput"><code>config add Xfrin/zones</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/name "<code class="option">example.com</code>"</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/master_addr "<code class="option">2001:db8::53</code>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
- </p></div><p>
- To manually trigger a zone transfer to retrieve a remote zone,
- you may use the <span class="command"><strong>bindctl</strong></span> utility.
- For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
+ (We assume there has been no zone configuration before).
+ </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438027"></a>Enabling IXFR</h2></div></div></div><p>
+ As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
+ zone transfers by default. To enable IXFR for zone transfers
+ for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
+ configuration parameter to <strong class="userinput"><code>true</code></strong>.
+ In the above example of configuration sequence, you'll need
+ to add the following before performing <strong class="userinput"><code>commit</code></strong>:
+ </p><pre class="screen">> <strong class="userinput"><code>config set Xfrin/zones[0]/use_ixfr true</code></strong></pre><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ One reason why IXFR is disabled by default in the current
+ release is because it does not support automatic fallback from IXFR to
+ AXFR when it encounters a primary server that doesn't support
+ outbound IXFR (and, not many existing implementations support
+ it). Another, related reason is that it does not use AXFR even
+ if it has no knowledge about the zone (like at the very first
+ time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so obviously it doesn't work
+ in this situation and AXFR is the only workable choice.
+ The current release of <span class="command"><strong>b10-xfrin</strong></span> does not
+ make this selection automatically.
+ These features will be implemented in a near future
+ version, at which point we will enable IXFR by default.
+ </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438069"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+ To manually trigger a zone transfer to retrieve a remote zone,
+ you may use the <span class="command"><strong>bindctl</strong></span> utility.
+ For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
- </p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
- </p></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
+ </p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
+ </p></div></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
The <span class="command"><strong>b10-xfrout</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
@@ -587,7 +649,7 @@ This may be a temporary setting until then.
NOTIFY messages to slaves.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
@@ -603,13 +665,13 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286296">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
</p><p>
The main <span class="command"><strong>bind10</strong></span> process can be configured
- to select to run either the authoritative or resolver.
+ to run either the authoritative server, the resolver, or both.
By default, it starts the authoritative service.
@@ -625,14 +687,52 @@ This may be a temporary setting until then.
The master <span class="command"><strong>bind10</strong></span> will stop and start
the desired services.
</p><p>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
</p><pre class="screen">
-> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
+> <strong class="userinput"><code>config add Resolver/listen_on</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/address "192.168.1.1"</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/port 53</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286296"></a>Forwarding</h2></div></div></div><p>
+ </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438327"></a>Access Control</h2></div></div></div><p>
+ By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <code class="option">Resolver/query_acl</code> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is checked on a first-match basis.
+ </p><p>
+ The configuration's <code class="option">action</code> item may be
+ set to <span class="quote">“<span class="quote">ACCEPT</span>”</span> to allow the incoming query,
+ <span class="quote">“<span class="quote">REJECT</span>”</span> to respond with a DNS REFUSED return
+ code, or <span class="quote">“<span class="quote">DROP</span>”</span> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_ACCEPTED" target="_top">RESOLVER_QUERY_ACCEPTED</a>,
+ <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_REJECTED" target="_top">RESOLVER_QUERY_REJECTED</a>,
+ and <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_DROPPED" target="_top">RESOLVER_QUERY_DROPPED</a>.
+ </p><p>
+ The required configuration's <code class="option">from</code> item is set
+ to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <span class="quote">“<span class="quote">any6</span>”</span> (for
+ any IPv6 address) or <span class="quote">“<span class="quote">any4</span>”</span> (for any IPv4
+ address).
+ </p><p>
+ For example, to allow the <em class="replaceable"><code>192.168.1.0/24</code></em>
+ network to use your recursive name server, at the
+ <span class="command"><strong>bindctl</strong></span> prompt run:
+ </p><pre class="screen">
+> <strong class="userinput"><code>config add Resolver/query_acl</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/action "ACCEPT"</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/from "<em class="replaceable"><code>192.168.1.0/24</code></em>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438512"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -660,24 +760,440 @@ This may be a temporary setting until then.
</p><p>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <span class="command"><strong>bindctl</strong></span>:
</p><pre class="screen">
> <strong class="userinput"><code>Stats show</code></strong>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</pre><p>
- </p></div></div></body></html>
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438628"></a>Logging configuration</h2></div></div></div><p>
+
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
+
+
+
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229438638"></a>Loggers</h3></div></div></div><p>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </p><p>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+ take appropriate default values.
+
+ </p><p>
+
+ The three most important elements of a logger configuration
+ are the <code class="option">name</code> (the component that is
+ generating the messages), the <code class="option">severity</code>
+ (what to log), and the <code class="option">output_options</code>
+ (where to log).
+
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229438663"></a>name (string)</h4></div></div></div><p>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <span class="quote">“<span class="quote">Resolver</span>”</span>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </p><p>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <em class="replaceable"><code>module.library</code></em>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <span class="quote">“<span class="quote">Resolver.nsas</span>”</span>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+
+
+ </p><p>
+
+
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <span class="quote">“<span class="quote">Resolver</span>”</span> and severity INFO, and one with
+ the name <span class="quote">“<span class="quote">Resolver.cache</span>”</span> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<span class="quote">“<span class="quote">Resolver</span>”</span>), so giving the desired behavior.
+
+ </p><p>
+
+ One special case is that of a module name of <span class="quote">“<span class="quote">*</span>”</span>
+ (asterisk), which is interpreted as <span class="emphasis"><em>any</em></span>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <span class="quote">“<span class="quote">*.config</span>”</span>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </p><p>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+ precedence. For example, if there are entries for
+ both <span class="quote">“<span class="quote">*</span>”</span> and <span class="quote">“<span class="quote">Resolver</span>”</span>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<span class="quote">“<span class="quote">Resolver</span>”</span>). All other modules
+ will use the configuration of the first entry
+ (<span class="quote">“<span class="quote">*</span>”</span>). If there was also a configuration
+ entry for <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, the cache library
+ within the resolver would use that in preference to the
+ entry for <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+ </p><p>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <span class="command"><strong>bindctl</strong></span>, e.g.
+ <span class="quote">“<span class="quote">Resolver</span>”</span> for the resolver module,
+ <span class="quote">“<span class="quote">Xfrout</span>”</span> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
+ with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
+
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439035"></a>severity (string)</h4></div></div></div><p>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> FATAL </li><li class="listitem"> ERROR </li><li class="listitem"> WARN </li><li class="listitem"> INFO </li><li class="listitem"> DEBUG </li></ul></div><p>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+
+
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439086"></a>output_options (list)</h4></div></div></div><p>
+
+ Each logger can have zero or more
+ <code class="option">output_options</code>. These specify where log
+ messages are sent. These are explained in detail below.
+
+ </p><p>
+
+ The other options for a logger are:
+
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439102"></a>debuglevel (integer)</h4></div></div></div><p>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </p><p>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439117"></a>additive (true or false)</h4></div></div></div><p>
+
+ If this is true, the <code class="option">output_options</code> from
+ the parent will be used. For example, if there are two
+ loggers configured, <span class="quote">“<span class="quote">Resolver</span>”</span> and
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, and <code class="option">additive</code>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, but also to the destinations
+ as specified in the <code class="option">output_options</code> in
+ the logger named <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+
+
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439154"></a>Output Options</h3></div></div></div><p>
+
+ The main settings for an output option are the
+ <code class="option">destination</code> and a value called
+ <code class="option">output</code>, the meaning of which depends on
+ the destination that is set.
+
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439169"></a>destination (string)</h4></div></div></div><p>
+
+ The destination is the type of output. It can be one of:
+
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439201"></a>output (string)</h4></div></div></div><p>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </p><div class="variablelist"><dl><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">console</span>”</span></span></dt><dd>
+ The value of output must be one of <span class="quote">“<span class="quote">stdout</span>”</span>
+ (messages printed to standard output) or
+ <span class="quote">“<span class="quote">stderr</span>”</span> (messages printed to standard
+ error).
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span></span></dt><dd>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">syslog</span>”</span></span></dt><dd>
+ The value of output is interpreted as the
+ <span class="command"><strong>syslog</strong></span> facility (e.g.
+ <span class="emphasis"><em>local0</em></span>) that should be used
+ for log messages.
+ </dd></dl></div><p>
+
+ The other options for <code class="option">output_options</code> are:
+
+ </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439286"></a>flush (true of false)</h5></div></div></div><p>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439296"></a>maxsize (integer)</h5></div></div></div><p>
+ Only relevant when the destination is file, this is the maximum
+ size of the output file in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </p><p>
+ If this is 0, no maximum file size is used.
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439308"></a>maxver (integer)</h5></div></div></div><p>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439328"></a>Example session</h3></div></div></div><p>
+
+ In this example we want to set the global logging to
+ write to the file <code class="filename">/var/log/my_bind10.log</code>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<code class="filename">/tmp/debug_messages</code>).
+
+ </p><p>
+
+ Start <span class="command"><strong>bindctl</strong></span>.
+
+ </p><p>
+
+ </p><pre class="screen">["login success "]
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers [] list
+</pre><p>
+
+ </p><p>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </p><p>
+
+ Let's first add a default logger:
+
+ </p><p>
+
+ </p><pre class="screen"><strong class="userinput"><code>> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers/ list (modified)
+</pre><p>
+
+ </p><p>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ The name is mandatory, so we must set it. We will also
+ change the severity. Let's start with the global
+ logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config set Logging/loggers[0]/name *</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity WARN</code></strong>
+> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers[0]/output_options</code></strong>
+> <strong class="userinput"><code> config show Logging/loggers[0]/output_options</code></strong>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</pre><p>
+
+
+ </p><p>
+
+ These aren't the values we are looking for.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxsize 30000</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxver 8</code></strong>
+</pre><p>
+
+ </p><p>
+
+ That would make the entire configuration for this logger
+ look like:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config show all Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</pre><p>
+
+ </p><p>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config commit</code></strong></pre><p>
+
+ </p><p>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/name Auth</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/debuglevel 40</code></strong>
+> <strong class="userinput"><code> config add Logging/loggers[1]/output_options</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config remove Logging/loggers[1]</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And every module will now be using the values from the
+ logger named <span class="quote">“<span class="quote">*</span>”</span>.
+
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439609"></a>Logging Message Format</h2></div></div></div><p>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </p><p>
+ Consider the message below logged to a file:
+ </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
+ </p><p>
+ (Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.)
+ </p><p>
+ The log message comprises a number of components:
+
+ </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+ The date and time at which the message was generated.
+ </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+ The severity of the message.
+ </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+ The message identification. Every message in BIND 10
+ has a unique identification, which can be used as an
+ index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
+ Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
+ </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </p></dd></dl></div><p>
+ </p></div></div></div></body></html>
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
new file mode 100644
index 0000000..619d56f
--- /dev/null
+++ b/doc/guide/bind10-guide.txt
@@ -0,0 +1,1201 @@
+ BIND 10 Guide
+
+Administrator Reference for BIND 10
+
+ This is the reference guide for BIND 10 version 20110809.
+
+ Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
+
+ Abstract
+
+ BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems
+ Consortium (ISC). It includes DNS libraries and modular components for
+ controlling authoritative and recursive DNS servers.
+
+ This is the reference guide for BIND 10 version 20110809. The most
+ up-to-date version of this document (in PDF, HTML, and plain text
+ formats), along with other documents for BIND 10, can be found at
+ http://bind10.isc.org/docs.
+
+ --------------------------------------------------------------------------
+
+ Table of Contents
+
+ 1. Introduction
+
+ Supported Platforms
+
+ Required Software
+
+ Starting and Stopping the Server
+
+ Managing BIND 10
+
+ 2. Installation
+
+ Building Requirements
+
+ Quick start
+
+ Installation from source
+
+ Download Tar File
+
+ Retrieve from Git
+
+ Configure before the build
+
+ Build
+
+ Install
+
+ Install Hierarchy
+
+ 3. Starting BIND10 with bind10
+
+ Starting BIND 10
+
+ 4. Command channel
+
+ 5. Configuration manager
+
+ 6. Remote control daemon
+
+ Configuration specification for b10-cmdctl
+
+ 7. Control and configure user interface
+
+ 8. Authoritative Server
+
+ Server Configurations
+
+ Data Source Backends
+
+ Loading Master Zones Files
+
+ 9. Incoming Zone Transfers
+
+ Configuration for Incoming Zone Transfers
+
+ Enabling IXFR
+
+ Trigger an Incoming Zone Transfer Manually
+
+ 10. Outbound Zone Transfers
+
+ 11. Secondary Manager
+
+ 12. Recursive Name Server
+
+ Access Control
+
+ Forwarding
+
+ 13. Statistics
+
+ 14. Logging
+
+ Logging configuration
+
+ Loggers
+
+ Output Options
+
+ Example session
+
+ Logging Message Format
+
+Chapter 1. Introduction
+
+ Table of Contents
+
+ Supported Platforms
+
+ Required Software
+
+ Starting and Stopping the Server
+
+ Managing BIND 10
+
+ BIND is the popular implementation of a DNS server, developer interfaces,
+ and DNS tools. BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++
+ and Python and provides a modular environment for serving and maintaining
+ DNS.
+
+ Note
+
+ This guide covers the experimental prototype of BIND 10 version 20110809.
+
+ Note
+
+ BIND 10 provides an EDNS0- and DNSSEC-capable authoritative DNS server and
+ a caching recursive name server which also provides forwarding.
+
+Supported Platforms
+
+ BIND 10 builds have been tested on Debian GNU/Linux 5, Ubuntu 9.10, NetBSD
+ 5, Solaris 10, FreeBSD 7 and 8, and CentOS Linux 5.3. It has been tested
+ on Sparc, i386, and amd64 hardware platforms. It is planned for BIND 10 to
+ build, install and run on Windows and standard Unix-type platforms.
+
+Required Software
+
+ BIND 10 requires Python 3.1. Later versions may work, but Python 3.1 is
+ the minimum version which will work.
+
+ BIND 10 uses the Botan crypto library for C++. It requires at least Botan
+ version 1.8.
+
+ BIND 10 uses the log4cplus C++ logging library. It requires at least
+ log4cplus version 1.0.3.
+
+ The authoritative server requires SQLite 3.3.9 or newer. The b10-xfrin,
+ b10-xfrout, and b10-zonemgr modules require the libpython3 library and the
+ Python _sqlite3.so module.
+
+ Note
+
+ Some operating systems do not provide these dependencies in their default
+ installation or standard package collections. You may need to install
+ them separately.
+
+Starting and Stopping the Server
+
+ BIND 10 is modular. Part of this modularity is accomplished using multiple
+ cooperating processes which, together, provide the server functionality.
+ This is a change from the previous generation of BIND software, which used
+ a single process.
+
+ At first, running many different processes may seem confusing. However,
+ these processes are started, stopped, and maintained by a single command,
+ bind10. This command starts a master process which will start other
+ processes as needed. The processes started by the bind10 command have
+ names starting with "b10-", including:
+
+ o b10-msgq -- Message bus daemon. This process coordinates communication
+ between all of the other BIND 10 processes.
+ o b10-auth -- Authoritative DNS server. This process serves DNS
+ requests.
+ o b10-cfgmgr -- Configuration manager. This process maintains all of the
+ configuration for BIND 10.
+ o b10-cmdctl -- Command and control service. This process allows
+ external control of the BIND 10 system.
+ o b10-resolver -- Recursive name server. This process handles incoming
+ queries.
+ o b10-stats -- Statistics collection daemon. This process collects and
+ reports statistics data.
+ o b10-xfrin -- Incoming zone transfer service. This process is used to
+ transfer a new copy of a zone into BIND 10, when acting as a secondary
+ server.
+ o b10-xfrout -- Outgoing zone transfer service. This process is used to
+ handle transfer requests to send a local zone to a remote secondary
+ server, when acting as a master server.
+ o b10-zonemgr -- Secondary manager. This process keeps track of timers
+ and other necessary information for BIND 10 to act as a slave server.
+
+ These are run automatically by bind10 and do not need to be run manually.
+
+Managing BIND 10
+
+ Once BIND 10 is running, a few commands are used to interact directly with
+ the system:
+
+ o bindctl -- interactive administration interface. This is a
+ command-line tool which allows an administrator to control BIND 10.
+ o b10-loadzone -- zone file loader. This tool will load standard
+ masterfile-format zone files into BIND 10.
+ o b10-cmdctl-usermgr -- user access control. This tool allows an
+ administrator to authorize additional users to manage BIND 10.
+
+ The tools and modules are covered in full detail in this guide. In
+ addition, manual pages are also provided in the default installation.
+
+ BIND 10 also provides libraries and programmer interfaces for C++ and
+ Python for the message bus, configuration backend, and, of course, DNS.
+ These include detailed developer documentation and code examples.
+
+Chapter 2. Installation
+
+ Table of Contents
+
+ Building Requirements
+
+ Quick start
+
+ Installation from source
+
+ Download Tar File
+
+ Retrieve from Git
+
+ Configure before the build
+
+ Build
+
+ Install
+
+ Install Hierarchy
+
+Building Requirements
+
+ In addition to the run-time requirements, building BIND 10 from source
+ code requires various development include headers.
+
+ Note
+
+ Some operating systems have split their distribution packages into a
+ run-time and a development package. You will need to install the
+ development package versions, which include header files and libraries, to
+ build BIND 10 from source code.
+
+ Building from source code requires the Boost build-time headers. At least
+ Boost version 1.35 is required.
+
+ To build BIND 10, also install the Botan (at least version 1.8) and the
+ log4cplus (at least version 1.0.3) development include headers.
+
+ The Python Library and Python _sqlite3 module are required to enable the
+ Xfrout and Xfrin support.
+
+ Note
+
+ The Python related libraries and modules need to be built for Python 3.1.
+
+ Building BIND 10 also requires a C++ compiler and standard development
+ headers, make, and pkg-config. BIND 10 builds have been tested with GCC
+ g++ 3.4.3, 4.1.2, 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++
+ 5.10.
+
+Quick start
+
+ Note
+
+ This quickly covers the standard steps for installing and deploying BIND
+ 10 as an authoritative name server using its defaults. For
+ troubleshooting, full customizations and further details, see the
+ respective chapters in the BIND 10 guide.
+
+ To quickly get started with BIND 10, follow these steps.
+
+ 1. Install required build dependencies.
+ 2. Download the BIND 10 source tar file from
+ ftp://ftp.isc.org/isc/bind10/.
+ 3. Extract the tar file:
+
+ $ gzcat bind10-VERSION.tar.gz | tar -xvf -
+
+ 4. Go into the source and run configure:
+
+ $ cd bind10-VERSION
+ $ ./configure
+
+ 5. Build it:
+
+ $ make
+
+ 6. Install it (to default /usr/local):
+
+ $ make install
+
+ 7. Start the server:
+
+ $ /usr/local/sbin/bind10
+
+ 8. Test it; for example:
+
+ $ dig @127.0.0.1 -c CH -t TXT authors.bind
+
+ 9. Load desired zone file(s), for example:
+
+ $ b10-loadzone your.zone.example.org
+
+ 10. Test the new zone.
+
+Installation from source
+
+ BIND 10 is open source software written in C++ and Python. It is freely
+ available in source code form from ISC via the Git code revision control
+ system or as a downloadable tar file. It may also be available in
+ pre-compiled ready-to-use packages from operating system vendors.
+
+ Download Tar File
+
+ Downloading a release tar file is the recommended method to obtain the
+ source code.
+
+ The BIND 10 releases are available as tar file downloads from
+ ftp://ftp.isc.org/isc/bind10/. Periodic development snapshots may also be
+ available.
+
+ Retrieve from Git
+
+ Downloading this "bleeding edge" code is recommended only for developers
+ or advanced users. Using development code in a production environment is
+ not recommended.
+
+ Note
+
+ When using source code retrieved via Git, additional software will be
+ required: automake (v1.11 or newer), libtoolize, and autoconf (2.59 or
+ newer). These may need to be installed.
+
+ The latest development code, including temporary experiments and
+ un-reviewed code, is available via the BIND 10 code revision control
+ system. This is powered by Git and all the BIND 10 development is public.
+ The leading development is done in the "master" branch.
+
+ The code can be checked out from git://bind10.isc.org/bind10; for example:
+
+ $ git clone git://bind10.isc.org/bind10
+
+ The code checked out from the version control system doesn't include the
+ generated configure script, Makefile.in files, or the related configure
+ files. They can be created by running autoreconf with
+ the --install switch. This will run autoconf, aclocal, libtoolize,
+ autoheader, automake, and related commands.
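+
+ For example, run the following from the top of the checked-out source
+ tree:
+
+   $ autoreconf --install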
+
+ Configure before the build
+
+ BIND 10 uses the GNU Build System to discover build environment details.
+ To generate the makefiles using the defaults, simply run:
+
+ $ ./configure
+
+ Run ./configure with the --help switch to view the different options. The
+ commonly-used options are:
+
+ --prefix
+ Define the installation location (the default is /usr/local/).
+
+ --with-boost-include
+ Define the path to find the Boost headers.
+
+ --with-pythonpath
+ Define the path to Python 3.1 if it is not in the standard
+ execution path.
+
+ --with-gtest
+ Enable building the C++ Unit Tests using the Google Tests
+ framework. Optionally this can define the path to the gtest header
+ files and library.
+
+ For example, the following configures the build to find the Boost headers
+ and the Python interpreter, and sets the installation location:
+
+ $ ./configure \
+ --with-boost-include=/usr/pkg/include \
+ --with-pythonpath=/usr/pkg/bin/python3.1 \
+ --prefix=/opt/bind10
+
+ If the configure fails, it may be due to missing or old dependencies.
+
+ Build
+
+ After the configure step is complete, to build the executables from the
+ C++ code and prepare the Python scripts, run:
+
+ $ make
+
+ Install
+
+ To install the BIND 10 executables, support files, and documentation, run:
+
+ $ make install
+
+ Note
+
+ The install step may require superuser privileges.
+
+ Install Hierarchy
+
+ The following is the layout of the complete BIND 10 installation:
+
+ o bin/ -- general tools and diagnostic clients.
+ o etc/bind10-devel/ -- configuration files.
+ o lib/ -- libraries and python modules.
+ o libexec/bind10-devel/ -- executables that a user wouldn't normally run
+ directly and are not run independently. These are the BIND 10 modules
+ which are daemons started by the bind10 tool.
+ o sbin/ -- commands used by the system administrator.
+ o share/bind10-devel/ -- configuration specifications.
+ o share/man/ -- manual pages (online documentation).
+ o var/bind10-devel/ -- data source and configuration databases.
+
+Chapter 3. Starting BIND10 with bind10
+
+ Table of Contents
+
+ Starting BIND 10
+
+ BIND 10 provides the bind10 command which starts up the required
+ processes. bind10 will also restart processes that exit unexpectedly. This
+ is the only command needed to start the BIND 10 system.
+
+ After starting the b10-msgq communications channel, bind10 connects to it,
+ runs the configuration manager, and reads its own configuration. Then it
+ starts the other modules.
+
+ The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
+ provides the communication channel between every part of the system. The
+ b10-cfgmgr daemon is always needed by every module, if only to send
+ information about itself somewhere, but more importantly to ask about
+ its own settings and about other modules.
+ will also start up b10-cmdctl for admins to communicate with the system,
+ b10-auth for authoritative DNS service or b10-resolver for recursive name
+ service, b10-stats for statistics collection, b10-xfrin for inbound DNS
+ zone transfers, b10-xfrout for outbound DNS zone transfers, and
+ b10-zonemgr for secondary service.
+
+Starting BIND 10
+
+ To start the BIND 10 service, simply run bind10. Run it with the --verbose
+ switch to get additional debugging or diagnostic output.
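+
+ For example, using the default installation location:
+
+   $ /usr/local/sbin/bind10 --verbose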
+
+ Note
+
+ If the setproctitle Python module is detected at start up, the process
+ names for the Python-based daemons will be renamed to better identify them
+ instead of just "python". This is not needed on some operating systems.
+
+Chapter 4. Command channel
+
+ The BIND 10 components use the b10-msgq message routing daemon to
+ communicate with other BIND 10 components. The b10-msgq implements what is
+ called the "Command Channel". Processes intercommunicate by sending
+ messages on the command channel. Example messages include shutdown, get
+ configurations, and set configurations. This Command Channel is not used
+ for DNS message passing. It is used only to control and monitor the BIND
+ 10 system.
+
+ Administrators do not communicate directly with the b10-msgq daemon. By
+ default, BIND 10 uses port 9912 for the b10-msgq service. It listens on
+ 127.0.0.1.
+
+Chapter 5. Configuration manager
+
+ The configuration manager, b10-cfgmgr, handles all BIND 10 system
+ configuration. It provides persistent storage for configuration, and
+ notifies running modules of configuration changes.
+
+ The b10-auth and b10-xfrin daemons and other components receive their
+ configurations from the configuration manager over the b10-msgq command
+ channel.
+
+ The administrator doesn't connect to it directly, but uses a user
+ interface to communicate with the configuration manager via b10-cmdctl's
+ REST-ful interface. b10-cmdctl is covered in Chapter 6, Remote control
+ daemon.
+
+ Note
+
+ The development prototype release only provides the bindctl as a user
+ interface to b10-cmdctl. Upcoming releases will provide another
+ interactive command-line interface and a web-based interface.
+
+ The b10-cfgmgr daemon can send all specifications and all current settings
+ to the bindctl client (via b10-cmdctl).
+
+ b10-cfgmgr relays configurations received from b10-cmdctl to the
+ appropriate modules.
+
+ The stored configuration file is at
+ /usr/local/var/bind10-devel/b10-config.db. (The full path is what was
+ defined at build configure time for --localstatedir. The default is
+ /usr/local/var/.) The format is loosely based on JSON and is directly
+ parseable Python, but this may change in a future version. This
+ configuration data file is not manually edited by the administrator.
+
+ The configuration manager does not have any command line arguments.
+ Normally it is not started manually, but is automatically started using
+ the bind10 master process (as covered in Chapter 3, Starting BIND10 with
+ bind10).
+
+Chapter 6. Remote control daemon
+
+ Table of Contents
+
+ Configuration specification for b10-cmdctl
+
+ b10-cmdctl is the gateway between administrators and the BIND 10 system.
+ It is an HTTPS server that uses standard HTTP Digest Authentication for
+ username and password validation. It provides a REST-ful interface for
+ accessing and controlling BIND 10.
+
+ When b10-cmdctl starts, it first asks b10-cfgmgr about what modules are
+ running and what their configuration is (over the b10-msgq channel). Then
+ it will start listening on HTTPS for clients -- the user interface -- such
+ as bindctl.
+
+ b10-cmdctl directly sends commands (received from the user interface) to
+ the specified component. Configuration changes are actually commands to
+ b10-cfgmgr so are sent there.
+
+ The HTTPS server requires a private key, such as an RSA PRIVATE KEY. The
+ default location is at /usr/local/etc/bind10-devel/cmdctl-keyfile.pem. (A
+ sample key is at /usr/local/share/bind10-devel/cmdctl-keyfile.pem.) It
+ also uses a certificate located at
+ /usr/local/etc/bind10-devel/cmdctl-certfile.pem. (A sample certificate is
+ at /usr/local/share/bind10-devel/cmdctl-certfile.pem.) This may be a
+ self-signed certificate or purchased from a certification authority.
+
+ Note
+
+ The HTTPS server doesn't support a certificate request from a client (at
+ this time). The b10-cmdctl daemon does not provide a public service. If
+ any client wants to control BIND 10, then a certificate needs to be first
+ received from the BIND 10 administrator. The BIND 10 installation provides
+ a sample PEM bundle that matches the sample key and certificate.
+
+ The b10-cmdctl daemon also requires the user account file located at
+ /usr/local/etc/bind10-devel/cmdctl-accounts.csv. This comma-delimited file
+ lists the accounts with a user name, hashed password, and salt. (A sample
+ file is at /usr/local/share/bind10-devel/cmdctl-accounts.csv. It contains
+ the user named "root" with the password "bind10".)
+
+ The administrator may create a user account with the b10-cmdctl-usermgr
+ tool.
+
+ By default the HTTPS server listens on the localhost port 8080. The port
+ can be set by using the --port command line option. The address to listen
+ on can be set using the --address command line argument. Each HTTPS
+ connection is stateless and times out after 1200 seconds by default. This can
+ be redefined by using the --idle-timeout command line argument.
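+
+ For example, the following is one way these options might be combined
+ (the values shown here are just the documented defaults; b10-cmdctl is
+ normally started by bind10 rather than run by hand):
+
+   $ b10-cmdctl --address 127.0.0.1 --port 8080 --idle-timeout 1200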
+
+Configuration specification for b10-cmdctl
+
+ The configuration items for b10-cmdctl are: key_file, cert_file, and
+ accounts_file.
+
+ The control commands are: print_settings and shutdown.
+
+Chapter 7. Control and configure user interface
+
+ Note
+
+ For this development prototype release, bindctl is the only user
+ interface. It is expected that upcoming releases will provide another
+ interactive command-line interface and a web-based interface for
+ controlling and configuring BIND 10.
+
+ The bindctl tool provides an interactive prompt for configuring,
+ controlling, and querying the BIND 10 components. It communicates directly
+ with a REST-ful interface over HTTPS provided by b10-cmdctl. It doesn't
+ communicate to any other components directly.
+
+ Configuration changes are actually commands to b10-cfgmgr. So when bindctl
+ sends a configuration, it is sent to b10-cmdctl (over an HTTPS connection);
+ then b10-cmdctl sends the command (over a b10-msgq command channel) to
+ b10-cfgmgr which then stores the details and relays (over a b10-msgq
+ command channel) the configuration on to the specified module.
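+
+ For example, a session might look like the following (the command shown
+ is just an illustration; any of the configuration commands in this guide
+ can be entered at the prompt):
+
+   $ bindctl
+   ["login success "]
+   > config show Logging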
+
+Chapter 8. Authoritative Server
+
+ Table of Contents
+
+ Server Configurations
+
+ Data Source Backends
+
+ Loading Master Zones Files
+
+ b10-auth is the authoritative DNS server. It supports EDNS0 and
+ DNSSEC. It supports IPv6. Normally it is started by the bind10 master
+ process.
+
+Server Configurations
+
+ b10-auth is configured via the b10-cfgmgr configuration manager. The
+ module name is "Auth". The configuration data item is:
+
+ database_file
+ This is an optional string to define the path to find the SQLite3
+ database file. Note: Later the DNS server will use various data
+ source backends. This may be a temporary setting until then.
+
+ The configuration command is:
+
+ shutdown
+ Stop the authoritative DNS server.
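+
+ For example, to set the database file explicitly at the bindctl prompt
+ (the path shown is the default described in "Data Source Backends";
+ substitute your own file as needed):
+
+   > config set Auth/database_file "/usr/local/var/bind10-devel/zone.sqlite3"
+   > config commit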
+
+Data Source Backends
+
+ Note
+
+ For the development prototype release, b10-auth supports a SQLite3 data
+ source backend and an in-memory data source backend. Upcoming versions will
+ be able to use multiple different data sources, such as MySQL and Berkeley
+ DB.
+
+ By default, the SQLite3 backend uses the data file located at
+ /usr/local/var/bind10-devel/zone.sqlite3. (The full path is what was
+ defined at build configure time for --localstatedir. The default is
+ /usr/local/var/.) This data file location may be changed by defining the
+ "database_file" configuration.
+
+Loading Master Zones Files
+
+ RFC 1035 style DNS master zone files may be imported into a BIND 10 data
+ source by using the b10-loadzone utility.
+
+ b10-loadzone supports the following special directives (control entries):
+
+ $INCLUDE
+ Loads an additional zone file. This may be recursive.
+
+ $ORIGIN
+ Defines the relative domain name.
+
+ $TTL
+ Defines the time-to-live value used for following records that
+ don't include a TTL.
+
+ The -o argument may be used to define the default origin for loaded zone
+ file records.
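+
+ For example (the zone file name here is only illustrative, and the
+ origin is given with the -o switch described above):
+
+   $ b10-loadzone -o example.org example.org.zone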
+
+ Note
+
+ In the development prototype release, only the SQLite3 back end is used.
+ By default, it stores the zone data in
+ /usr/local/var/bind10-devel/zone.sqlite3 unless the -d switch is used to
+ set the database filename. Multiple zones are stored in a single SQLite3
+ zone database.
+
+ If you reload a zone already existing in the database, all records from
+ that prior zone disappear and a whole new set appears.
+
+Chapter 9. Incoming Zone Transfers
+
+ Table of Contents
+
+ Configuration for Incoming Zone Transfers
+
+ Enabling IXFR
+
+ Trigger an Incoming Zone Transfer Manually
+
+ Incoming zones are transferred using the b10-xfrin process which is
+ started by bind10. When received, the zone is stored in the corresponding
+ BIND 10 data source, and its records can be served by b10-auth. In
+ combination with b10-zonemgr (for automated SOA checks), this allows the
+ BIND 10 server to provide "secondary" service.
+
+ The b10-xfrin process supports both AXFR and IXFR. Due to some
+ implementation limitations of the current development release, however, it
+ only tries AXFR by default, and care should be taken to enable IXFR.
+
+ Note
+
+ In the current development release of BIND 10, incoming zone transfers are
+ only available for SQLite3-based data sources, that is, they don't work
+ for an in-memory data source.
+
+Configuration for Incoming Zone Transfers
+
+ In practice, you need to specify a list of secondary zones to enable
+ incoming zone transfers for these zones (you can still trigger a zone
+ transfer manually, without a prior configuration (see below)).
+
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here), run the
+ following at the bindctl prompt:
+
+ > config add Xfrin/zones
+ > config set Xfrin/zones[0]/name "example.com"
+ > config set Xfrin/zones[0]/master_addr "2001:db8::53"
+ > config commit
+
+ (We assume there has been no zone configuration before.)
+
+Enabling IXFR
+
+ As noted above, b10-xfrin uses AXFR for zone transfers by default. To
+ enable IXFR for zone transfers for a particular zone, set the use_ixfr
+ configuration parameter to true. In the above example of configuration
+ sequence, you'll need to add the following before performing commit:
+
+ > config set Xfrin/zones[0]/use_ixfr true
+
+ Note
+
+ One reason why IXFR is disabled by default in the current release is
+ because it does not support automatic fallback from IXFR to AXFR when it
+ encounters a primary server that doesn't support outbound IXFR (and not
+ many existing implementations support it). Another, related reason is that
+ it does not use AXFR even if it has no knowledge about the zone (like at
+ the very first time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so obviously it doesn't work in this
+ situation and AXFR is the only workable choice. The current release of
+ b10-xfrin does not make this selection automatically. These features will
+ be implemented in a near future version, at which point we will enable
+ IXFR by default.
+
+Trigger an Incoming Zone Transfer Manually
+
+ To manually trigger a zone transfer to retrieve a remote zone, you may use
+ the bindctl utility. For example, at the bindctl prompt run:
+
+ > Xfrin retransfer zone_name="foo.example.org" master=192.0.2.99
+
+Chapter 10. Outbound Zone Transfers
+
+ The b10-xfrout process is started by bind10. When the b10-auth
+ authoritative DNS server receives an AXFR request, b10-xfrout sends the
+ zone. This is used to provide master DNS service to share zones to
+ secondary name servers. The b10-xfrout is also used to send NOTIFY
+ messages to slaves.
+
+ Note
+
+ The current development release of BIND 10 only supports AXFR. (IXFR is
+ not supported.) Access control is not yet provided.
+
+Chapter 11. Secondary Manager
+
+ The b10-zonemgr process is started by bind10. It keeps track of SOA
+ refresh, retry, and expire timers and other details for BIND 10 to perform
+ as a slave. When the b10-auth authoritative DNS server receives a NOTIFY
+ message, b10-zonemgr may tell b10-xfrin to do a refresh to start an
+ inbound zone transfer. The secondary manager resets its counters when a
+ new zone is transferred in.
+
+ Note
+
+ Access control (such as allowing notifies) is not yet provided. The
+ primary/secondary service is not yet complete.
+
+Chapter 12. Recursive Name Server
+
+ Table of Contents
+
+ Access Control
+
+ Forwarding
+
+ The b10-resolver process is started by bind10.
+
+ The main bind10 process can be configured to run either the authoritative
+ server, the resolver, or both. By default, it starts the authoritative
+ service. You may change this using bindctl, for example:
+
+ > config set Boss/start_auth false
+ > config set Boss/start_resolver true
+ > config commit
+
+ The master bind10 will stop and start the desired services.
+
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1. The
+ following example shows how it can be configured to listen on an
+ additional address (and port):
+
+ > config add Resolver/listen_on
+ > config set Resolver/listen_on[2]/address "192.168.1.1"
+ > config set Resolver/listen_on[2]/port 53
+ > config commit
+
+ (Replace the "2" as needed; run "config show Resolver/listen_on" if
+ needed.)
+
+Access Control
+
+ By default, the b10-resolver daemon only accepts DNS queries from the
+ localhost (127.0.0.1 and ::1). The Resolver/query_acl configuration may be
+ used to reject, drop, or allow specific IPs or networks. This
+ configuration list is checked on a first-match basis.
+
+ The configuration's action item may be set to "ACCEPT" to allow the
+ incoming query, "REJECT" to respond with a DNS REFUSED return code, or
+ "DROP" to ignore the query without any response (such as a blackhole). For
+ more information, see the respective debugging messages:
+ RESOLVER_QUERY_ACCEPTED, RESOLVER_QUERY_REJECTED, and
+ RESOLVER_QUERY_DROPPED.
+
+ The required configuration's from item is set to an IPv4 or IPv6 address,
+ addresses with a network mask, or to the special lowercase keywords
+ "any6" (for any IPv6 address) or "any4" (for any IPv4 address).
+
+ For example, to allow the 192.168.1.0/24 network to use your recursive name
+ server, at the bindctl prompt run:
+
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[2]/action "ACCEPT"
+ > config set Resolver/query_acl[2]/from "192.168.1.0/24"
+ > config commit
+
+ (Replace the "2" as needed; run "config show Resolver/query_acl" if
+ needed.)
+
+ Note
+
+ This prototype access control configuration syntax may be changed.
+
+Forwarding
+
+ To enable forwarding, the upstream address and port must be configured to
+ forward queries to, such as:
+
+ > config set Resolver/forward_addresses [{ "address": "192.168.1.1", "port": 53 }]
+ > config commit
+
+ (Replace 192.168.1.1 to point to your full resolver.)
+
+ Normal iterative name service can be re-enabled by clearing the forwarding
+ address(es); for example:
+
+ > config set Resolver/forward_addresses []
+ > config commit
+
+Chapter 13. Statistics
+
+ The b10-stats process is started by bind10. It periodically collects
+ statistics data from various modules and aggregates it.
+
+ This stats daemon provides commands to identify if it is running, show
+ specified or all statistics data, show specified or all statistics data
+ schema, and set specified statistics data. For example, using bindctl:
+
+ > Stats show
+ {
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
+ }
+
+
+Chapter 14. Logging
+
+ Table of Contents
+
+ Logging configuration
+
+ Loggers
+
+ Output Options
+
+ Example session
+
+ Logging Message Format
+
+Logging configuration
+
+ The logging system in BIND 10 is configured through the Logging module.
+ All BIND 10 modules will look at the configuration in Logging to see what
+ should be logged and to where.
+
+ Loggers
+
+ Within BIND 10, a message is logged through a component called a "logger".
+ Different parts of BIND 10 log messages through different loggers, and
+ each logger can be configured independently of one another.
+
+ In the Logging module, you can specify the configuration for zero or more
+ loggers; any that are not specified will take appropriate default values.
+
+ The three most important elements of a logger configuration are the name
+ (the component that is generating the messages), the severity (what to
+ log), and the output_options (where to log).
+
+ name (string)
+
+ Each logger in the system has a name, the name being that of the component
+ using it to log messages. For instance, if you want to configure logging
+ for the resolver module, you add an entry for a logger named "Resolver".
+ This configuration will then be used by the loggers in the Resolver
+ module, and all the libraries used by it.
+
+ If you want to specify logging for one specific library within the module,
+ you set the name to module.library. For example, the logger used by the
+ nameserver address store component has the full name of "Resolver.nsas".
+ If there is no entry in Logging for a particular library, it will use the
+ configuration given for the module.
+
+ To illustrate this, suppose you want the cache library to log messages of
+ severity DEBUG, and the rest of the resolver code to log messages of
+ severity INFO. To achieve this you specify two loggers, one with the name
+ "Resolver" and severity INFO, and one with the name "Resolver.cache" with
+ severity DEBUG. As there are no entries for other libraries (e.g. the
+ nsas), they will use the configuration for the module ("Resolver"), so
+ giving the desired behavior.
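+
+ As a sketch, this could be entered from bindctl as follows (the list
+ indexes 0 and 1 are illustrative and depend on any loggers already
+ configured; output options are omitted here and are described below):
+
+ > config add Logging/loggers
+ > config set Logging/loggers[0]/name Resolver
+ > config set Logging/loggers[0]/severity INFO
+ > config add Logging/loggers
+ > config set Logging/loggers[1]/name Resolver.cache
+ > config set Logging/loggers[1]/severity DEBUG
+ > config set Logging/loggers[1]/debuglevel 40
+ > config commit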
+
+ One special case is that of a module name of "*" (asterisk), which is
+ interpreted as any module. You can set global logging options by using
+ this, including setting the logging configuration for a library that is
+ used by multiple modules (e.g. "*.config" specifies the configuration
+ library code in whatever module is using it).
+
+ If there are multiple logger specifications in the configuration that
+ might match a particular logger, the specification with the more specific
+ logger name takes precedence. For example, if there are entries for
+ both "*" and "Resolver", the resolver module -- and all libraries it uses
+ -- will log messages according to the configuration in the second entry
+ ("Resolver"). All other modules will use the configuration of the first
+ entry ("*"). If there was also a configuration entry for "Resolver.cache",
+ the cache library within the resolver would use that in preference to the
+ entry for "Resolver".
+
+ One final note about the naming. When specifying the module name within a
+ logger, use the name of the module as specified in bindctl, e.g.
+ "Resolver" for the resolver module, "Xfrout" for the xfrout module, etc.
+ When the message is logged, the message will include the name of the
+ logger generating the message, but with the module name replaced by the
+ name of the process implementing the module (so for example, a message
+ generated by the "Auth.cache" logger will appear in the output with a
+ logger name of "b10-auth.cache").
+
+ severity (string)
+
+ This specifies the category of messages logged. Each message is logged
+ with an associated severity which may be one of the following (in
+ descending order of severity):
+
+ o FATAL
+ o ERROR
+ o WARN
+ o INFO
+ o DEBUG
+
+ When the severity of a logger is set to one of these values, it will only
+ log messages of that severity, and the severities above it. The severity
+ may also be set to NONE, in which case all messages from that logger are
+ inhibited.
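+
+ For example, to inhibit all messages from the global logger configured
+ in the example session later in this chapter, its severity could be set
+ to NONE (the index 0 is illustrative):
+
+ > config set Logging/loggers[0]/severity NONE
+ > config commit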
+
+ output_options (list)
+
+ Each logger can have zero or more output_options. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ The other options for a logger are:
+
+ debuglevel (integer)
+
+ When a logger's severity is set to DEBUG, this value specifies what debug
+ messages should be printed. It ranges from 0 (least verbose) to 99 (most
+ verbose).
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ additive (true or false)
+
+ If this is true, the output_options from the parent will be used. For
+ example, if there are two loggers configured, "Resolver" and
+ "Resolver.cache", and additive is true in the second, it will write the
+ log messages not only to the destinations specified for "Resolver.cache",
+ but also to the destinations as specified in the output_options in the
+ logger named "Resolver".
+
+ Output Options
+
+ The main settings for an output option are the destination and a value
+ called output, the meaning of which depends on the destination that is
+ set.
+
+ destination (string)
+
+ The destination is the type of output. It can be one of:
+
+ o console
+ o file
+ o syslog
+
+ output (string)
+
+ Depending on what is set as the output destination, this value is
+ interpreted as follows:
+
+ destination is "console"
+ The value of output must be one of "stdout" (messages printed to
+ standard output) or "stderr" (messages printed to standard error).
+
+ destination is "file"
+ The value of output is interpreted as a file name; log messages
+ will be appended to this file.
+
+ destination is "syslog"
+ The value of output is interpreted as the syslog facility (e.g.
+ local0) that should be used for log messages.
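+
+ For example, a logger's messages could be sent to the local0 syslog
+ facility rather than to a file with settings such as in this sketch
+ (the logger and output option indexes are illustrative):
+
+ > config set Logging/loggers[0]/output_options[0]/destination syslog
+ > config set Logging/loggers[0]/output_options[0]/output local0
+ > config commit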
+
+ The other options for output_options are:
+
+ flush (true or false)
+
+ Flush buffers after each log message. Doing this will reduce performance
+ but will ensure that if the program terminates abnormally, all messages up
+ to the point of termination are output.
+
+ maxsize (integer)
+
+ Only relevant when destination is file, this is the maximum file size of
+ output files in bytes. When the maximum size is reached, the file is
+ renamed and a new file opened. (For example, a ".1" is appended to the
+ name -- if a ".1" file exists, it is renamed ".2", etc.)
+
+ If this is 0, no maximum file size is used.
+
+ maxver (integer)
+
+ Maximum number of old log files to keep around when rolling the output
+ file. Only relevant when destination is "file".
+
+ Example session
+
+ In this example we want to set the global logging to write to the file
+ /var/log/bind10.log, at severity WARN. We want the authoritative server
+ to log at DEBUG with debuglevel 40, to a different file
+ (/tmp/auth_debug.log).
+
+ Start bindctl.
+
+ ["login success "]
+ > config show Logging
+ Logging/loggers [] list
+
+ By default, no specific loggers are configured, in which case the severity
+ defaults to INFO and the output is written to stderr.
+
+ Let's first add a default logger:
+
+ > config add Logging/loggers
+ > config show Logging
+ Logging/loggers/ list (modified)
+
+ The loggers value line changed to indicate that it is no longer an empty
+ list:
+
+ > config show Logging/loggers
+ Logging/loggers[0]/name "" string (default)
+ Logging/loggers[0]/severity "INFO" string (default)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options [] list (default)
+
+ The name is mandatory, so we must set it. We will also change the
+ severity. Let's start with the global logger.
+
+ > config set Logging/loggers[0]/name *
+ > config set Logging/loggers[0]/severity WARN
+ > config show Logging/loggers
+ Logging/loggers[0]/name "*" string (modified)
+ Logging/loggers[0]/severity "WARN" string (modified)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options [] list (default)
+
+ Of course, we need to specify where we want the log messages to go, so we
+ add an entry for an output option.
+
+ > config add Logging/loggers[0]/output_options
+ > config show Logging/loggers[0]/output_options
+ Logging/loggers[0]/output_options[0]/destination "console" string (default)
+ Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+ Logging/loggers[0]/output_options[0]/flush false boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+ Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+
+ These aren't the values we are looking for.
+
+ > config set Logging/loggers[0]/output_options[0]/destination file
+ > config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log
+ > config set Logging/loggers[0]/output_options[0]/maxsize 30000
+ > config set Logging/loggers[0]/output_options[0]/maxver 8
+
+ This makes the entire configuration for this logger look like:
+
+ > config show all Logging/loggers
+ Logging/loggers[0]/name "*" string (modified)
+ Logging/loggers[0]/severity "WARN" string (modified)
+ Logging/loggers[0]/debuglevel 0 integer (default)
+ Logging/loggers[0]/additive false boolean (default)
+ Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+ Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+ Logging/loggers[0]/output_options[0]/flush false boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+ Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+
+ That looks OK, so let's commit it before we add the configuration for the
+ authoritative server's logger.
+
+ > config commit
+
+ Now that we have set it, and checked each value along the way, adding a
+ second entry is quite similar.
+
+ > config add Logging/loggers
+ > config set Logging/loggers[1]/name Auth
+ > config set Logging/loggers[1]/severity DEBUG
+ > config set Logging/loggers[1]/debuglevel 40
+ > config add Logging/loggers[1]/output_options
+ > config set Logging/loggers[1]/output_options[0]/destination file
+ > config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log
+ > config commit
+
+ And that's it. Once we have found whatever it was we needed the debug
+ messages for, we can simply remove the second logger to let the
+ authoritative server use the same settings as the rest.
+
+ > config remove Logging/loggers[1]
+ > config commit
+
+ And every module will now be using the values from the logger named "*".
+
+Logging Message Format
+
+ Each message written by BIND 10 to the configured logging destinations
+ comprises a number of components that identify the origin of the message
+ and, if the message indicates a problem, information about the problem
+ that may be useful in fixing it.
+
+ Consider the message below logged to a file:
+
+ 2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)
+
+ Note: the layout of messages written to the system logging file (syslog)
+ may be slightly different. This message has been split across two lines
+ here for display reasons; in the logging file, it will appear on one
+ line.
+
+ The log message comprises a number of components:
+
+ 2011-06-15 13:48:22.034
+
+ The date and time at which the message was generated.
+
+ ERROR
+
+ The severity of the message.
+
+ [b10-resolver.asiolink]
+
+ The source of the message. This comprises two components: the BIND
+ 10 process generating the message (in this case, b10-resolver) and
+ the module within the program from which the message originated
+ (which in the example is the asynchronous I/O link module,
+ asiolink).
+
+ ASIODNS_OPENSOCK
+
+ The message identification. Every message in BIND 10 has a unique
+ identification, which can be used as an index into the BIND 10
+ Messages Manual (http://bind10.isc.org/docs/bind10-messages.html)
+ from which more information can be obtained.
+
+ error 111 opening TCP socket to 127.0.0.1(53)
+
+ A brief description of the cause of the problem. Within this text,
+ information relating to the condition that caused the message to
+ be logged will be included. In this example, error number 111 (an
+ operating system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the local system
+ (address 127.0.0.1). The next step would be to find out the reason
+ for the failure by consulting your system's documentation to
+ identify what error number 111 means.
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index c020f11..21bb671 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -5,6 +5,23 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+
+<!--
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -13,7 +30,7 @@
<subtitle>Administrator Reference for BIND 10</subtitle>
<copyright>
- <year>2010</year><holder>Internet Systems Consortium, Inc.</holder>
+ <year>2010-2011</year><holder>Internet Systems Consortium, Inc.</holder>
</copyright>
<abstract>
@@ -24,9 +41,10 @@
</para>
<para>
This is the reference guide for BIND 10 version &__VERSION__;.
- The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <ulink
- url="http://bind10.isc.org/docs"/>. </para> </abstract>
+ The most up-to-date version of this document (in PDF, HTML,
+ and plain text formats), along with other documents for
+ BIND 10, can be found at <ulink url="http://bind10.isc.org/docs"/>.
+ </para> </abstract>
<releaseinfo>This is the reference guide for BIND 10 version
&__VERSION__;.</releaseinfo>
@@ -79,12 +97,22 @@
3.1 is the minimum version which will work.
</para>
- <note><para>
+ <para>
+ BIND 10 uses the Botan crypto library for C++. It requires
+ at least Botan version 1.8.
+ </para>
+
+ <para>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
+ </para>
+
+ <para>
The authoritative server requires SQLite 3.3.9 or newer.
The <command>b10-xfrin</command>, <command>b10-xfrout</command>,
and <command>b10-zonemgr</command> modules require the
libpython3 library and the Python _sqlite3.so module.
- </para></note>
+ </para>
<!-- TODO: this will change ... -->
<!-- TODO: list where to get these from -->
@@ -119,7 +147,7 @@
The processes started by the <command>bind10</command>
command have names starting with "b10-", including:
</para>
-
+
<para>
<itemizedlist>
@@ -214,7 +242,7 @@
<section id="managing_once_running">
<title>Managing BIND 10</title>
-
+
<para>
Once BIND 10 is running, a few commands are used to interact
directly with the system:
@@ -253,7 +281,7 @@
<!-- TODO point to these -->
In addition, manual pages are also provided in the default installation.
</para>
-
+
<!--
bin/
bindctl*
@@ -291,6 +319,12 @@ var/
<section>
<title>Building Requirements</title>
+
+ <para>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </para>
+
<note>
<simpara>
Some operating systems have split their distribution packages into
@@ -308,6 +342,19 @@ var/
</para>
<para>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </para>
+
+<!--
+TODO
+Debian and Ubuntu:
+ libgmp3-dev and libbz2-dev required for botan too
+-->
+
+ <para>
+<!-- TODO: is this needed at build time? test time? -->
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</para>
@@ -321,7 +368,7 @@ var/
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</para>
</section>
@@ -341,7 +388,7 @@ var/
</para>
<orderedlist>
-
+
<listitem>
<simpara>
Install required build dependencies.
@@ -425,7 +472,7 @@ var/
Downloading a release tar file is the recommended method to
obtain the source code.
</para>
-
+
<para>
The BIND 10 releases are available as tar file downloads from
<ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -501,37 +548,37 @@ var/
<varlistentry>
<term>--prefix</term>
<listitem>
- <simpara>Define the the installation location (the
+ <simpara>Define the installation location (the
default is <filename>/usr/local/</filename>).
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-boost-include</term>
- <listitem>
+ <listitem>
<simpara>Define the path to find the Boost headers.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-pythonpath</term>
- <listitem>
+ <listitem>
<simpara>Define the path to Python 3.1 if it is not in the
standard execution path.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-gtest</term>
- <listitem>
+ <listitem>
<simpara>Enable building the C++ Unit Tests using the
Google Tests framework. Optionally this can define the
path to the gtest header files and library.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
</variablelist>
@@ -650,13 +697,13 @@ var/
</para>
</section>
-->
-
+
</chapter>
<chapter id="bind10">
<title>Starting BIND10 with <command>bind10</command></title>
<para>
- BIND 10 provides the <command>bind10</command> command which
+ BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
will also restart processes that exit unexpectedly.
@@ -665,7 +712,7 @@ var/
<para>
After starting the <command>b10-msgq</command> communications channel,
- <command>bind10</command> connects to it,
+ <command>bind10</command> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</para>
@@ -696,6 +743,16 @@ var/
get additional debugging or diagnostic output.
</para>
<!-- TODO: note it doesn't go into background -->
+
+ <note>
+ <para>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <quote>python</quote>.
+ This is not needed on some operating systems.
+ </para>
+ </note>
+
</section>
</chapter>
@@ -723,7 +780,7 @@ var/
<command>b10-msgq</command> service.
It listens on 127.0.0.1.
</para>
-
+
<!-- TODO: this is broken, see Trac #111
<para>
To select an alternate port for the <command>b10-msgq</command> to
@@ -1049,10 +1106,10 @@ since we used bind10 -->
The configuration data item is:
<variablelist>
-
+
<varlistentry>
<term>database_file</term>
- <listitem>
+ <listitem>
<simpara>This is an optional string to define the path to find
the SQLite3 database file.
<!-- TODO: -->
@@ -1074,7 +1131,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>shutdown</term>
- <listitem>
+ <listitem>
<simpara>Stop the authoritative DNS server.
</simpara>
<!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1130,7 +1187,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$INCLUDE</term>
- <listitem>
+ <listitem>
<simpara>Loads an additional zone file. This may be recursive.
</simpara>
</listitem>
@@ -1138,7 +1195,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$ORIGIN</term>
- <listitem>
+ <listitem>
<simpara>Defines the relative domain name.
</simpara>
</listitem>
@@ -1146,7 +1203,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$TTL</term>
- <listitem>
+ <listitem>
<simpara>Defines the time-to-live value used for following
records that don't include a TTL.
</simpara>
@@ -1201,21 +1258,80 @@ TODO
<para>
Incoming zones are transferred using the <command>b10-xfrin</command>
process which is started by <command>bind10</command>.
- When received, the zone is stored in the BIND 10
- data store, and its records can be served by
+ When received, the zone is stored in the corresponding BIND 10
+ data source, and its records can be served by
<command>b10-auth</command>.
In combination with <command>b10-zonemgr</command> (for
automated SOA checks), this allows the BIND 10 server to
provide <quote>secondary</quote> service.
</para>
+ <para>
+ The <command>b10-xfrin</command> process supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and care should be taken to enable IXFR.
+ </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+
<note><simpara>
- The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ In the current development release of BIND 10, incoming zone
+ transfers are only available for SQLite3-based data sources,
+ that is, they don't work for an in-memory data source.
+ </simpara></note>
-<!-- TODO: sqlite3 data source only? -->
+ <section>
+ <title>Configuration for Incoming Zone Transfers</title>
+ <para>
+ In practice, you need to specify a list of secondary zones to
+ enable incoming zone transfers for these zones (you can still
+ trigger a zone transfer manually, without a prior configuration
+ (see below)).
+ </para>
- </simpara></note>
+ <para>
+ For example, to enable zone transfers for a zone named "example.com"
+ (whose master address is assumed to be 2001:db8::53 here),
+ run the following at the <command>bindctl</command> prompt:
+
+ <screen>> <userinput>config add Xfrin/zones</userinput>
+> <userinput>config set Xfrin/zones[0]/name "<option>example.com</option>"</userinput>
+> <userinput>config set Xfrin/zones[0]/master_addr "<option>2001:db8::53</option>"</userinput>
+> <userinput>config commit</userinput></screen>
+
+ (We assume there has been no zone configuration before).
+ </para>
+ </section>
+
+ <section>
+ <title>Enabling IXFR</title>
+ <para>
+ As noted above, <command>b10-xfrin</command> uses AXFR for
+ zone transfers by default. To enable IXFR for zone transfers
+ for a particular zone, set the <userinput>use_ixfr</userinput>
+ configuration parameter to <userinput>true</userinput>.
+ In the above configuration example, you'll need
+ to add the following before performing <userinput>commit</userinput>:
+ <screen>> <userinput>config set Xfrin/zones[0]/use_ixfr true</userinput></screen>
+ </para>
+
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+ <note><simpara>
+ One reason why IXFR is disabled by default in the current
+ release is that it does not support automatic fallback from IXFR to
+ AXFR when it encounters a primary server that doesn't support
+ outbound IXFR (and not many existing implementations support
+ it). Another, related reason is that it does not use AXFR even
+ if it has no knowledge about the zone (like at the very first
+ time the secondary server is set up). IXFR requires the
+ "current version" of the zone, so obviously it doesn't work
+ in this situation and AXFR is the only workable choice.
+ The current release of <command>b10-xfrin</command> does not
+ make this selection automatically.
+ These features will be implemented in a near future
+ version, at which point we will enable IXFR by default.
+ </simpara></note>
+ </section>
<!-- TODO:
@@ -1228,13 +1344,18 @@ what if a NOTIFY is sent?
-->
- <para>
- To manually trigger a zone transfer to retrieve a remote zone,
- you may use the <command>bindctl</command> utility.
- For example, at the <command>bindctl</command> prompt run:
+ <section>
+ <title>Trigger an Incoming Zone Transfer Manually</title>
+
+ <para>
+ To manually trigger a zone transfer to retrieve a remote zone,
+ you may use the <command>bindctl</command> utility.
+ For example, at the <command>bindctl</command> prompt run:
+
+ <screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
+ </para>
+ </section>
- <screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
- </para>
<!-- TODO: can that retransfer be used to identify a new zone? -->
<!-- TODO: what if doesn't exist at that master IP? -->
@@ -1258,7 +1379,7 @@ what if a NOTIFY is sent?
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</simpara></note>
@@ -1314,7 +1435,7 @@ what is XfroutClient xfr_client??
<para>
The main <command>bind10</command> process can be configured
- to select to run either the authoritative or resolver.
+ to run the authoritative service, the resolver, or both.
By default, it starts the authoritative service.
<!-- TODO: later both -->
@@ -1334,16 +1455,85 @@ what is XfroutClient xfr_client??
</para>
<para>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
<screen>
-> <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+> <userinput>config add Resolver/listen_on</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/address "192.168.1.1"</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/port 53</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
-<!-- TODO: later the above will have some defaults -->
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/listen_on</userinput></quote> if needed.)</simpara>
+<!-- TODO: this example should not include the port, ticket #1185 -->
+
+ <section>
+ <title>Access Control</title>
+
+ <para>
+ By default, the <command>b10-resolver</command> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <option>Resolver/query_acl</option> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ The list is checked in order, and the first matching entry determines the action.
+ </para>
+
+ <para>
+ The configuration's <option>action</option> item may be
+ set to <quote>ACCEPT</quote> to allow the incoming query,
+ <quote>REJECT</quote> to respond with a DNS REFUSED return
+ code, or <quote>DROP</quote> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+ <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+ and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+ </para>
+
+ <para>
+ The required configuration's <option>from</option> item is set
+ to an IPv4 or IPv6 address, an address with a network mask, or to
+ the special lowercase keywords <quote>any6</quote> (for
+ any IPv6 address) or <quote>any4</quote> (for any IPv4
+ address).
+ </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+
+TODO: tsig
+-->
+
+ <para>
+ For example, to allow the <replaceable>192.168.1.0/24</replaceable>
+ network to use your recursive name server, at the
+ <command>bindctl</command> prompt run:
+ </para>
+
+ <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+ <note><simpara>This prototype access control configuration
+ syntax may be changed.</simpara></note>
+
+ </section>
<section>
<title>Forwarding</title>
@@ -1397,30 +1587,744 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<para>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <command>bindctl</command>:
<screen>
> <userinput>Stats show</userinput>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</screen>
</para>
</chapter>
+ <chapter id="logging">
+ <title>Logging</title>
+
+ <section>
+ <title>Logging configuration</title>
+
+ <para>
+
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
+
+<!-- TODO: what is context of Logging module for readers of this guide? -->
+
+ </para>
+
+ <section>
+ <title>Loggers</title>
+
+ <para>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </para>
+
+ <para>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+ take appropriate default values.
+
+ </para>
+
+ <para>
+
+ The three most important elements of a logger configuration
+ are the <option>name</option> (the component that is
+ generating the messages), the <option>severity</option>
+ (what to log), and the <option>output_options</option>
+ (where to log).
+
+ </para>
+
+ <section>
+ <title>name (string)</title>
+
+ <para>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <quote>Resolver</quote>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </para>
+
+<!-- TODO: later we will have a way to know names of all modules
+
+Right now you can only see what their names are if they are running
+(a simple 'help' without anything else in bindctl for instance).
+
+ -->
+
+ <para>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <replaceable>module.library</replaceable>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <quote>Resolver.nsas</quote>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+<!-- TODO: how to know these specific names?
+
+We will either have to document them or tell the administrator to
+specify module-wide logging and see what appears...
+
+-->
+
+ </para>
+
+ <para>
+
+<!-- TODO: severity has not been covered yet -->
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <quote>Resolver</quote> and severity INFO, and one with
+ the name <quote>Resolver.cache</quote> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<quote>Resolver</quote>), so giving the desired behavior.
+
+ </para>
+
+ <para>
+
+ One special case is that of a module name of <quote>*</quote>
+ (asterisk), which is interpreted as <emphasis>any</emphasis>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <quote>*.config</quote>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </para>
+
+ <para>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+ precedence. For example, if there are entries for
+ both <quote>*</quote> and <quote>Resolver</quote>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<quote>Resolver</quote>). All other modules
+ will use the configuration of the first entry
+ (<quote>*</quote>). If there was also a configuration
+ entry for <quote>Resolver.cache</quote>, the cache library
+ within the resolver would use that in preference to the
+ entry for <quote>Resolver</quote>.
+
+ </para>
+
+ <para>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <command>bindctl</command>, e.g.
+ <quote>Resolver</quote> for the resolver module,
+ <quote>Xfrout</quote> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <quote>Auth.cache</quote> logger will appear in the output
+ with a logger name of <quote>b10-auth.cache</quote>).
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>severity (string)</title>
+
+ <para>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <simpara> FATAL </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> ERROR </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> WARN </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> INFO </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> DEBUG </simpara>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+<!-- TODO: worded wrong? If I set to INFO, why would it show DEBUG which is literally below in that list? -->
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>output_options (list)</title>
+
+ <para>
+
+ Each logger can have zero or more
+ <option>output_options</option>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </para>
+
+ <para>
+
+ The other options for a logger are:
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>debuglevel (integer)</title>
+
+ <para>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </para>
+
+
+<!-- TODO: complete this sentence:
+
+ The general classification of debug message types is
+
+TODO; there's a ticket to determine these levels, see #1074
+
+ -->
+
+ <para>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>additive (true or false)</title>
+
+ <para>
+
+ If this is true, the <option>output_options</option> from
+ the parent will be used. For example, if there are two
+ loggers configured, <quote>Resolver</quote> and
+ <quote>Resolver.cache</quote>, and <option>additive</option>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <quote>Resolver.cache</quote>, but also to the destinations
+ as specified in the <option>output_options</option> in
+ the logger named <quote>Resolver</quote>.
+
+<!-- TODO: check this -->
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Output Options</title>
+
+ <para>
+
+ The main settings for an output option are the
+ <option>destination</option> and a value called
+ <option>output</option>, the meaning of which depends on
+ the destination that is set.
+
+ </para>
+
+ <section>
+ <title>destination (string)</title>
+
+ <para>
+
+ The destination is the type of output. It can be one of:
+
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <simpara> console </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> file </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> syslog </simpara>
+ </listitem>
+
+ </itemizedlist>
+
+ </section>
+
+ <section>
+ <title>output (string)</title>
+
+ <para>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>console</quote></term>
+ <listitem>
+ <simpara>
+ The value of output must be one of <quote>stdout</quote>
+ (messages printed to standard output) or
+ <quote>stderr</quote> (messages printed to standard
+ error).
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>file</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>syslog</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as the
+ <command>syslog</command> facility (e.g.
+ <emphasis>local0</emphasis>) that should be used
+ for log messages.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+
+ The other options for <option>output_options</option> are:
+
+ </para>
+
+ <section>
+ <title>flush (true or false)</title>
+
+ <para>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxsize (integer)</title>
+
+ <para>
+ Only relevant when destination is file, this is the maximum
+ file size of output files in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </para>
+
+ <para>
+ If this is 0, no maximum file size is used.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxver (integer)</title>
+
+ <para>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <option>destination</option> is <quote>file</quote>.
+ </para>
+
+ </section>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Example session</title>
+
+ <para>
+
+ In this example we want to set the global logging to
+ write to the file <filename>/var/log/bind10.log</filename>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<filename>/tmp/auth_debug.log</filename>).
+
+ </para>
+
+ <para>
+
+ Start <command>bindctl</command>.
+
+ </para>
+
+ <para>
+
+ <screen>["login success "]
+> <userinput>config show Logging</userinput>
+Logging/loggers [] list
+</screen>
+
+ </para>
+
+ <para>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </para>
+
+ <para>
+
+ Let's first add a default logger:
+
+ </para>
+
+<!-- TODO: adding the empty loggers makes no sense -->
+ <para>
+
+ <screen><userinput>> config add Logging/loggers</userinput>
+> <userinput>config show Logging</userinput>
+Logging/loggers/ list (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ The name is mandatory, so we must set it. We will also
+ change the severity. Let's start with the global
+ logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config set Logging/loggers[0]/name *</userinput>
+> <userinput>config set Logging/loggers[0]/severity WARN</userinput>
+> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers[0]/output_options</userinput>
+> <userinput> config show Logging/loggers[0]/output_options</userinput>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</screen>
+
+
+ </para>
+
+ <para>
+
+ These aren't the values we are looking for.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config set Logging/loggers[0]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxsize 30000</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxver 8</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ This makes the entire configuration for this logger
+ look like:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config show all Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config commit</userinput></screen>
+
+ </para>
+
+ <para>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers</userinput>
+> <userinput> config set Logging/loggers[1]/name Auth</userinput>
+> <userinput> config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput> config set Logging/loggers[1]/debuglevel 40</userinput>
+> <userinput> config add Logging/loggers[1]/output_options</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config remove Logging/loggers[1]</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And every module will now be using the values from the
+ logger named <quote>*</quote>.
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Logging Message Format</title>
+
+ <para>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </para>
+
+ <para>
+ Consider the message below logged to a file:
+ <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+ </para>
+
+ <para>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.
+ </para>
+
+ <para>
+ The log message comprises a number of components:
+
+ <variablelist>
+ <varlistentry>
+ <term>2011-06-15 13:48:22.034</term>
+<!-- TODO: timestamp repeated even if using syslog? -->
+ <listitem><para>
+ The date and time at which the message was generated.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERROR</term>
+ <listitem><para>
+ The severity of the message.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>[b10-resolver.asiolink]</term>
+ <listitem><para>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <command>b10-resolver</command>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ASIODNS_OPENSOCK</term>
+ <listitem><para>
+ The message identification. Every message in BIND 10
+ has a unique identification, which can be used as an
+ index into the <ulink
+ url="bind10-messages.html"><citetitle>BIND 10 Messages
+ Manual</citetitle></ulink> (<ulink
+ url="http://bind10.isc.org/docs/bind10-messages.html"
+ />) from which more information can be obtained.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+ <listitem><para>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ </section>
+
+ </chapter>
+
<!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
<!-- <index> <title>Index</title> </index> -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
new file mode 100644
index 0000000..237b7ad
--- /dev/null
+++ b/doc/guide/bind10-messages.html
@@ -0,0 +1,2081 @@
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </p><p>
+ This is the messages manual for BIND 10 version 20110809.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+ </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dt><span class="chapter"><a href="#messages">2. BIND 10 Messages</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><p>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ </p><pre class="screen">IDENTIFICATION message-text</pre><p>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </p><p>
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </p><p>
+ For information on configuring and using BIND 10 logging,
+ refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
+ </p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet whose question RR type is something other than SOA. (The
+RR type received is included in the message.) The server will return a
+FORMERR error to the sender.
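+</p><p>
+The following is a minimal, illustrative sketch of the two NOTIFY checks
+described by AUTH_NOTIFY_QUESTIONS and AUTH_NOTIFY_RRTYPE; it is not the
+authoritative server's actual code, and the parameters are hypothetical
+stand-ins for values parsed from the received packet:
+</p><pre>
+# Hypothetical illustration only; not BIND 10's real API.
+def check_notify(question_count, question_rrtype):
+    if question_count != 1:
+        # AUTH_NOTIFY_QUESTIONS: zero or more than one question
+        return "FORMERR"
+    if question_rrtype != "SOA":
+        # AUTH_NOTIFY_RRTYPE: question RR type is not SOA
+        return "FORMERR"
+    return None   # the NOTIFY question section is acceptable
+</pre><p>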
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet, as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding
+normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is
+an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="BIND10_CHECK_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</span></dt><dd><p>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</p></dd><dt><a name="BIND10_KILLING_ALL_PROCESSES"></a><span class="term">BIND10_KILLING_ALL_PROCESSES killing all started processes</span></dt><dd><p>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</p></dd><dt><a name="BIND10_KILL_PROCESS"></a><span class="term">BIND10_KILL_PROCESS killing process %1</span></dt><dd><p>
+The boss module is sending a kill signal to the process with the given
+name, as part of the process of killing all started processes during a
+failed startup, as described for BIND10_KILLING_ALL_PROCESSES.
+</p></dd><dt><a name="BIND10_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</span></dt><dd><p>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
+While the process was listening on the message bus channel for messages,
+the channel suddenly disappeared. The msgq daemon may have died. This
+might leave the system in an inconsistent state, and BIND 10 will now
+shut down.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</p></dd><dt><a name="BIND10_RECEIVED_COMMAND"></a><span class="term">BIND10_RECEIVED_COMMAND received command: %1</span></dt><dd><p>
+The boss module received a command and will now process it. The command
+is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_NEW_CONFIGURATION"></a><span class="term">BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</span></dt><dd><p>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_SIGNAL"></a><span class="term">BIND10_RECEIVED_SIGNAL received signal %1</span></dt><dd><p>
+The boss module received the given signal.
+</p></dd><dt><a name="BIND10_RESURRECTED_PROCESS"></a><span class="term">BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</span></dt><dd><p>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</p></dd><dt><a name="BIND10_RESURRECTING_PROCESS"></a><span class="term">BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</span></dt><dd><p>
+The given process has ended unexpectedly and is now being restarted.
+</p></dd><dt><a name="BIND10_SELECT_ERROR"></a><span class="term">BIND10_SELECT_ERROR error in select() call: %1</span></dt><dd><p>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</p></dd><dt><a name="BIND10_SEND_SIGKILL"></a><span class="term">BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGKILL signal to the given process.
+</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that does not work,
+it will send SIGKILL signals to any processes still alive.
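+</p><p>
+The escalation described above, sketched in outline only (the helper
+functions send_shutdown_command and still_alive and the grace period are
+hypothetical, not the boss module's actual code):
+</p><pre>
+import os, signal, time
+
+def stop_children(children, send_shutdown_command, still_alive, grace=5):
+    # 1. Ask every process to shut down over the message channel.
+    for name in children:
+        send_shutdown_command(name)           # cf. BIND10_STOP_PROCESS
+    time.sleep(grace)
+    # 2. Processes that ignored the request get SIGTERM ...
+    for name, pid in still_alive():
+        os.kill(pid, signal.SIGTERM)          # cf. BIND10_SEND_SIGTERM
+    time.sleep(grace)
+    # 3. ... and anything still running gets SIGKILL.
+    for name, pid in still_alive():
+        os.kill(pid, signal.SIGKILL)          # cf. BIND10_SEND_SIGKILL
+</pre><p>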
+</p></dd><dt><a name="BIND10_SHUTDOWN_COMPLETE"></a><span class="term">BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</span></dt><dd><p>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_CAUSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</span></dt><dd><p>
+The socket creator reported an error when creating a socket, but the
+function that failed is unknown (not one of 'S' for socket or 'B' for
+bind).
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
+The boss requested a socket from the creator, but the response was not
+recognized. This looks like a programming error.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_INIT"></a><span class="term">BIND10_SOCKCREATOR_INIT initializing socket creator parser</span></dt><dd><p>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_KILL"></a><span class="term">BIND10_SOCKCREATOR_KILL killing the socket creator</span></dt><dd><p>
+The socket creator is being terminated forcibly, by sending it SIGKILL.
+This should not normally happen.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TERMINATE"></a><span class="term">BIND10_SOCKCREATOR_TERMINATE terminating socket creator</span></dt><dd><p>
+The boss module is sending a termination request to the socket creator.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TRANSPORT_ERROR"></a><span class="term">BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</span></dt><dd><p>
+Either sending or receiving data from the socket creator failed with the
+given error. The creator probably crashed or some serious OS-level problem
+occurred, as the communication happens only on the local host.
+</p></dd><dt><a name="BIND10_SOCKET_CREATED"></a><span class="term">BIND10_SOCKET_CREATED successfully created socket %1</span></dt><dd><p>
+The socket creator successfully created and sent a requested socket; it
+has the given file number.
+</p></dd><dt><a name="BIND10_SOCKET_ERROR"></a><span class="term">BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</span></dt><dd><p>
+The socket creator failed to create the requested socket. It failed on
+the indicated OS API function with the given error.
+</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
+The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
+The given process has successfully been started.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
+The given process has successfully been started, and has the given PID.
+</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
+Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
+The boss module is starting the given process.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given port number.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT_ADDRESS"></a><span class="term">BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+</p></dd><dt><a name="BIND10_STARTUP_COMPLETE"></a><span class="term">BIND10_STARTUP_COMPLETE BIND 10 started</span></dt><dd><p>
+All modules have been successfully started, and BIND 10 is now running.
+</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_STOP_PROCESS"></a><span class="term">BIND10_STOP_PROCESS asking %1 to shut down</span></dt><dd><p>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
+The cache tried to generate the complete answer message. It knows the
+structure of the message, but some of the RRsets to be put there are not
+in the cache (they have probably already expired). Therefore it pretends
+the message was not found.
+</p></dd><dt><a name="CACHE_LOCALZONE_FOUND"></a><span class="term">CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</span></dt><dd><p>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</p></dd><dt><a name="CACHE_LOCALZONE_UNKNOWN"></a><span class="term">CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</span></dt><dd><p>
+Debug message. The requested data was not found in the local zone data.
+</p></dd><dt><a name="CACHE_LOCALZONE_UPDATE"></a><span class="term">CACHE_LOCALZONE_UPDATE updating local zone element at key %1</span></dt><dd><p>
+Debug message issued when there is an update to the local zone section
+of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_DEINIT"></a><span class="term">CACHE_MESSAGES_DEINIT deinitialized message cache</span></dt><dd><p>
+Debug message. It is issued when the server deinitializes the message cache.
+</p></dd><dt><a name="CACHE_MESSAGES_EXPIRED"></a><span class="term">CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. The requested data was found in the message cache, but it
+has already expired. Therefore the cache removes the entry and pretends it
+found nothing.
+</p></dd><dt><a name="CACHE_MESSAGES_FOUND"></a><span class="term">CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. We found the whole message in the cache, so it can be
+returned to the user without any further lookups.
+</p></dd><dt><a name="CACHE_MESSAGES_INIT"></a><span class="term">CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</span></dt><dd><p>
+Debug message issued when a new message cache is created. It lists the
+class of messages it can hold and the maximum size of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_REMOVE"></a><span class="term">CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</span></dt><dd><p>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that,
+while updating, the old instance is being removed prior to inserting the
+new one.
+</p></dd><dt><a name="CACHE_MESSAGES_UNCACHEABLE"></a><span class="term">CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</span></dt><dd><p>
+Debug message, noting that the given message cannot be cached. This is
+because there is no SOA record in the message. See RFC 2308 section 5 for
+more information.
+</p></dd><dt><a name="CACHE_MESSAGES_UNKNOWN"></a><span class="term">CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</span></dt><dd><p>
+Debug message. The message cache didn't find any entry for the given key.
+</p></dd><dt><a name="CACHE_MESSAGES_UPDATE"></a><span class="term">CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</span></dt><dd><p>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new
+one is created.
+</p></dd><dt><a name="CACHE_RESOLVER_DEEPEST"></a><span class="term">CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT"></a><span class="term">CACHE_RESOLVER_INIT initializing resolver cache for class %1</span></dt><dd><p>
+Debug message. The resolver cache is being created for the given class.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT_INFO"></a><span class="term">CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</span></dt><dd><p>
+Debug message: the resolver cache is being created for the given class.
+The only difference from CACHE_RESOLVER_INIT is the format of the passed
+information; otherwise the behaviour is the same.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_MSG"></a><span class="term">CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_RRSET"></a><span class="term">CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_MSG"></a><span class="term">CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_RRSET"></a><span class="term">CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find an RRset (a request
+that usually originates internally within the resolver).
+</p></dd><dt><a name="CACHE_RESOLVER_NO_QUESTION"></a><span class="term">CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</span></dt><dd><p>
+The cache tried to fill the found data into the response message, but
+discovered that the message contains no question section, which is
+invalid. This is likely a programming error; please submit a bug report.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to look up a message in the resolver cache, it
+was discovered that there is no cache for this class at all. Therefore no
+message is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to look up an RRset in the resolver cache, it
+was discovered that there is no cache for this class at all. Therefore no
+data is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating a message in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating an RRset in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there is no cache for the class of the message. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there is no cache for the class of the RRset. Therefore
+the RRset will not be cached.
+</p></dd><dt><a name="CACHE_RRSET_EXPIRED"></a><span class="term">CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</span></dt><dd><p>
+Debug message. The requested data was found in the RRset cache. However,
+it has expired, so the cache removed it and is going to pretend nothing
+was found.
+</p></dd><dt><a name="CACHE_RRSET_INIT"></a><span class="term">CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</span></dt><dd><p>
+Debug message. An RRset cache that will hold at most the given number of
+RRsets for the given class is being created.
+</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</p></dd><dt><a name="CACHE_RRSET_UNTRUSTED"></a><span class="term">CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds
+the same RRset, but from a more trusted source, so the old one is kept and
+the new one ignored.
+</p></dd><dt><a name="CACHE_RRSET_UPDATE"></a><span class="term">CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</span></dt><dd><p>
+Debug message. The cache is updating the given RRset with new data.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low-level error: we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given.
+It is unlikely that the program reporting this error can usefully continue
+running, as communication with the rest of BIND 10 is vital for the
+components.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made;
+it should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its
+envelope and payload are listed). This follows CC_GROUP_RECEIVE, but might
+happen some time later, depending on whether we waited for it or just
+polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket do not
+make sense when interpreted as message lengths. The first is the total
+length of the message; the second is the length of the header. The header
+and its length field (2 bytes) are counted in the total length.
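+</p><p>
+As an illustration only (this is not the library's actual parser), the
+consistency check described above might look like the following, assuming
+the two length values have already been read from the socket:
+</p><pre>
+def check_lengths(total_len, header_len):
+    # The header and its 2-byte length field are counted in the total
+    # length, so a sane message satisfies total_len >= header_len + 2
+    # and neither value is zero.
+    if total_len == 0 or header_len == 0 or header_len + 2 > total_len:
+        raise ValueError("invalid length parameters (%d, %d)"
+                         % (total_len, header_len))
+    return total_len - header_len - 2   # length of the message payload
+</pre><p>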
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing the length of the message on the socket,
+but it is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for the reason
+listed. It is unlikely that any program will be able to continue without
+this communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. The length of time the program is willing to wait for a
+reply is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it will
+wake the program and the library will automatically pass it over to the
+correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually
+because it sent a query to a different program that did not answer for
+whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE"></a><span class="term">CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</span></dt><dd><p>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_BAD_CONFIG_DATA"></a><span class="term">CMDCTL_BAD_CONFIG_DATA error in config data: %1</span></dt><dd><p>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</p></dd><dt><a name="CMDCTL_BAD_PASSWORD"></a><span class="term">CMDCTL_BAD_PASSWORD bad password for user: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_ERROR"></a><span class="term">CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_TIMEOUT"></a><span class="term">CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</span></dt><dd><p>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_COMMAND_ERROR"></a><span class="term">CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</span></dt><dd><p>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</p></dd><dt><a name="CMDCTL_COMMAND_SENT"></a><span class="term">CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</span></dt><dd><p>
+This debug message indicates that the given command has been sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_NO_SUCH_USER"></a><span class="term">CMDCTL_NO_SUCH_USER username not found in user database: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_NO_USER_ENTRIES_READ"></a><span class="term">CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</span></dt><dd><p>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_SEND_COMMAND"></a><span class="term">CMDCTL_SEND_COMMAND sending command %1 to module %2</span></dt><dd><p>
+This debug message indicates that the given command is being sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED"></a><span class="term">CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</span></dt><dd><p>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the SSL request itself was bad, or that the local key
+or certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_UNCAUGHT_EXCEPTION"></a><span class="term">CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</p></dd><dt><a name="CMDCTL_USER_DATABASE_READ_ERROR"></a><span class="term">CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</span></dt><dd><p>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</p></dd><dt><a name="CONFIG_GET_FAILED"></a><span class="term">CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_LOG_CONFIG_ERRORS"></a><span class="term">CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</span></dt><dd><p>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</p></dd><dt><a name="CONFIG_LOG_EXPLICIT"></a><span class="term">CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_EXPLICIT"></a><span class="term">CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_WILD"></a><span class="term">CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</p></dd><dt><a name="CONFIG_LOG_WILD_MATCH"></a><span class="term">CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
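+</p><p>
+A rough sketch of the precedence that the four CONFIG_LOG_* messages above
+describe (explicitly-named entries for this program win over wildcard
+entries, and everything else is ignored); the fnmatch-based matching below
+is an assumption for illustration, not the configuration library's actual
+algorithm:
+</p><pre>
+import fnmatch
+
+def entry_applies(entry_name, program, explicitly_matched):
+    """Decide whether one "loggers" entry applies to this program."""
+    if "*" not in entry_name:
+        # CONFIG_LOG_EXPLICIT or CONFIG_LOG_IGNORE_EXPLICIT
+        return entry_name == program
+    if program in explicitly_matched:
+        return False               # CONFIG_LOG_IGNORE_WILD
+    return fnmatch.fnmatch(program, entry_name)   # CONFIG_LOG_WILD_MATCH
+</pre><p>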
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
+</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
+This is a debug message issued during startup when the hotspot cache
+is created.
+</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
+Debug information. The hotspot cache is being destroyed.
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is disabled.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is enabled.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</span></dt><dd><p>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
+</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
+Debug information. An item was successfully located in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</span></dt><dd><p>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</span></dt><dd><p>
+A debug message indicating that a new item is being inserted into the hotspot
+cache.
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache was searched for the
+specified item but it was not found.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</span></dt><dd><p>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</span></dt><dd><p>
+Debug information. An item is being removed from the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</span></dt><dd><p>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If the cache currently holds more items than that, some of them will
+be dropped. A size of 0 means no limit.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
+This was an internal error while reading data from a datasource. This can either
+mean the specific data source implementation is not behaving correctly, or the
+data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
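+</p><p>
+A minimal sketch of the "lowest TTL wins" rule described above (illustrative
+only; the records shown are invented for the example):
+</p><pre class="programlisting">
+# ttl_sketch.py - pick the lowest TTL for an RRset whose records disagree
+records = [
+    ("www.example.org", "A", 3600, "192.0.2.1"),
+    ("www.example.org", "A", 300,  "192.0.2.2"),   # differing TTL, same RRset
+]
+
+rrset_ttl = min(ttl for (_, _, ttl, _) in records)
+print("setting RRset TTL to", rrset_ttl)           # prints 300
+</pre><p>
+Using the lowest of the differing values is the conservative choice, since no
+record is then treated as valid for longer than its own TTL allows.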
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program encountered a delegation to a
+different zone at the given domain name. The delegation will be returned
+instead of the requested data.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION_EXACT"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</span></dt><dd><p>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DNAME"></a><span class="term">DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program encountered a DNAME redirection to a
+different place in the domain space at the given domain name. The DNAME will
+be returned instead of the requested data.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXRRSET"></a><span class="term">DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_RRSET"></a><span class="term">DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
+A debug message indicating that a query for the given name and RR type is being
+processed.
+</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
+Debug information. An RRset is being added to the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
+</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
+Debug information. A zone is being added into the in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</p></dd><dt><a name="DATASRC_MEM_CNAME"></a><span class="term">DATASRC_MEM_CNAME CNAME at the domain '%1'</span></dt><dd><p>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but encountered the
+other way around -- other data was being added to a domain that already
+contains a CNAME.
+</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
+An attempt was made to add a CNAME to a domain that already contains other
+data, but the protocol forbids a CNAME from coexisting with any other record
+(RFC 1034, section 3.6.2). This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_MEM_CREATE"></a><span class="term">DATASRC_MEM_CREATE creating zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</p></dd><dt><a name="DATASRC_MEM_DELEG_FOUND"></a><span class="term">DATASRC_MEM_DELEG_FOUND delegation found at '%1'</span></dt><dd><p>
+Debug information. A delegation point was found above the requested record.
+</p></dd><dt><a name="DATASRC_MEM_DESTROY"></a><span class="term">DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</span></dt><dd><p>
+Debug information. A zone from in-memory data source is being destroyed.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_ENCOUNTERED"></a><span class="term">DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</span></dt><dd><p>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
+Debug information. A DNAME was found instead of the requested information.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</p></dd><dt><a name="DATASRC_MEM_DUP_RRSET"></a><span class="term">DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</span></dt><dd><p>
+An RRset is being inserted into in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</p></dd><dt><a name="DATASRC_MEM_EXACT_DELEGATION"></a><span class="term">DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</span></dt><dd><p>
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+</p></dd><dt><a name="DATASRC_MEM_FIND"></a><span class="term">DATASRC_MEM_FIND find '%1/%2'</span></dt><dd><p>
+Debug information. A search for the requested RRset is being started.
+</p></dd><dt><a name="DATASRC_MEM_FIND_ZONE"></a><span class="term">DATASRC_MEM_FIND_ZONE looking for zone '%1'</span></dt><dd><p>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
+Debug information. The content of master file is being loaded into the memory.
+</p></dd><dt><a name="DATASRC_MEM_NOT_FOUND"></a><span class="term">DATASRC_MEM_NOT_FOUND requested domain '%1' not found</span></dt><dd><p>
+Debug information. The requested domain does not exist.
+</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may cause the search to stop.
+</p></dd><dt><a name="DATASRC_MEM_NXRRSET"></a><span class="term">DATASRC_MEM_NXRRSET no such type '%1' at '%2'</span></dt><dd><p>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</p></dd><dt><a name="DATASRC_MEM_OUT_OF_ZONE"></a><span class="term">DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</span></dt><dd><p>
+An attempt was made to add the domain to a zone that shouldn't contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</p></dd><dt><a name="DATASRC_MEM_RENAME"></a><span class="term">DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</span></dt><dd><p>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to the name the user asked for.
+As it is impossible to rename RRsets with our libraries, a new one is created
+and all resource records are copied over.
+</p></dd><dt><a name="DATASRC_MEM_SINGLETON"></a><span class="term">DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</span></dt><dd><p>
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_SUCCESS"></a><span class="term">DATASRC_MEM_SUCCESS query for '%1/%2' successful</span></dt><dd><p>
+Debug information. The requested record was found.
+</p></dd><dt><a name="DATASRC_MEM_SUPER_STOP"></a><span class="term">DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</span></dt><dd><p>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal and is therefore treated as an
+NXRRSET case (i.e. the domain exists, but it doesn't have the requested
+record type).
+</p></dd><dt><a name="DATASRC_MEM_SWAP"></a><span class="term">DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</span></dt><dd><p>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual way to perform a manipulation in an exception-safe manner:
+the new data are prepared in a separate zone object and, once that succeeds,
+the contents are swapped. The original object then holds the new data, while
+the other object, now holding the old data, can be safely destroyed.
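+</p><p>
+The idea can be shown with a short sketch (illustrative only, not the actual
+in-memory data source code; the Zone class below is invented for the example):
+</p><pre class="programlisting">
+# swap_sketch.py - copy-and-swap style update of zone contents
+class Zone:
+    def __init__(self, records):
+        self.records = dict(records)
+
+    def swap(self, other):
+        # Exchanging plain references cannot fail, so once the new data has
+        # been built successfully the switch itself is exception-safe.
+        self.records, other.records = other.records, self.records
+
+live = Zone({"www.example.org": "192.0.2.1"})
+
+staging = Zone({})                                   # build the new contents
+staging.records["www.example.org"] = "192.0.2.10"    # loading may fail here
+staging.records["mail.example.org"] = "192.0.2.20"   # without touching 'live'
+
+live.swap(staging)      # 'live' now holds the new data
+del staging             # the object now holding the old data can be discarded
+print(live.records)
+</pre><p>
+If anything goes wrong while the staging object is being prepared, the live
+zone is left untouched, which is the exception-safety property described
+above.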
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_CANCEL"></a><span class="term">DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</span></dt><dd><p>
+Debug information. A domain above the wildcard was reached, but there's
+something below the requested domain, so the wildcard doesn't apply here.
+This behaviour is specified by RFC 1034, section 4.3.3.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
+</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
+An attempt was made to add a data source to a meta data source, but their
+classes do not match.
+</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
+Debug information. A data source is being removed from meta data source.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC"></a><span class="term">DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</span></dt><dd><p>
+Debug information. A NSEC record covering this zone is being added.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_NSEC3"></a><span class="term">DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</span></dt><dd><p>
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_RRSET"></a><span class="term">DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</span></dt><dd><p>
+Debug information. An RRset is being added to the response message.
+</p></dd><dt><a name="DATASRC_QUERY_ADD_SOA"></a><span class="term">DATASRC_QUERY_ADD_SOA adding SOA of '%1'</span></dt><dd><p>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</p></dd><dt><a name="DATASRC_QUERY_AUTH_FAIL"></a><span class="term">DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the authoritative query. A code
+of 1 means a general error, 2 means "not implemented". The data source should
+have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</span></dt><dd><p>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</span></dt><dd><p>
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
+Debug information. The whole referral information is being copied into the
+response message.
+</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
+During an attempt to synthesize a CNAME from this DNAME, it was discovered
+that the DNAME is empty (it has no records). This indicates a problem with
+the supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
+Some subtask of query processing failed. The reason should have been reported
+already and a SERVFAIL will be returned to the querying system.
+</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
+</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GET_NS_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</span></dt><dd><p>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned name, so A/AAAA records for it are looked up
+and put into the additional section.
+</p></dd><dt><a name="DATASRC_QUERY_GLUE_FAIL"></a><span class="term">DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the glue query. A code of 1 means
+a general error, 2 means "not implemented". The data source should have logged
+the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_INVALID_OP"></a><span class="term">DATASRC_QUERY_INVALID_OP invalid query operation requested</span></dt><dd><p>
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is an auth query.
+</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a query for referral information.
+</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
+Debug information. The last DO_QUERY is a simple query.
+</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_NS"></a><span class="term">DATASRC_QUERY_MISSING_NS missing NS records for '%1'</span></dt><dd><p>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_MISSING_SOA"></a><span class="term">DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</span></dt><dd><p>
+The answer should have been a negative one (e.g. reporting the nonexistence of
+something). To do so, an SOA record should be put into the authority section,
+but the zone does not have one. This indicates a problem with the provided
+data.
+</p></dd><dt><a name="DATASRC_QUERY_NOGLUE_FAIL"></a><span class="term">DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the no-glue query. A code of 1
+means a general error, 2 means "not implemented". The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC record to the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC3"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</span></dt><dd><p>
+An attempt to add an NSEC3 record to the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+</p></dd><dt><a name="DATASRC_QUERY_NO_ZONE"></a><span class="term">DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</span></dt><dd><p>
+The lookup of the domain failed because the data source has no zone that
+contains the domain. Perhaps the query was sent to the wrong server.
+</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
+Debug information. A query is now being processed.
+</p></dd><dt><a name="DATASRC_QUERY_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+The user wants DNSSEC and it was discovered that the entity (either the
+domain or the record) doesn't exist, but there was an error getting the
+NSEC/NSEC3 record to prove the nonexistence.
+</p></dd><dt><a name="DATASRC_QUERY_REF_FAIL"></a><span class="term">DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the query for referral information.
+A code of 1 means a general error, 2 means "not implemented". The data source
+should have logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_RRSIG"></a><span class="term">DATASRC_QUERY_RRSIG unable to answer RRSIG query</span></dt><dd><p>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</p></dd><dt><a name="DATASRC_QUERY_SIMPLE_FAIL"></a><span class="term">DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</span></dt><dd><p>
+The underlying data source failed to answer the simple query. A code of 1
+means a general error, 2 means "not implemented". The data source should have
+logged the specific error already.
+</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
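+</p><p>
+The synthesis itself is a simple name rewrite: the labels of the query name
+below the DNAME owner are kept and the owner suffix is replaced by the DNAME
+target. A small sketch (illustrative only; the function and the names used
+are invented for the example):
+</p><pre class="programlisting">
+# dname_sketch.py - compute the CNAME target for a query name under a DNAME
+def synthesize_cname_target(qname, dname_owner, dname_target):
+    suffix = "." + dname_owner
+    if not qname.endswith(suffix):
+        raise ValueError("query name is not below the DNAME owner")
+    prefix = qname[:-len(suffix)]          # the labels below the DNAME owner
+    return prefix + "." + dname_target
+
+# Given "example.com. DNAME example.net.", a query for www.example.com is
+# answered with the DNAME plus a synthesized CNAME pointing at:
+print(synthesize_cname_target("www.example.com", "example.com", "example.net"))
+# www.example.net
+</pre><p>
+The actual implementation works on parsed domain name objects rather than
+plain strings, but the label substitution is the same idea.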
+</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
+</p></dd><dt><a name="DATASRC_QUERY_UNKNOWN_RESULT"></a><span class="term">DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</span></dt><dd><p>
+This indicates a programmer error. The answer from the subtask doesn't look
+like anything known.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD"></a><span class="term">DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</span></dt><dd><p>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
+During an attempt to cover the domain with a wildcard, an error occurred. The
+exact cause should already have been reported.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
+While processing a wildcard, a referral was encountered, but it wasn't possible
+to get enough information for it. The code is 1 for error, 2 for not
+implemented.
+</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
+Debug information. The SQLite data source is closing the database file.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNCLOSE"></a><span class="term">DATASRC_SQLITE_CONNCLOSE Closing sqlite database</span></dt><dd><p>
+The database file is no longer needed and is being closed.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNOPEN"></a><span class="term">DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</span></dt><dd><p>
+The database file is being opened so it can start providing data.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being created.
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
+Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_DROPCONN"></a><span class="term">DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</span></dt><dd><p>
+The object around a database connection is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS"></a><span class="term">DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</span></dt><dd><p>
+Debug information. The data source is looking up the addresses for given
+domain name.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDADDRS_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the one the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT"></a><span class="term">DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDEXACT_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the one the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREC"></a><span class="term">DATASRC_SQLITE_FINDREC looking for record '%1/%2'</span></dt><dd><p>
+Debug information. The SQLite data source is looking up records of given name
+and type in the database.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF"></a><span class="term">DATASRC_SQLITE_FINDREF looking for referral at '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was trying to identify whether there is a referral, but
+it contains a different class than the one the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the one the query was for.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</span></dt><dd><p>
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_NEWCONN"></a><span class="term">DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</span></dt><dd><p>
+A wrapper object to hold the database connection is being initialized.
+</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
+</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
+</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
+The database for the SQLite data source was found to be empty. It is assumed
+that this is the first run, so the database is being initialized with the
+current schema. It will still contain no data, but it will be ready for use.
+</p></dd><dt><a name="DATASRC_STATIC_CLASS_NOT_CH"></a><span class="term">DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</span></dt><dd><p>
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
+</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</p></dd><dt><a name="DATASRC_STATIC_FIND"></a><span class="term">DATASRC_STATIC_FIND looking for '%1/%2'</span></dt><dd><p>
+Debug information. This resource record set is being looked up in the static
+data source.
+</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
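+</p><p>
+These three LOGIMPL messages all concern the internally-created DEBUGn string.
+A sketch of the parse-and-clamp behaviour they describe (illustrative only;
+the bounds below are placeholders, not necessarily the real limits):
+</p><pre class="programlisting">
+# debug_level_sketch.py - parse a "DEBUGn" string and clamp it into range
+import re
+
+MIN_DEBUG_LEVEL = 0      # placeholder bounds, for the illustration only
+MAX_DEBUG_LEVEL = 99
+
+def debug_level_from_string(spec):
+    match = re.match(r"DEBUG(\d+)$", spec)
+    if match is None:
+        # Corresponds to LOGIMPL_BAD_DEBUG_STRING.
+        raise ValueError("debug string %r has invalid format" % spec)
+    level = int(match.group(1))
+    # Out-of-range values are clamped rather than rejected, as described by
+    # LOGIMPL_ABOVE_MAX_DEBUG and LOGIMPL_BELOW_MIN_DEBUG.
+    return max(MIN_DEBUG_LEVEL, min(MAX_DEBUG_LEVEL, level))
+
+print(debug_level_from_string("DEBUG22"))     # 22
+print(debug_level_from_string("DEBUG150"))    # clamped to 99
+</pre><p>
+Because the DEBUGn strings are created internally, any of these three messages
+normally points at a programming error rather than at a problem with the
+user's configuration.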
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
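+</p><p>
+Putting together the pieces described by the message-file related entries, a
+message file fragment has roughly the following shape (a hypothetical fragment
+for illustration only; the namespace and the messages shown are examples, not
+taken from any particular module):
+</p><pre class="programlisting">
+$NAMESPACE isc::log
+% LOG_WRITE_ERROR error writing to %1: %2
+% LOG_READ_ERROR error reading from message file %1: %2
+</pre><p>
+Each "%" line carries a message ID followed by its default text, while the
+"$" directives control aspects of the C++ files generated from the message
+file.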
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message may appear:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</p><p>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
+The specified error was encountered reading from the named message file.
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_OPCODE"></a><span class="term">NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QID"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QUERY_NAME"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_QR_NOT_SET"></a><span class="term">NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION"></a><span class="term">NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</p></dd><dt><a name="NOTIFY_OUT_RETRY_EXCEEDED"></a><span class="term">NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</span></dt><dd><p>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</p></dd><dt><a name="NOTIFY_OUT_SENDING_NOTIFY"></a><span class="term">NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</span></dt><dd><p>
+A notify message is sent to the secondary nameserver at the given
+address.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_RECV_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_TIMEOUT"></a><span class="term">NOTIFY_OUT_TIMEOUT retry notify to %1#%2</span></dt><dd><p>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for an RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
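+</p><p>
+The exact smoothing formula is not needed to interpret this message, but the
+general idea can be sketched as a weighted blend of the old estimate and the
+new measurement (illustrative only; the weight and the formula below are an
+assumption for the example, not the actual NSAS calculation):
+</p><pre class="programlisting">
+# rtt_sketch.py - damping sudden changes in a measured round-trip time
+def update_rtt(old_rtt_ms, measured_rtt_ms, weight=0.7):
+    # Keep most of the old estimate so that one unusually slow (or fast)
+    # reply does not dominate future server selection.
+    return old_rtt_ms * weight + measured_rtt_ms * (1.0 - weight)
+
+print(update_rtt(80.0, 200.0))   # was 80 ms, measured 200 ms -> 116.0 ms
+</pre><p>
+This damping is why the "is now" value reported by the message can differ
+from the raw round-trip time of the query that triggered the update.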
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different given type and class.
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
+A debug message recording that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_CNAME"></a><span class="term">RESLIB_CNAME CNAME received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent). However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+A debug message, this indicates that a response was received for the specified
+query and was categorized as a referral. However, the received message did
+not contain any NS RRsets. This may indicate a programming error in the
+response classification code.
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+A debug message recording that either a NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question. Previous debug
+messages will have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
+A debug message indicating that a protocol error was received. As there
+are no retries left, an error will be reported.
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path. A SERVFAIL will be returned.
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache. The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question. The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
+</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple. The first action will be to look up
+the specified tuple in the cache. The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer. The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
+A debug message giving the round-trip time of the last query and response.
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</span></dt><dd><p>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+This is a debug message and should only be seen in unit tests. A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</p></dd><dt><a name="RESLIB_TRUNCATED"></a><span class="term">RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</span></dt><dd><p>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP. There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update was
+not applied and the resolver parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
+</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
+This is a debug message indicating that the main resolver object has
+been created.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
+</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
+servers.
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
+</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so it is returning a REFUSED response to the sender.
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
+</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+This debug message lists the parameters being set for the resolver. These are:
+the query timeout, the timeout (in ms) used for queries originated by the
+resolver to upstream servers; the client timeout, the interval allowed for
+resolving a client's query, after which the resolver sends back a SERVFAIL to
+the client whilst continuing to resolve the query; the lookup timeout, the
+time at which the resolver gives up trying to resolve a query; and the retry
+count, the number of times the resolver will retry a query to an upstream
+server if it gets a timeout.
+</p><p>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+time out, and then drop the query.
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
+This informational message is output when the resolver has shut down.
+</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="SRVCOMM_ADDRESSES_NOT_LIST"></a><span class="term">SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</span></dt><dd><p>
+This points to an error in configuration. What was supposed to be a list of
+IP address and port pairs is not a list at all but something else.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_FAIL"></a><span class="term">SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</span></dt><dd><p>
+The server failed to bind to one of the address/port pairs it should listen on
+according to the configuration, for the reason listed in the message (usually
+because that pair is already used by another service or because of missing
+privileges). The server will try to recover and bind the address/port pairs it
+was listening on before (if any).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_MISSING"></a><span class="term">SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_TYPE"></a><span class="term">SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and a port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_UNRECOVERABLE"></a><span class="term">SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</span></dt><dd><p>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</p><p>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator configures it to do so.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_VALUE"></a><span class="term">SRVCOMM_ADDRESS_VALUE address to set: %1#%2</span></dt><dd><p>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (i.e. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</p></dd><dt><a name="SRVCOMM_KEYS_DEINIT"></a><span class="term">SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</p></dd><dt><a name="SRVCOMM_KEYS_INIT"></a><span class="term">SRVCOMM_KEYS_INIT initializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</p></dd><dt><a name="SRVCOMM_KEYS_UPDATE"></a><span class="term">SRVCOMM_KEYS_UPDATE updating TSIG keyring</span></dt><dd><p>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</p></dd><dt><a name="SRVCOMM_PORT_RANGE"></a><span class="term">SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</span></dt><dd><p>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</p></dd><dt><a name="SRVCOMM_SET_LISTEN"></a><span class="term">SRVCOMM_SET_LISTEN setting addresses to listen to</span></dt><dd><p>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</p></dd><dt><a name="STATHTTPD_BAD_OPTION_VALUE"></a><span class="term">STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</p></dd><dt><a name="STATHTTPD_CC_SESSION_ERROR"></a><span class="term">STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</p></dd><dt><a name="STATHTTPD_CLOSING"></a><span class="term">STATHTTPD_CLOSING closing %1#%2</span></dt><dd><p>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</p></dd><dt><a name="STATHTTPD_CLOSING_CC_SESSION"></a><span class="term">STATHTTPD_CLOSING_CC_SESSION stopping cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</p></dd><dt><a name="STATHTTPD_HANDLE_CONFIG"></a><span class="term">STATHTTPD_HANDLE_CONFIG reading configuration: %1</span></dt><dd><p>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_STATUS_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</p></dd><dt><a name="STATHTTPD_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</p></dd><dt><a name="STATHTTPD_SHUTDOWN"></a><span class="term">STATHTTPD_SHUTDOWN shutting down</span></dt><dd><p>
+The stats-httpd daemon is shutting down.
+</p></dd><dt><a name="STATHTTPD_STARTED"></a><span class="term">STATHTTPD_STARTED listening on %1#%2</span></dt><dd><p>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</p></dd><dt><a name="STATHTTPD_STARTING_CC_SESSION"></a><span class="term">STATHTTPD_STARTING_CC_SESSION starting cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</p></dd><dt><a name="STATHTTPD_START_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</p></dd><dt><a name="STATHTTPD_STOPPED_BY_KEYBOARD"></a><span class="term">STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</p></dd><dt><a name="STATHTTPD_UNKNOWN_CONFIG_ITEM"></a><span class="term">STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</span></dt><dd><p>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</p></dd><dt><a name="STATS_BAD_OPTION_VALUE"></a><span class="term">STATS_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats module was called with a bad command-line argument and will
+not start.
+</p></dd><dt><a name="STATS_CC_SESSION_ERROR"></a><span class="term">STATS_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
+The stats module received a command to show all statistics that it has
+collected.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</span></dt><dd><p>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</p></dd><dt><a name="STATS_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats module and it will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_STATUS_COMMAND"></a><span class="term">STATS_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</p></dd><dt><a name="STATS_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</p></dd><dt><a name="STATS_UNKNOWN_COMMAND_IN_SPEC"></a><span class="term">STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</span></dt><dd><p>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
+The AXFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the resolver starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and it points
+to a failure to catch exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer the zone to the
+given host, as required by the configured ACLs. The %1 and %2 represent the
+zone name and class, and the %3 and %4 the IP address and port of the peer
+requesting the transfer.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+The xfrout process rejected (with a REFUSED rcode) a request to transfer the
+zone to the given host because of the configured ACLs. The %1 and %2 represent
+the zone name and class, and the %3 and %4 the IP address and port of the peer
+requesting the transfer.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should only
+be the result of a rare local error such as a memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</p></dd><dt><a name="ZONEMGR_JITTER_TOO_BIG"></a><span class="term">ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</span></dt><dd><p>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</p></dd><dt><a name="ZONEMGR_KEYBOARD_INTERRUPT"></a><span class="term">ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</span></dt><dd><p>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</p></dd><dt><a name="ZONEMGR_LOAD_ZONE"></a><span class="term">ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</p></dd><dt><a name="ZONEMGR_NO_MASTER_ADDRESS"></a><span class="term">ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</span></dt><dd><p>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_SOA"></a><span class="term">ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</span></dt><dd><p>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</p></dd><dt><a name="ZONEMGR_NO_TIMER_THREAD"></a><span class="term">ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</span></dt><dd><p>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_CLASS"></a><span class="term">ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_NAME"></a><span class="term">ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_NOTIFY"></a><span class="term">ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_SHUTDOWN"></a><span class="term">ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_UNKNOWN"></a><span class="term">ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</span></dt><dd><p>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_FAILED"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_SUCCESS"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</p></dd><dt><a name="ZONEMGR_REFRESH_ZONE"></a><span class="term">ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</span></dt><dd><p>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</p></dd><dt><a name="ZONEMGR_SELECT_ERROR"></a><span class="term">ZONEMGR_SELECT_ERROR error with select(): %1</span></dt><dd><p>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</p></dd><dt><a name="ZONEMGR_SEND_FAIL"></a><span class="term">ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</span></dt><dd><p>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</p></dd><dt><a name="ZONEMGR_SESSION_ERROR"></a><span class="term">ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</span></dt><dd><p>
+The zonemgr process could not be started because it was unable to
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SESSION_TIMEOUT"></a><span class="term">ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</span></dt><dd><p>
+The zonemgr process could not be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SHUTDOWN"></a><span class="term">ZONEMGR_SHUTDOWN zone manager has shut down</span></dt><dd><p>
+A debug message, output when the zone manager has shut down completely.
+</p></dd><dt><a name="ZONEMGR_STARTING"></a><span class="term">ZONEMGR_STARTING zone manager starting</span></dt><dd><p>
+A debug message output when the zone manager starts up.
+</p></dd><dt><a name="ZONEMGR_TIMER_THREAD_RUNNING"></a><span class="term">ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</span></dt><dd><p>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_FAIL"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_NOTIFIED"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_SUCCESS"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
+</p></dd></dl></div><p>
+ </p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
new file mode 100644
index 0000000..bade381
--- /dev/null
+++ b/doc/guide/bind10-messages.xml
@@ -0,0 +1,5026 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash "—" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
+<book>
+ <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+ <bookinfo>
+ <title>BIND 10 Messages Manual</title>
+
+ <copyright>
+ <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+ </copyright>
+
+ <abstract>
+ <para>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </para>
+ <para>
+ This is the messages manual for BIND 10 version &__VERSION__;.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <ulink url="http://bind10.isc.org/docs"/>.
+ </para>
+ </abstract>
+
+ <releaseinfo>This is the messages manual for BIND 10 version
+ &__VERSION__;.</releaseinfo>
+ </bookinfo>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ <screen>IDENTIFICATION message-text</screen>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </para>
+ <para>
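+    <para>
+      As an illustration only (this is not part of the BIND 10 logging
+      implementation itself), the following minimal Python sketch shows how
+      such positional placeholders could be substituted.  The message text
+      used is that of ASIODNS_FETCH_COMPLETED, listed later in this manual;
+      the substituted values are hypothetical.
+    </para>
+<screen>
+# Minimal sketch of "%1", "%2" placeholder substitution (illustration only;
+# not the actual BIND 10 logging code).
+def expand(message_text, *values):
+    for index, value in enumerate(values, start=1):
+        message_text = message_text.replace("%" + str(index), str(value))
+    return message_text
+
+# Hypothetical values for the placeholders:
+print(expand("upstream fetch to %1(%2) has now completed", "192.0.2.1", 53))
+</screen>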
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </para>
+ <para>
+ For information on configuring and using BIND 10 logging,
+ refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+ </para>
+ </chapter>
+
+ <chapter id="messages">
+ <title>BIND 10 Messages</title>
+ <para>
+ <variablelist>
+
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
+<listitem><para>
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
+<listitem><para>
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
+<listitem><para>
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_INVALID_STATISTICS_DATA">
+<term>AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the authoritative server specified
+statistics data which is invalid according to the auth specification file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
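+Purely as an illustration of this check and the one described in
+AUTH_NOTIFY_QUESTIONS above, the following minimal Python sketch applies the
+same rules to a hypothetical parsed-message structure; it is not the server's
+actual packet-handling code.
+<screen>
+# Illustration only: NOTIFY sanity checks on a hypothetical question list.
+def check_notify(questions):
+    # questions: list of (name, rrclass, rrtype) tuples
+    if len(questions) != 1:
+        return "FORMERR"      # see AUTH_NOTIFY_QUESTIONS
+    name, rrclass, rrtype = questions[0]
+    if rrtype != "SOA":
+        return "FORMERR"      # see AUTH_NOTIFY_RRTYPE
+    return "NOERROR"
+
+print(check_notify([("example.com.", "IN", "SOA")]))
+</screen>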
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet, as it only responds to query packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CHECK_MSGQ_ALREADY_RUNNING">
+<term>BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</term>
+<listitem><para>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
+<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<listitem><para>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
+<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<listitem><para>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_STATISTICS_DATA">
+<term>BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_USER">
+<term>BIND10_INVALID_USER invalid user: %1</term>
+<listitem><para>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILLING_ALL_PROCESSES">
+<term>BIND10_KILLING_ALL_PROCESSES killing all started processes</term>
+<listitem><para>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILL_PROCESS">
+<term>BIND10_KILL_PROCESS killing process %1</term>
+<listitem><para>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_ALREADY_RUNNING">
+<term>BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</term>
+<listitem><para>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
+<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
+<listitem><para>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DISAPPEARED">
+<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
+<listitem><para>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
+<listitem><para>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<listitem><para>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_READING_BOSS_CONFIGURATION">
+<term>BIND10_READING_BOSS_CONFIGURATION reading boss configuration</term>
+<listitem><para>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_COMMAND">
+<term>BIND10_RECEIVED_COMMAND received command: %1</term>
+<listitem><para>
+The boss module received a command and shall now process it. The command
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_NEW_CONFIGURATION">
+<term>BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</term>
+<listitem><para>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_SIGNAL">
+<term>BIND10_RECEIVED_SIGNAL received signal %1</term>
+<listitem><para>
+The boss module received the given signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTED_PROCESS">
+<term>BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</term>
+<listitem><para>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTING_PROCESS">
+<term>BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</term>
+<listitem><para>
+The given process has ended unexpectedly and is now being restarted.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SELECT_ERROR">
+<term>BIND10_SELECT_ERROR error in select() call: %1</term>
+<listitem><para>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGKILL">
+<term>BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGKILL signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGTERM">
+<term>BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGTERM signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN">
+<term>BIND10_SHUTDOWN stopping the server</term>
+<listitem><para>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that does not work,
+it will send a SIGKILL signal to any processes still alive.
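+As an illustration only, the following minimal Python sketch shows this kind
+of escalating shutdown; the component objects and their methods are
+hypothetical, and this is not the boss module's actual code.
+<screen>
+# Illustration only: escalate from a polite shutdown command to SIGTERM and
+# finally SIGKILL.  The 'components' and their methods are hypothetical.
+import os
+import signal
+import time
+
+def stop_components(components, grace=5):
+    for comp in components:
+        comp.send_shutdown_command()           # ask nicely first
+    time.sleep(grace)
+    for comp in components:
+        if comp.is_running():
+            os.kill(comp.pid, signal.SIGTERM)  # then SIGTERM
+    time.sleep(grace)
+    for comp in components:
+        if comp.is_running():
+            os.kill(comp.pid, signal.SIGKILL)  # finally SIGKILL
+</screen>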
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN_COMPLETE">
+<term>BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</term>
+<listitem><para>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_CAUSE">
+<term>BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</term>
+<listitem><para>
+The socket creator reported an error when creating a socket, but the function
+that failed is unknown (not one of 'S' for socket or 'B' for bind).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_RESPONSE">
+<term>BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</term>
+<listitem><para>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
+<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
+<listitem><para>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_EOF">
+<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
+<listitem><para>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_INIT">
+<term>BIND10_SOCKCREATOR_INIT initializing socket creator parser</term>
+<listitem><para>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_KILL">
+<term>BIND10_SOCKCREATOR_KILL killing the socket creator</term>
+<listitem><para>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not usually happen.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TERMINATE">
+<term>BIND10_SOCKCREATOR_TERMINATE terminating socket creator</term>
+<listitem><para>
+The boss module sends a request to terminate to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TRANSPORT_ERROR">
+<term>BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</term>
+<listitem><para>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_CREATED">
+<term>BIND10_SOCKET_CREATED successfully created socket %1</term>
+<listitem><para>
+The socket creator successfully created and sent a requested socket; it has
+the given file descriptor number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_ERROR">
+<term>BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</term>
+<listitem><para>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with the given error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_GET">
+<term>BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</term>
+<listitem><para>
+The boss forwards a request for a socket to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS">
+<term>BIND10_STARTED_PROCESS started %1</term>
+<listitem><para>
+The given process has successfully been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS_PID">
+<term>BIND10_STARTED_PROCESS_PID started %1 (PID %2)</term>
+<listitem><para>
+The given process has successfully been started, and has the given PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING">
+<term>BIND10_STARTING starting BIND10: %1</term>
+<listitem><para>
+Informational message on startup that shows the full version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS">
+<term>BIND10_STARTING_PROCESS starting process %1</term>
+<listitem><para>
+The boss module is starting the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT">
+<term>BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT_ADDRESS">
+<term>BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_COMPLETE">
+<term>BIND10_STARTUP_COMPLETE BIND 10 started</term>
+<listitem><para>
+All modules have been successfully started, and BIND 10 is now running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_ERROR">
+<term>BIND10_STARTUP_ERROR error during startup: %1</term>
+<listitem><para>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT">
+<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<listitem><para>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STOP_PROCESS">
+<term>BIND10_STOP_PROCESS asking %1 to shut down</term>
+<listitem><para>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_UNKNOWN_CHILD_PROCESS_ENDED">
+<term>BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</term>
+<listitem><para>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
+<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
+<listitem><para>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in the cache
+(they have probably expired already). Therefore it pretends the message was
+not found.
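+As a rough illustration of this behaviour, the following Python sketch
+rebuilds an answer from separately cached RRsets; the cache structures and
+helper names are hypothetical, not the actual resolver cache code.
+<screen>
+# Illustration only: if any referenced RRset is missing or expired, act as
+# if the whole message were not cached.  All structures are hypothetical.
+def assemble_message(entry, rrset_cache, now):
+    rrsets = []
+    for key in entry.rrset_keys:
+        rrset = rrset_cache.get(key)
+        if rrset is None or now >= rrset.expires_at:
+            return None           # pretend the message was not found
+        rrsets.append(rrset)
+    return entry.build_message(rrsets)
+</screen>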
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_FOUND">
+<term>CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</term>
+<listitem><para>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UNKNOWN">
+<term>CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</term>
+<listitem><para>
+Debug message. The requested data was not found in the local zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UPDATE">
+<term>CACHE_LOCALZONE_UPDATE updating local zone element at key %1</term>
+<listitem><para>
+Debug message issued when there's an update to the local zone section of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_DEINIT">
+<term>CACHE_MESSAGES_DEINIT deinitialized message cache</term>
+<listitem><para>
+Debug message. It is issued when the server deinitializes the message cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_EXPIRED">
+<term>CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The requested data was found in the message cache, but it
+has already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_FOUND">
+<term>CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. We found the whole message in the cache, so it can be returned
+to the user without any further lookups.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_INIT">
+<term>CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</term>
+<listitem><para>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_REMOVE">
+<term>CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</term>
+<listitem><para>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNCACHEABLE">
+<term>CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</term>
+<listitem><para>
+Debug message, noting that the given message cannot be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNKNOWN">
+<term>CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</term>
+<listitem><para>
+Debug message. The message cache didn't find any entry for the given key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UPDATE">
+<term>CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</term>
+<listitem><para>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new one
+is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_DEEPEST">
+<term>CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT">
+<term>CACHE_RESOLVER_INIT initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT_INFO">
+<term>CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for the given class. The
+only difference from CACHE_RESOLVER_INIT is the format of the passed
+information; otherwise it does the same.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_MSG">
+<term>CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_RRSET">
+<term>CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_MSG">
+<term>CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_RRSET">
+<term>CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find an RRset (such a lookup
+usually originates internally from the resolver).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_NO_QUESTION">
+<term>CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</term>
+<listitem><para>
+The cache tried to fill the found data into the response message, but it
+discovered that the message contains no question section, which is invalid.
+This is likely a programming error; please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to look up a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to look up an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_MSG">
+<term>CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating a message in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_RRSET">
+<term>CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating an RRset in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of the message. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the RRset will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_EXPIRED">
+<term>CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</term>
+<listitem><para>
+Debug message. The requested data was found in the RRset cache. However, it has
+expired, so the cache removed it and will pretend nothing was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_INIT">
+<term>CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</term>
+<listitem><para>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_LOOKUP">
+<term>CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</term>
+<listitem><para>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_NOT_FOUND">
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_REMOVE_OLD">
+<term>CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UNTRUSTED">
+<term>CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from a more trusted source, so the old one is kept and the
+new one is ignored.
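+A minimal sketch of such a comparison is shown below; the numeric trust
+levels and names are hypothetical and only illustrate the rule described
+here, not the cache's real internals.
+<screen>
+# Illustration only: keep the cached RRset when its trust level is higher.
+TRUST_GLUE = 1            # hypothetical trust levels
+TRUST_AUTH_ANSWER = 3
+
+def should_replace(cached_trust, new_trust):
+    return new_trust >= cached_trust
+
+if not should_replace(cached_trust=TRUST_AUTH_ANSWER, new_trust=TRUST_GLUE):
+    print("keeping old RRset")    # corresponds to CACHE_RRSET_UNTRUSTED
+</screen>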
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UPDATE">
+<term>CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</term>
+<listitem><para>
+Debug message. The RRset cache is updating its data with the given RRset.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low-level error: an attempt was made to read data from the
+message queue daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made; it
+should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending on whether we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if interpreted as message lengths. The first is the total length of
+the message; the second is the length of the header. The header
+and its length (2 bytes) are counted in the total length.
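+A minimal Python sketch of such a sanity check, assuming a 4-byte total
+length field followed by the 2-byte header length described above (not the
+actual library code), might look like this:
+<screen>
+# Illustration only.  Assumes: 4-byte total length, then 2-byte header
+# length; the header plus its own 2-byte length field must fit in the total.
+import struct
+
+def check_lengths(total_len_bytes, header_len_bytes):
+    (total_len,) = struct.unpack("!I", total_len_bytes)
+    (header_len,) = struct.unpack("!H", header_len_bytes)
+    if total_len == 0 or header_len + 2 > total_len:
+        raise ValueError("invalid length parameters (%d, %d)"
+                         % (total_len, header_len))
+    return total_len, header_len
+</screen>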
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of the message on the socket, but it
+is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for the reason
+listed. It is unlikely that any program will be able to continue without
+this communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
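+As an illustration of the general idea only (the version numbers and upgrade
+steps shown are hypothetical, not the real configuration database format),
+such an automatic upgrade path can be thought of as follows:
+<screen>
+# Illustration only: apply stored upgrade steps until the current version.
+UPGRADES = {1: lambda cfg: dict(cfg, version=2),
+            2: lambda cfg: dict(cfg, version=3)}   # hypothetical steps
+CURRENT_VERSION = 3
+
+def upgrade(config):
+    version = config.get("version", 1)
+    while version != CURRENT_VERSION and version in UPGRADES:
+        config = UPGRADES[version](config)
+        version = config["version"]
+    return config
+
+print(upgrade({"version": 1, "Logging": {}}))
+</screen>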
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE">
+<term>CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</term>
+<listitem><para>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_CONFIG_DATA">
+<term>CMDCTL_BAD_CONFIG_DATA error in config data: %1</term>
+<listitem><para>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_PASSWORD">
+<term>CMDCTL_BAD_PASSWORD bad password for user: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_ERROR">
+<term>CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_TIMEOUT">
+<term>CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</term>
+<listitem><para>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_ERROR">
+<term>CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</term>
+<listitem><para>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_SENT">
+<term>CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</term>
+<listitem><para>
+This debug message indicates that the given command has been sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_SUCH_USER">
+<term>CMDCTL_NO_SUCH_USER username not found in user database: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_USER_ENTRIES_READ">
+<term>CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SEND_COMMAND">
+<term>CMDCTL_SEND_COMMAND sending command %1 to module %2</term>
+<listitem><para>
+This debug message indicates that the given command is being sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED">
+<term>CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</term>
+<listitem><para>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
+<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_UNCAUGHT_EXCEPTION">
+<term>CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_USER_DATABASE_READ_ERROR">
+<term>CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG">
+<term>CONFIG_CCSESSION_MSG error in CC session message: %1</term>
+<listitem><para>
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND 10 module is sending a bad message.
+The message itself is ignored by this module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
+<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
+<listitem><para>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAILED">
+<term>CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_JSON_PARSE">
+<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
+<listitem><para>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_CONFIG_ERRORS">
+<term>CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</term>
+<listitem><para>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_EXPLICIT">
+<term>CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_EXPLICIT">
+<term>CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_WILD">
+<term>CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_WILD_MATCH">
+<term>CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
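+</para>
+<para>
+For illustration only, a hypothetical "loggers" list might contain both an
+explicitly-named entry and a wildcard entry. The option names used below are
+assumptions made for this sketch rather than a definitive reference:
+</para>
+<screen>
+[
+  { "name": "Auth", "severity": "DEBUG", "debuglevel": 40 },
+  { "name": "*",    "severity": "INFO" }
+]
+</screen>
+<para>
+Under these assumptions, a program whose logger specification matches the
+explicit "Auth" entry uses that entry, while the wildcard entry only applies
+to loggers not already matched by an explicitly-named entry.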
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND 10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
+<listitem><para>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file. The reason for the failure
+is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_CREATE">
+<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
+<listitem><para>
+This is a debug message issued during startup when the hotspot cache
+is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DESTROY">
+<term>DATASRC_CACHE_DESTROY destroying the hotspot cache</term>
+<listitem><para>
+Debug information. The hotspot cache is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_DISABLE">
+<term>DATASRC_CACHE_DISABLE disabling the hotspot cache</term>
+<listitem><para>
+A debug message issued when the hotspot cache is disabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_ENABLE">
+<term>DATASRC_CACHE_ENABLE enabling the hotspot cache</term>
+<listitem><para>
+A debug message issued when the hotspot cache is enabled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_EXPIRED">
+<term>DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</term>
+<listitem><para>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FOUND">
+<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
+<listitem><para>
+Debug information. An item was successfully located in the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_FULL">
+<term>DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</term>
+<listitem><para>
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_INSERT">
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</term>
+<listitem><para>
+A debug message indicating that a new item is being inserted into the hotspot
+cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_NOT_FOUND">
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</term>
+<listitem><para>
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_OLD_FOUND">
+<term>DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</term>
+<listitem><para>
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_REMOVE">
+<term>DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</term>
+<listitem><para>
+Debug information. An item is being removed from the hotspot cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_CACHE_SLOTS">
+<term>DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</term>
+<listitem><para>
+The maximum allowed number of items in the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. A size of 0
+means no limit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED">
+<term>DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</term>
+<listitem><para>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_RECORDS">
+<term>DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</term>
+<listitem><para>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_TTL_MISMATCH">
+<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
+<listitem><para>
+The data source backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
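+</para>
+<para>
+As a purely illustrative example (the names and addresses below are
+placeholders from the documentation ranges, not real data), an RRset whose
+records carry different TTLs would look like this in master file form:
+</para>
+<screen>
+www.example.org. 3600 IN A 192.0.2.1
+www.example.org. 7200 IN A 192.0.2.2
+</screen>
+<para>
+In a case like this the data source reports the mismatch and uses the lower
+TTL (3600) for the whole RRset, leaving the stored data untouched.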
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION">
+<term>DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a delegation to a
+different zone at the given domain name. It will return that instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION_EXACT">
+<term>DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</term>
+<listitem><para>
+The program found the domain requested, but it is a delegation point to a
+different zone, so it is not authoritative for this domain name. It will
+return the NS record instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DNAME">
+<term>DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a DNAME redirection to a
+different place in the domain space at the given domain name. It will return
+that instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL">
+<term>DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</term>
+<listitem><para>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS name space. So we
+return NXRRSET instead of NXDOMAIN.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
+<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXRRSET">
+<term>DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_RRSET">
+<term>DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE">
+<term>DATASRC_DATABASE_ITERATE iterating zone %1</term>
+<listitem><para>
+The program is reading the whole zone, i.e. not searching for particular data,
+but going through each of the RRsets in it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_END">
+<term>DATASRC_DATABASE_ITERATE_END iterating zone finished</term>
+<listitem><para>
+While iterating through the zone, the program reached the end of the data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_NEXT">
+<term>DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</term>
+<listitem><para>
+While iterating through the zone, the program extracted the next RRset from
+it. The name and RR type of the RRset are indicated in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_TTL_MISMATCH">
+<term>DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</term>
+<listitem><para>
+While iterating through the zone, the TTL values of the RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
+<term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_CREATED">
+<term>DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_DESTROYED">
+<term>DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information. A zone updater object is destroyed, either after
+successfully making updates to the shown zone on the shown backend database,
+or after a failure to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACK">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also indicate a bug in the application that forgot to commit
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to roll back the incomplete updates, but the
+rollback unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation. In either
+case, if this message is logged, the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD">
+<term>DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</term>
+<listitem><para>
+The database doesn't contain a directly matching domain, but it does contain
+a wildcard entry which is being used to synthesize the answer.
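+</para>
+<para>
+A minimal illustration of this situation, using placeholder names and a
+documentation address rather than anything taken from a real zone:
+</para>
+<screen>
+*.example.org. 3600 IN A 192.0.2.1
+; a query for www.example.org./A matches no exact name,
+; so the answer is synthesized from the wildcard above
+</screen>
+<para>
+The synthesized RRset carries the name that was actually queried for, not the
+wildcard owner name.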
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_NS">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</term>
+<listitem><para>
+The database was queried to provide glue data and it didn't find a direct
+match. It could synthesize the data from the given wildcard, but matching
+wildcards is forbidden under a zone cut, and one was found. Therefore the
+delegation will be returned instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_SUB">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</term>
+<listitem><para>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, so this name is effectively an empty non-terminal (from the protocol
+point of view it is an empty non-terminal, but the code discovers it
+differently).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_EMPTY">
+<term>DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</term>
+<listitem><para>
+The given wildcard exists implicitly in the domain space, as an empty
+non-terminal (e.g. there's something like subdomain.*.example.org, so
+*.example.org exists implicitly, but is empty). This will produce NXRRSET,
+because the constructed domain is empty, as is the wildcard.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DO_QUERY">
+<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
+<listitem><para>
+A debug message indicating that a query for the given name and RR type is being
+processed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_RRSET">
+<term>DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</term>
+<listitem><para>
+Debug information. An RRset is being added to the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
+<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
+<listitem><para>
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ADD_ZONE">
+<term>DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</term>
+<listitem><para>
+Debug information. A zone is being added into the in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_ANY_SUCCESS">
+<term>DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</term>
+<listitem><para>
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME">
+<term>DATASRC_MEM_CNAME CNAME at the domain '%1'</term>
+<listitem><para>
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_COEXIST">
+<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
+<listitem><para>
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- some other data was added to a domain that already
+contains a CNAME.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CNAME_TO_NONEMPTY">
+<term>DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</term>
+<listitem><para>
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids the coexistence of a CNAME with any
+other data (RFC 1034, section 3.6.2). This indicates a problem with the
+provided data.
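+</para>
+<para>
+For illustration, zone data like the following (placeholder names only) would
+trigger this error, because the second record tries to coexist with a CNAME:
+</para>
+<screen>
+www.example.org. 3600 IN CNAME host.example.org.
+www.example.org. 3600 IN TXT   "not allowed next to a CNAME"
+</screen>
+<para>
+Only the CNAME may exist at that name; any other data must be removed or moved
+to a different name.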
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_CREATE">
+<term>DATASRC_MEM_CREATE creating zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DELEG_FOUND">
+<term>DATASRC_MEM_DELEG_FOUND delegation found at '%1'</term>
+<listitem><para>
+Debug information. A delegation point was found above the requested record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DESTROY">
+<term>DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class</term>
+<listitem><para>
+Debug information. A zone from the in-memory data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_ENCOUNTERED">
+<term>DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME</term>
+<listitem><para>
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_FOUND">
+<term>DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</term>
+<listitem><para>
+Debug information. A DNAME was found instead of the requested information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DNAME_NS">
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
+<listitem><para>
+A request was made for DNAME and NS records to be put into the same
+domain, which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with the provided data.
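+</para>
+<para>
+A contrived example of the forbidden combination, using placeholder names
+(sub.example.org is not the apex of the zone being loaded):
+</para>
+<screen>
+sub.example.org. 3600 IN DNAME example.net.
+sub.example.org. 3600 IN NS    ns1.example.net.
+</screen>
+<para>
+At the zone apex this combination is allowed; below the apex one of the two
+records has to go.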
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DOMAIN_EMPTY">
+<term>DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</term>
+<listitem><para>
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_DUP_RRSET">
+<term>DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'</term>
+<listitem><para>
+An RRset is being inserted into the in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_EXACT_DELEGATION">
+<term>DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'</term>
+<listitem><para>
+Debug information. There's an NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, and a delegation
+should be followed instead. The requested domain is the apex of another zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND">
+<term>DATASRC_MEM_FIND find '%1/%2'</term>
+<listitem><para>
+Debug information. A search for the requested RRset is being started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_FIND_ZONE">
+<term>DATASRC_MEM_FIND_ZONE looking for zone '%1'</term>
+<listitem><para>
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_LOAD">
+<term>DATASRC_MEM_LOAD loading zone '%1' from file '%2'</term>
+<listitem><para>
+Debug information. The content of the master file is being loaded into memory.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NOT_FOUND">
+<term>DATASRC_MEM_NOT_FOUND requested domain '%1' not found</term>
+<listitem><para>
+Debug information. The requested domain does not exist.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NS_ENCOUNTERED">
+<term>DATASRC_MEM_NS_ENCOUNTERED encountered a NS</term>
+<listitem><para>
+Debug information. While searching for the requested domain, an NS record was
+encountered on the way (a delegation). This may stop the search.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_NXRRSET">
+<term>DATASRC_MEM_NXRRSET no such type '%1' at '%2'</term>
+<listitem><para>
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_OUT_OF_ZONE">
+<term>DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'</term>
+<listitem><para>
+An attempt was made to add the domain into a zone that shouldn't contain it
+(e.g. the domain is not a subdomain of the zone origin). This indicates a
+problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_RENAME">
+<term>DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'</term>
+<listitem><para>
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard), so it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SINGLETON">
+<term>DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'</term>
+<listitem><para>
+Some resource types are singletons -- only one record of the type is allowed
+in a domain (for example CNAME or SOA). This indicates a problem with the
+provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUCCESS">
+<term>DATASRC_MEM_SUCCESS query for '%1/%2' successful</term>
+<listitem><para>
+Debug information. The requested record was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SUPER_STOP">
+<term>DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty</term>
+<listitem><para>
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty non-terminal, therefore it is treated as an
+NXRRSET case (i.e. the domain exists, but it doesn't have the requested record
+type).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_SWAP">
+<term>DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')</term>
+<listitem><para>
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for performing the manipulation in an
+exception-safe manner -- the new data are prepared in a different zone object
+and, once that succeeds, the contents are swapped. The original object then
+holds the new data and the other one can be safely destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_CANCEL">
+<term>DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'</term>
+<listitem><para>
+Debug information. A domain above a wildcard was reached, but there's
+something below the requested domain, so the wildcard doesn't apply here.
+This behaviour is specified by RFC 1034, section 4.3.3.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_MEM_WILDCARD_NS">
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
+<listitem><para>
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD">
+<term>DATASRC_META_ADD adding a data source into meta data source</term>
+<listitem><para>
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
+<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
+<listitem><para>
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_META_REMOVE">
+<term>DATASRC_META_REMOVE removing data source from meta data source</term>
+<listitem><para>
+Debug information. A data source is being removed from the meta data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC">
+<term>DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'</term>
+<listitem><para>
+Debug information. An NSEC record covering this zone is being added.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_NSEC3">
+<term>DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'</term>
+<listitem><para>
+Debug information. An NSEC3 record for the given zone is being added to the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_RRSET">
+<term>DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message</term>
+<listitem><para>
+Debug information. An RRset is being added to the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_ADD_SOA">
+<term>DATASRC_QUERY_ADD_SOA adding SOA of '%1'</term>
+<listitem><para>
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_AUTH_FAIL">
+<term>DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the authoritative query. A code
+of 1 means a general error and 2 means not implemented. The data source
+should have logged the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_BAD_REFERRAL">
+<term>DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</term>
+<listitem><para>
+The domain lives in another zone, but it is not possible to generate referral
+information for it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CACHED">
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</term>
+<listitem><para>
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
+<term>DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</term>
+<listitem><para>
+Debug information. While processing a query, a lookup in the hotspot cache
+is being made.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_COPY_AUTH">
+<term>DATASRC_QUERY_COPY_AUTH copying authoritative section into message</term>
+<listitem><para>
+Debug information. The whole referral information is being copied into the
+response message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_DELEGATION">
+<term>DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</term>
+<listitem><para>
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
+<listitem><para>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_EMPTY_DNAME">
+<term>DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</term>
+<listitem><para>
+During an attempt to synthesize a CNAME from this DNAME, it was discovered
+that the DNAME is empty (it has no records). This indicates a problem with the
+supplied data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FAIL">
+<term>DATASRC_QUERY_FAIL query failed</term>
+<listitem><para>
+Some subtask of query processing failed. The reason should have been reported
+already and a SERVFAIL will be returned to the querying system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
+<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
+<listitem><para>
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_MX_ADDITIONAL">
+<term>DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned domain name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GET_NS_ADDITIONAL">
+<term>DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'</term>
+<listitem><para>
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned domain name, so A/AAAA records for it are looked up
+and put into the additional section.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_GLUE_FAIL">
+<term>DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the glue query. A code of 1 means
+a general error and 2 means not implemented. The data source should have
+logged the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_INVALID_OP">
+<term>DATASRC_QUERY_INVALID_OP invalid query operation requested</term>
+<listitem><para>
+This indicates a programmer error. DO_QUERY was called with an unknown
+operation code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_AUTH">
+<term>DATASRC_QUERY_IS_AUTH auth query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is an auth query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_GLUE">
+<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for glue addresses.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
+<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_REF">
+<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a query for referral information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_IS_SIMPLE">
+<term>DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</term>
+<listitem><para>
+Debug information. The last DO_QUERY is a simple query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISPLACED_TASK">
+<term>DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</term>
+<listitem><para>
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_NS">
+<term>DATASRC_QUERY_MISSING_NS missing NS records for '%1'</term>
+<listitem><para>
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_MISSING_SOA">
+<term>DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA</term>
+<listitem><para>
+The answer should have been a negative one (e.g. indicating the nonexistence
+of something). To do so, an SOA record should be put into the authority
+section, but the zone does not have one. This indicates a problem with the
+provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NOGLUE_FAIL">
+<term>DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the no-glue query. A code of 1
+means a general error and 2 means not implemented. The data source should
+have logged the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
+<listitem><para>
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC">
+<term>DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC record into the message failed, because the zone
+does not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_DS_NSEC3">
+<term>DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone</term>
+<listitem><para>
+An attempt to add an NSEC3 record into the message failed, because the zone
+does not have any DS record. This indicates a problem with the provided data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_NO_ZONE">
+<term>DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</term>
+<listitem><para>
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROCESS">
+<term>DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</term>
+<listitem><para>
+Debug information. A query for the given name and type in the given class is
+being processed now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</term>
+<listitem><para>
+The user wants DNSSEC and we discovered that the entity (either the domain or
+the record) doesn't exist, but there was an error getting an NSEC/NSEC3 record
+to prove the nonexistence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_REF_FAIL">
+<term>DATASRC_QUERY_REF_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the query for referral
+information. A code of 1 means a general error and 2 means not implemented.
+The data source should have logged the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_RRSIG">
+<term>DATASRC_QUERY_RRSIG unable to answer RRSIG query</term>
+<listitem><para>
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SIMPLE_FAIL">
+<term>DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1</term>
+<listitem><para>
+The underlying data source failed to answer the simple query. A code of 1
+means a general error and 2 means not implemented. The data source should
+have logged the specific error already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
+<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
+<listitem><para>
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TASK_FAIL">
+<term>DATASRC_QUERY_TASK_FAIL task failed with %1</term>
+<listitem><para>
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
+<listitem><para>
+A CNAME led to another CNAME, which led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might also be a loop. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with the supplied data.
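+</para>
+<para>
+The simplest form of the problem is a two-element loop like the following
+(placeholder names only):
+</para>
+<screen>
+a.example.org. 3600 IN CNAME b.example.org.
+b.example.org. 3600 IN CNAME a.example.org.
+</screen>
+<para>
+Following such a chain never terminates, so processing stops once the limit of
+16 CNAMEs is reached.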
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_UNKNOWN_RESULT">
+<term>DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask</term>
+<listitem><para>
+This indicates a programmer error. The answer from the subtask doesn't look
+like anything known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD">
+<term>DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'</term>
+<listitem><para>
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_FAIL">
+<term>DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</term>
+<listitem><para>
+An error occurred during an attempt to cover the domain with a wildcard. The
+exact kind of error should have been reported already.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_QUERY_WILDCARD_REFERRAL">
+<term>DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</term>
+<listitem><para>
+While processing a wildcard, a referral was encountered, but it wasn't
+possible to get enough information for it. The code is 1 for error, 2 for not
+implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CLOSE">
+<term>DATASRC_SQLITE_CLOSE closing SQLite database</term>
+<listitem><para>
+Debug information. The SQLite data source is closing the database file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNCLOSE">
+<term>DATASRC_SQLITE_CONNCLOSE Closing sqlite database</term>
+<listitem><para>
+The database file is no longer needed and is being closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNOPEN">
+<term>DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</term>
+<listitem><para>
+The database file is being opened so it can start providing data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CREATE">
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_DESTROY">
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
+<listitem><para>
+Debug information. An instance of SQLite data source is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_DROPCONN">
+<term>DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</term>
+<listitem><para>
+The object around a database connection is being destroyed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
+<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</term>
+<listitem><para>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND">
+<term>DATASRC_SQLITE_FIND looking for RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up a resource record
+set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS">
+<term>DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'</term>
+<listitem><para>
+Debug information. The data source is looking up the addresses for the given
+domain name.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDADDRS_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT">
+<term>DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up an exact resource
+record.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDEXACT_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREC">
+<term>DATASRC_SQLITE_FINDREC looking for record '%1/%2'</term>
+<listitem><para>
+Debug information. The SQLite data source is looking up records of the given name
+and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF">
+<term>DATASRC_SQLITE_FINDREF looking for referral at '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is determining whether this domain
+is a referral and, if so, where it points.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
+<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was trying to identify whether there's a referral, but
+the data source contains a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_BAD_CLASS">
+<term>DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</term>
+<listitem><para>
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3">
+<term>DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'</term>
+<listitem><para>
+Debug information. We're trying to look up an NSEC3 record in the SQLite data
+source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE">
+<term>DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</term>
+<listitem><para>
+The SQLite data source was asked to provide an NSEC3 record for the given
+zone, but it doesn't contain that zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_NEWCONN">
+<term>DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</term>
+<listitem><para>
+A wrapper object to hold the database connection is being initialized.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_OPEN">
+<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
+<listitem><para>
+Debug information. The SQLite data source is loading an SQLite database from
+the provided file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS">
+<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
+<listitem><para>
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
+<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
+<listitem><para>
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_SETUP">
+<term>DATASRC_SQLITE_SETUP setting up SQLite database</term>
+<listitem><para>
+The database for the SQLite data source was found to be empty. It is assumed
+this is the first run and it is being initialized with the current schema.
+It will still contain no data, but it will be ready for use.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_CLASS_NOT_CH">
+<term>DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</term>
+<listitem><para>
+An error message indicating that a query requesting an RR of a class other
+than CH was sent to the static data source (which only handles CH queries).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_CREATE">
+<term>DATASRC_STATIC_CREATE creating the static datasource</term>
+<listitem><para>
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_STATIC_FIND">
+<term>DATASRC_STATIC_FIND looking for '%1/%2'</term>
+<listitem><para>
+Debug information. This resource record set is being looked up in the static
+data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_UNEXPECTED_QUERY_STATE">
+<term>DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</term>
+<listitem><para>
+This indicates a programming error. An internal task of unknown type was
+generated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LIBXFRIN_DIFFERENT_TTL">
+<term>LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</term>
+<listitem><para>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these don't match each other. As they are combined,
+the later TTL is overwritten with the earlier one in the sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
+<listitem><para>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
+<listitem><para>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
+<listitem><para>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
+<listitem><para>
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
+<listitem><para>
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
+<listitem><para>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
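+</para>
+<para>
+Together with LOG_BAD_DESTINATION and LOG_BAD_SEVERITY, the values being
+checked here come from the logger configuration, which has roughly the
+following shape (shown for illustration only; the exact item names may
+differ between versions):
+</para>
+<screen>
+"loggers": [ {
+    "name": "*",
+    "severity": "INFO",
+    "output_options": [ { "destination": "console", "output": "stdout" } ]
+} ]
+</screen>
+<para>
+An output stream value other than "stdout" or "stderr" when the destination
+is "console" results in this message.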
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
+<listitem><para>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
+<listitem><para>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for
+the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
+<listitem><para>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
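+</para>
+<para>
+For illustration, a valid directive names exactly one namespace (the
+namespace shown is an example only):
+</para>
+<screen>
+$NAMESPACE isc::log
+</screen>
+<para>
+A directive such as "$NAMESPACE isc log", which supplies two arguments,
+produces this error.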
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates that the message compiler found a line in
+the message file comprising just the "%" and nothing else.
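+</para>
+<para>
+For illustration, a well-formed message definition line looks like the
+following (the message ID and text are hypothetical):
+</para>
+<screen>
+% EXAMPLE_SOMETHING_HAPPENED something happened while processing %1
+</screen>
+<para>
+A line consisting of the "%" alone, with no ID or text after it, produces
+this error.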
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates that the message compiler found a line
+in the message file comprising just the "%" and the message identification,
+but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
+<listitem><para>
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times, once for every such unknown message
+identification found in the file.
+</para><para>
+There may be several reasons why this message may appear:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
+</para><para>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</para><para>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
+<listitem><para>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
+<listitem><para>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<listitem><para>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. it may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
+<listitem><para>
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages, although the IDs of the messages are not changed.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
+<listitem><para>
+The specified error was encountered reading from the named message file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
+<listitem><para>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
+<listitem><para>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
+<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
+<listitem><para>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_OPCODE">
+<term>NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QID">
+<term>NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QUERY_NAME">
+<term>NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_QR_NOT_SET">
+<term>NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION">
+<term>NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_RETRY_EXCEEDED">
+<term>NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</term>
+<listitem><para>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SENDING_NOTIFY">
+<term>NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</term>
+<listitem><para>
+A notify message is sent to the secondary nameserver at the given
+address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SOCKET_ERROR">
+<term>NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</term>
+<listitem><para>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SOCKET_RECV_ERROR">
+<term>NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</term>
+<listitem><para>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_TIMEOUT">
+<term>NOTIFY_OUT_TIMEOUT retry notify to %1#%2</term>
+<listitem><para>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
+<listitem><para>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
+<listitem><para>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
+<listitem><para>
+The NSAS (nameserver address store - part of the resolver) made a query
+for an RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
+<listitem><para>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
+<listitem><para>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
+<listitem><para>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
+<listitem><para>
+An NSAS (nameserver address store - part of the resolver) debug message
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
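+</para>
+<para>
+A minimal sketch of the kind of damping calculation being described,
+assuming a simple exponentially weighted moving average; the weighting
+and logic used by the actual NSAS code may differ:
+</para>
+<programlisting>
+// Hypothetical damped RTT update: the stored RTT moves only part of the
+// way towards each new measurement, so one slow reply does not dominate.
+unsigned int
+updateRtt(unsigned int old_rtt_ms, unsigned int measured_rtt_ms) {
+    const double OLD_WEIGHT = 0.7;   // assumed weight, for illustration only
+    return (unsigned int)(OLD_WEIGHT * old_rtt_ms +
+                          (1.0 - OLD_WEIGHT) * measured_rtt_ms);
+}
+</programlisting>
+<para>
+With such a weighting, the new RTT used in future server selection is not
+simply the last measured round-trip time, as the description above notes.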
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+The NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with the different type and class given in the message.
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_ANSWER">
+<term>RESLIB_ANSWER answer received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_CNAME">
+<term>RESLIB_CNAME CNAME received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_DEEPEST">
+<term>RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</term>
+<listitem><para>
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
+<listitem><para>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<listitem><para>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent). However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
+<listitem><para>
+A debug message, this indicates that a response was received for the specified
+query and was categorized as a referral. However, the received message did
+not contain any NS RRsets. This may indicate a programming error in the
+response classification code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
+<listitem><para>
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that either an NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question. Previous debug
+messages will have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOL">
+<term>RESLIB_PROTOCOL protocol error in answer for %1: %3</term>
+<listitem><para>
+A debug message indicating that a protocol error was received. As there
+are no retries left, an error will be reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
+<listitem><para>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path. A SERVFAIL will be returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache. The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<listitem><para>
+This is a debug message and indicates that the cache lookup made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question. The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RESOLVE">
+<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple. The first action will be to look up
+the specified tuple in the cache. The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<listitem><para>
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer. The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RTT">
+<term>RESLIB_RTT round-trip time of last query calculated as %1 ms</term>
+<listitem><para>
+A debug message giving the round-trip time of the last query and response.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</term>
+<listitem><para>
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
+<listitem><para>
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
+<listitem><para>
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
+<listitem><para>
+This is a debug message and should only be seen in unit tests. A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUT">
+<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
+<listitem><para>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<listitem><para>
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_TRUNCATED">
+<term>RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP</term>
+<listitem><para>
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP. There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_UPSTREAM">
+<term>RESLIB_UPSTREAM sending upstream query for <%1> to %2</term>
+<listitem><para>
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
+<listitem><para>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
+<listitem><para>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
+<listitem><para>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
+<listitem><para>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters are not changed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
+<listitem><para>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
+<listitem><para>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_CREATED">
+<term>RESOLVER_CREATED main resolver object created</term>
+<listitem><para>
+This is a debug message indicating that the main resolver object has
+been created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
+<listitem><para>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
+<listitem><para>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FAILED">
+<term>RESOLVER_FAILED resolver failed, reason: %1</term>
+<listitem><para>
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
+<listitem><para>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
+<listitem><para>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
+servers.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
+<listitem><para>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_IXFR">
+<term>RESOLVER_IXFR IXFR request received</term>
+<listitem><para>
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
+<listitem><para>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
+<listitem><para>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
+<listitem><para>
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
+<listitem><para>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
+<listitem><para>
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
+<listitem><para>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
+<listitem><para>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
+<listitem><para>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
+<listitem><para>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
+<listitem><para>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
+<listitem><para>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
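+</para>
+<para>
+For reference, a query ACL of the kind being evaluated here consists of
+entries pairing an action with a client address range, along the following
+lines (the networks shown and the exact configuration item name are
+illustrative only):
+</para>
+<screen>
+"query_acl": [
+    { "action": "ACCEPT", "from": "192.0.2.0/24" },
+    { "action": "DROP",   "from": "0.0.0.0/0" }
+]
+</screen>
+<para>
+Queries matching a REJECT or DROP entry instead produce the
+RESOLVER_QUERY_REJECTED or RESOLVER_QUERY_DROPPED messages described below.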
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
+<listitem><para>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
+<listitem><para>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
+<listitem><para>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
+<listitem><para>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
+<listitem><para>
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<listitem><para>
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
+<listitem><para>
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<listitem><para>
+This debug message lists the parameters being set for the resolver. These are:
+the query timeout, the timeout (in ms) used for queries originated by the
+resolver to upstream servers; the client timeout, the time allowed to resolve
+a client's query, after which the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query; the lookup timeout, the time at which
+the resolver gives up trying to resolve a query; and the retry count, the
+number of times the resolver will retry a query to an upstream server if it
+gets a timeout.
+</para><para>
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries times out, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process and any data received is added to the cache.
+However, there comes a time - the lookup timeout - when even the resolver
+gives up. At this point it waits for pending upstream queries to complete or
+time out and then drops the query.
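+</para>
+<para>
+As an illustration, these parameters could be set from bindctl along the
+following lines (the item names and values are examples only and may differ
+in your version):
+</para>
+<screen>
+> config set Resolver/timeout_query 2000
+> config set Resolver/timeout_client 4000
+> config set Resolver/timeout_lookup 30000
+> config set Resolver/retries 3
+> config commit
+</screen>
+<para>
+Values that are too small or negative are rejected, as described in
+RESOLVER_QUERY_TIME_SMALL, RESOLVER_CLIENT_TIME_SMALL,
+RESOLVER_LOOKUP_TIME_SMALL and RESOLVER_NEGATIVE_RETRIES.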
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SHUTDOWN">
+<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
+<listitem><para>
+This informational message is output when the resolver has shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTED">
+<term>RESOLVER_STARTED resolver started</term>
+<listitem><para>
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_STARTING">
+<term>RESOLVER_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
+<listitem><para>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESSES_NOT_LIST">
+<term>SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</term>
+<listitem><para>
+This points to an error in configuration. What was supposed to be a list of
+IP address/port pairs is not a list at all but something else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_FAIL">
+<term>SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</term>
+<listitem><para>
+The server failed to bind to one of the address/port pairs it should listen on
+according to its configuration, for the reason listed in the message (usually
+because the pair is already used by another service or because the server
+lacks the required privileges). The server will try to recover by binding to
+the address/port pairs it was listening on before (if any).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_MISSING">
+<term>SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_TYPE">
+<term>SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
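+</para>
+<para>
+A well-formed specification of this kind looks roughly as follows (the
+addresses are illustrative only):
+</para>
+<screen>
+[ { "address": "192.0.2.1",   "port": 53 },
+  { "address": "2001:db8::1", "port": 53 } ]
+</screen>
+<para>
+Each element must contain both the "address" and "port" items; anything else
+results in this or one of the related SRVCOMM_ADDRESS errors above.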
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_UNRECOVERABLE">
+<term>SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</term>
+<listitem><para>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</para><para>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_VALUE">
+<term>SRVCOMM_ADDRESS_VALUE address to set: %1#%2</term>
+<listitem><para>
+Debug message. This lists one address and port value of the set of
+addresses the server is going to listen on (there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_DEINIT">
+<term>SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_INIT">
+<term>SRVCOMM_KEYS_INIT initializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_UPDATE">
+<term>SRVCOMM_KEYS_UPDATE updating TSIG keyring</term>
+<listitem><para>
+Debug message indicating that a new keyring is being loaded from the
+configuration (either on startup or as a result of a configuration update).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_PORT_RANGE">
+<term>SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</term>
+<listitem><para>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_SET_LISTEN">
+<term>SRVCOMM_SET_LISTEN setting addresses to listen to</term>
+<listitem><para>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_BAD_OPTION_VALUE">
+<term>STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CC_SESSION_ERROR">
+<term>STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING">
+<term>STATHTTPD_CLOSING closing %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING_CC_SESSION">
+<term>STATHTTPD_CLOSING_CC_SESSION stopping cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_HANDLE_CONFIG">
+<term>STATHTTPD_HANDLE_CONFIG reading configuration: %1</term>
+<listitem><para>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_STATUS_COMMAND">
+<term>STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_UNKNOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_ERROR">
+<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistics requests.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_INIT_ERROR">
+<term>STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SHUTDOWN">
+<term>STATHTTPD_SHUTDOWN shutting down</term>
+<listitem><para>
+The stats-httpd daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTED">
+<term>STATHTTPD_STARTED listening on %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTING_CC_SESSION">
+<term>STATHTTPD_STARTING_CC_SESSION starting cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_START_SERVER_INIT_ERROR">
+<term>STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STOPPED_BY_KEYBOARD">
+<term>STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_UNKNOWN_CONFIG_ITEM">
+<term>STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</term>
+<listitem><para>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_BAD_OPTION_VALUE">
+<term>STATS_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats module was called with a bad command-line argument and will
+not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_CC_SESSION_ERROR">
+<term>STATS_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_NEW_CONFIG">
+<term>STATS_RECEIVED_NEW_CONFIG received new configuration: %1</term>
+<listitem><para>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</term>
+<listitem><para>
+The stats module received a command to show all statistics schemas of all modules.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</term>
+<listitem><para>
+The stats module received a command to show the specified statistics schema of the specified module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</term>
+<listitem><para>
+The stats module received a command to show all statistics that it has
+collected.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</term>
+<listitem><para>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats module and it will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_STATUS_COMMAND">
+<term>STATS_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_UNKNOWN_COMMAND">
+<term>STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_SEND_REQUEST_BOSS">
+<term>STATS_SEND_REQUEST_BOSS requesting boss to send statistics</term>
+<listitem><para>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STARTING">
+<term>STATS_STARTING starting</term>
+<listitem><para>
+The stats module will now start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_START_ERROR">
+<term>STATS_START_ERROR stats module error: %1</term>
+<listitem><para>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
+<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_UNKNOWN_COMMAND_IN_SPEC">
+<term>STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</term>
+<listitem><para>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INCONSISTENT_SOA">
+<term>XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</term>
+<listitem><para>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, as also noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found (a minimal
+sketch of this serial-only check appears after this chapter). If it
+turns out to happen with a real-world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of the
+zone), we can consider a stricter action.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_GOT_INCREMENTAL_RESP">
+<term>XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</term>
+<listitem><para>
+During an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means an xfrin connection tried IXFR
+and actually got a response carrying incremental updates.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_GOT_NONINCREMENTAL_RESP">
+<term>XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</term>
+<listitem><para>
+A non-incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. A non-incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to an IXFR query the first data is not an SOA, or its SOA
+serial is not equal to the requested SOA serial.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_NOTIFY_UNKNOWN_MASTER">
+<term>XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</term>
+<listitem><para>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message; this is output when the daemon starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_OTHER_FAILURE">
+<term>XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or a failure
+in the database connection. The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
+<term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
+<term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_SUCCESS">
+<term>XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</term>
+<listitem><para>
+The XFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
+<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
+<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
+<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
+<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CONFIG_ERROR">
+<term>XFROUT_CONFIG_ERROR error found in configuration data: %1</term>
+<listitem><para>
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and it points
+to an oversight in catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
+<term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
+<listitem><para>
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_DROPPED">
+<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to the given host.
+This is required by the ACLs. The %1 and %2 represent the zone name and class,
+the %3 and %4 the IP address and port of the peer requesting the transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_REJECTED">
+<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<listitem><para>
+The xfrout process rejected (with the REFUSED rcode) a request to transfer the
+zone to the given host. This is because of the ACLs. The %1 and %2 represent the zone name and
+class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file that xfrout needs for contacting the auth daemon
+already exists and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be the result of a rare local error such as a memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_CCSESSION_ERROR">
+<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
+<listitem><para>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_JITTER_TOO_BIG">
+<term>ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</term>
+<listitem><para>
+The value specified in the configuration for the refresh jitter is too large,
+so its value has been set to the maximum of 0.5.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_KEYBOARD_INTERRUPT">
+<term>ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</term>
+<listitem><para>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_LOAD_ZONE">
+<term>ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_MASTER_ADDRESS">
+<term>ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</term>
+<listitem><para>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_SOA">
+<term>ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</term>
+<listitem><para>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_TIMER_THREAD">
+<term>ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</term>
+<listitem><para>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_CLASS">
+<term>ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_NAME">
+<term>ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_NOTIFY">
+<term>ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_SHUTDOWN">
+<term>ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_UNKNOWN">
+<term>ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</term>
+<listitem><para>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_FAILED">
+<term>ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_SUCCESS">
+<term>ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_REFRESH_ZONE">
+<term>ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</term>
+<listitem><para>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SELECT_ERROR">
+<term>ZONEMGR_SELECT_ERROR error with select(): %1</term>
+<listitem><para>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SEND_FAIL">
+<term>ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</term>
+<listitem><para>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_ERROR">
+<term>ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</term>
+<listitem><para>
+The zonemgr process failed to start because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_TIMEOUT">
+<term>ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</term>
+<listitem><para>
+The zonemgr process failed to start because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SHUTDOWN">
+<term>ZONEMGR_SHUTDOWN zone manager has shut down</term>
+<listitem><para>
+A debug message, output when the zone manager has shut down completely.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_STARTING">
+<term>ZONEMGR_STARTING zone manager starting</term>
+<listitem><para>
+A debug message output when the zone manager starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_TIMER_THREAD_RUNNING">
+<term>ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</term>
+<listitem><para>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_FAIL">
+<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_NOTIFIED">
+<term>ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_SUCCESS">
+<term>ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
+</para></listitem>
+</varlistentry>
+ </variablelist>
+ </para>
+ </chapter>
+</book>
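
A minimal sketch of the serial-only consistency check described under
XFRIN_AXFR_INCONSISTENT_SOA above, written in plain Python with the standard
logging module rather than the b10-xfrin internals; the function name and
arguments are illustrative assumptions, not the module's API:

    import logging

    logger = logging.getLogger("xfrin")

    def check_axfr_soa_serials(zone_name, first_serial, last_serial):
        """Compare the serials of the first and last SOA of an AXFR response.

        Only the serial fields are compared (other SOA fields are ignored),
        and a mismatch results in a warning rather than a rejected transfer,
        mirroring the behaviour described above.
        """
        if first_serial != last_serial:
            logger.warning("AXFR SOAs are inconsistent for %s: "
                           "%s expected, %s received",
                           zone_name, first_serial, last_serial)
            return False
        return True

    # A mismatch only produces a warning; the transfer itself proceeds.
    check_axfr_soa_serials("example.com.", 2011110901, 2011110902)
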
diff --git a/ext/asio/asio/impl/error_code.ipp b/ext/asio/asio/impl/error_code.ipp
index ed37a17..218c09b 100644
--- a/ext/asio/asio/impl/error_code.ipp
+++ b/ext/asio/asio/impl/error_code.ipp
@@ -11,6 +11,9 @@
#ifndef ASIO_IMPL_ERROR_CODE_IPP
#define ASIO_IMPL_ERROR_CODE_IPP
+// strerror() needs <cstring>
+#include <cstring>
+
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
diff --git a/src/bin/Makefile.am b/src/bin/Makefile.am
index 23d660c..06d8df2 100644
--- a/src/bin/Makefile.am
+++ b/src/bin/Makefile.am
@@ -1,4 +1,4 @@
SUBDIRS = bind10 bindctl cfgmgr loadzone msgq host cmdctl auth xfrin xfrout \
- usermgr zonemgr stats tests resolver sockcreator
+ usermgr zonemgr stats tests resolver sockcreator dhcp6
check-recursive: all-recursive
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 56dc348..4d8ec83 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -16,7 +16,8 @@ endif
pkglibexecdir = $(libexecdir)/@PACKAGE@
-CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES = *.gcno *.gcda auth.spec spec_config.h
+CLEANFILES += auth_messages.h auth_messages.cc
man_MANS = b10-auth.8
EXTRA_DIST = $(man_MANS) b10-auth.xml
@@ -34,24 +35,40 @@ auth.spec: auth.spec.pre
spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
-BUILT_SOURCES = spec_config.h
+auth_messages.h auth_messages.cc: auth_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/auth/auth_messages.mes
+
+BUILT_SOURCES = spec_config.h auth_messages.h auth_messages.cc
+
pkglibexec_PROGRAMS = b10-auth
b10_auth_SOURCES = query.cc query.h
b10_auth_SOURCES += auth_srv.cc auth_srv.h
+b10_auth_SOURCES += auth_log.cc auth_log.h
b10_auth_SOURCES += change_user.cc change_user.h
b10_auth_SOURCES += auth_config.cc auth_config.h
b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
+nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
+EXTRA_DIST += auth_messages.mes
+
b10_auth_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_auth_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_auth_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-b10_auth_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+b10_auth_LDADD += $(top_builddir)/src/lib/log/liblog.la
b10_auth_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
b10_auth_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
b10_auth_LDADD += $(SQLITE_LIBS)
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
}
]
}
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP ",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
]
}
}
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index 7929d80..d684c68 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -60,6 +60,15 @@ private:
set<string> configured_sources_;
};
+/// A derived \c AuthConfigParser for the version value
+/// (which is not used at this moment)
+class VersionConfig : public AuthConfigParser {
+public:
+ VersionConfig() {}
+ virtual void build(ConstElementPtr) {};
+ virtual void commit() {};
+};
+
void
DatasourcesConfig::build(ConstElementPtr config_value) {
BOOST_FOREACH(ConstElementPtr datasrc_elem, config_value->listValue()) {
@@ -98,7 +107,7 @@ DatasourcesConfig::commit() {
// server implementation details, and isn't scalable wrt the number of
// data source types, and should eventually be improved.
// Currently memory data source for class IN is the only possibility.
- server_.setMemoryDataSrc(RRClass::IN(), AuthSrv::MemoryDataSrcPtr());
+ server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr());
BOOST_FOREACH(shared_ptr<AuthConfigParser> datasrc_config, datasources_) {
datasrc_config->commit();
@@ -116,12 +125,12 @@ public:
{}
virtual void build(ConstElementPtr config_value);
virtual void commit() {
- server_.setMemoryDataSrc(rrclass_, memory_datasrc_);
+ server_.setInMemoryClient(rrclass_, memory_client_);
}
private:
AuthSrv& server_;
RRClass rrclass_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ AuthSrv::InMemoryClientPtr memory_client_;
};
void
@@ -134,8 +143,8 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
// We'd eventually optimize building zones (in case of reloading) by
// selectively loading fresh zones. Right now we simply check the
// RR class is supported by the server implementation.
- server_.getMemoryDataSrc(rrclass_);
- memory_datasrc_ = AuthSrv::MemoryDataSrcPtr(new MemoryDataSrc());
+ server_.getInMemoryClient(rrclass_);
+ memory_client_ = AuthSrv::InMemoryClientPtr(new InMemoryClient());
ConstElementPtr zones_config = config_value->get("zones");
if (!zones_config) {
@@ -154,9 +163,10 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
isc_throw(AuthConfigError, "Missing zone file for zone: "
<< origin->str());
}
- shared_ptr<MemoryZone> new_zone(new MemoryZone(rrclass_,
+ shared_ptr<InMemoryZoneFinder> zone_finder(new
+ InMemoryZoneFinder(rrclass_,
Name(origin->stringValue())));
- const result::Result result = memory_datasrc_->addZone(new_zone);
+ const result::Result result = memory_client_->addZone(zone_finder);
if (result == result::EXIST) {
isc_throw(AuthConfigError, "zone "<< origin->str()
<< " already exists");
@@ -168,7 +178,7 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
* need the load method to be split into some kind of build and
* commit/abort parts.
*/
- new_zone->load(file->stringValue());
+ zone_finder->load(file->stringValue());
}
}
@@ -293,6 +303,11 @@ createAuthConfigParser(AuthSrv& server, const std::string& config_id,
// we may introduce dynamic registration of configuration parsers,
// and then this test can be done in a cleaner and safer way.
return (new ThrowerCommitConfig());
+ } else if (config_id == "version") {
+ // Currently, the version identifier is ignored, but it should
+ // later be used to mark backwards incompatible changes in the
+ // config data
+ return (new VersionConfig());
} else {
isc_throw(AuthConfigError, "Unknown configuration identifier: " <<
config_id);
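
For context, a sketch of the configuration shape that
MemoryDatasourceConfig::build() above appears to consume, written as a Python
dict. Only the "zones" list and its "origin" and "file" keys are visible in
the hunk; the "type" and "class" keys and the file path are assumptions added
for illustration:

    # Hypothetical configuration entry; only "zones", "origin" and "file"
    # are taken from the hunk above, the other keys and the path are assumed.
    memory_datasource_config = {
        "type": "memory",   # assumed: selects the in-memory data source
        "class": "IN",      # the code above only supports class IN for now
        "zones": [
            {
                "origin": "example.com",
                "file": "/var/bind10/example.com.zone",   # hypothetical path
            },
        ],
    }

    # Mirror of the checks in build(): each zone entry needs both keys, and
    # a duplicate origin would be reported as already existing.
    for zone in memory_datasource_config["zones"]:
        if "origin" not in zone or "file" not in zone:
            raise ValueError("Missing zone file or origin for zone entry")
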
diff --git a/src/bin/auth/auth_log.cc b/src/bin/auth/auth_log.cc
new file mode 100644
index 0000000..d41eaea
--- /dev/null
+++ b/src/bin/auth/auth_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the top-level component of b10-auth.
+
+#include "auth_log.h"
+
+namespace isc {
+namespace auth {
+
+isc::log::Logger auth_logger("auth");
+
+} // namespace auth
+} // namespace isc
+
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
new file mode 100644
index 0000000..e0cae0f
--- /dev/null
+++ b/src/bin/auth/auth_log.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_LOG__H
+#define __AUTH_LOG__H
+
+#include <log/macros.h>
+#include <auth/auth_messages.h>
+
+namespace isc {
+namespace auth {
+
+/// \brief Auth Logging
+///
+/// Defines the levels used to output debug messages in the "auth" part of
+/// the b10-auth program. Higher numbers equate to more verbose (and detailed)
+/// output.
+
+// Debug messages indicating normal startup are logged at this debug level.
+const int DBG_AUTH_START = DBGLVL_START_SHUT;
+
+// Debug level used to log setting information (such as configuration changes).
+const int DBG_AUTH_OPS = DBGLVL_COMMAND;
+
+// Trace detailed operations, including errors raised when processing invalid
+// packets. (These are not logged at severities of WARN or higher for fear
+// that a set of deliberately invalid packets set to the authoritative server
+// could overwhelm the logging.)
+const int DBG_AUTH_DETAIL = DBGLVL_TRACE_BASIC;
+
+// This level is used to log the contents of packets received and sent.
+const int DBG_AUTH_MESSAGES = DBGLVL_TRACE_DETAIL_DATA;
+
+/// Define the logger for the "auth" module part of b10-auth. We could define
+/// a logger in each file, but we would want to define a common name to avoid
+/// spelling mistakes, so it is just one small step from there to define a
+/// module-common logger.
+extern isc::log::Logger auth_logger;
+
+} // namespace auth
+} // namespace isc
+
+#endif // __AUTH_LOG__H
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
new file mode 100644
index 0000000..4706690
--- /dev/null
+++ b/src/bin/auth/auth_messages.mes
@@ -0,0 +1,262 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::auth
+
+% AUTH_AXFR_ERROR error handling AXFR request: %1
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+
+% AUTH_AXFR_UDP AXFR query received over UDP
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+
+% AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+
+% AUTH_CONFIG_CHANNEL_CREATED configuration session channel created
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+
+% AUTH_CONFIG_CHANNEL_STARTED configuration session channel started
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+
+% AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+
+% AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1
+An attempt to update the configuration of the server with information
+from the configuration database has failed; the reason is given in
+the message.
+
+% AUTH_DATA_SOURCE data source database file: %1
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+
+% AUTH_DNS_SERVICES_CREATED DNS services created
+This is a debug message indicating that the component that will handle
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+
+% AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+
+% AUTH_LOAD_TSIG loading TSIG keys
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+
+% AUTH_LOAD_ZONE loaded zone %1/%2
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+
+% AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+
+% AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+
+% AUTH_NO_STATS_SESSION session interface for statistics is not available
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+
+% AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+
+% AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet with an RR type other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+
+% AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+
+% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+
+% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+
+% AUTH_PACKET_RECEIVED message received:\n%1
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_PROCESS_FAIL message processing failure: %1
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+
+% AUTH_RECEIVED_COMMAND command '%1' received
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+
+% AUTH_RECEIVED_SENDSTATS command 'sendstats' received
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+
+% AUTH_RESPONSE_RECEIVED received response message, ignoring
+This is a debug message output when the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+
+% AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+
+% AUTH_SERVER_CREATED server created
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+
+% AUTH_SERVER_FAILED server failed: %1
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+
+% AUTH_SERVER_STARTED server started
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+
+% AUTH_SQLITE3 nothing to do for loading sqlite3
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+
+% AUTH_STATS_CHANNEL_CREATED STATS session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+
+% AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+
+% AUTH_STATS_COMMS communication error in sending statistics data: %1
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+
+% AUTH_STATS_TIMEOUT timeout while sending statistics data: %1
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+
+% AUTH_STATS_TIMER_DISABLED statistics timer has been disabled
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+
+% AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+
+% AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+
+% AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+
+% AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+
+% AUTH_ZONEMGR_COMMS error communicating with zone manager: %1
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+
+% AUTH_ZONEMGR_ERROR received error response from zone manager: %1
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data which is invalid according to the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index a863ef3..c9dac88 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -20,6 +20,7 @@
#include <cassert>
#include <iostream>
#include <vector>
+#include <memory>
#include <boost/bind.hpp>
@@ -43,6 +44,7 @@
#include <dns/rrset.h>
#include <dns/rrttl.h>
#include <dns/message.h>
+#include <dns/tsig.h>
#include <datasrc/query.h>
#include <datasrc/data_source.h>
@@ -57,6 +59,7 @@
#include <auth/auth_srv.h>
#include <auth/query.h>
#include <auth/statistics.h>
+#include <auth/auth_log.h>
using namespace std;
@@ -73,6 +76,7 @@ using namespace isc::xfr;
using namespace isc::asiolink;
using namespace isc::asiodns;
using namespace isc::server_common::portconfig;
+using boost::shared_ptr;
class AuthSrvImpl {
private:
@@ -85,11 +89,14 @@ public:
isc::data::ConstElementPtr setDbFile(isc::data::ConstElementPtr config);
bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
bool processNotify(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer);
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context);
IOService io_service_;
@@ -98,12 +105,11 @@ public:
/// These members are public because AuthSrv accesses them directly.
ModuleCCSession* config_session_;
- bool verbose_mode_;
AbstractSession* xfrin_session_;
/// In-memory data source. Currently class IN only for simplicity.
- const RRClass memory_datasrc_class_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ const RRClass memory_client_class_;
+ AuthSrv::InMemoryClientPtr memory_client_;
/// Hot spot cache
isc::datasrc::HotCache cache_;
@@ -116,6 +122,13 @@ public:
/// Addresses we listen on
AddressList listen_addresses_;
+
+ /// The TSIG keyring
+ const shared_ptr<TSIGKeyRing>* keyring_;
+
+ /// Bind the ModuleSpec object in config_session_ with
+ /// isc:config::ModuleSpec::validateStatistics.
+ void registerStatisticsValidator();
private:
std::string db_file_;
@@ -130,15 +143,19 @@ private:
/// Increment query counter
void incCounter(const int protocol);
+
+ // validateStatistics
+ bool validateStatistics(isc::data::ConstElementPtr data) const;
};
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
AbstractXfroutClient& xfrout_client) :
- config_session_(NULL), verbose_mode_(false),
+ config_session_(NULL),
xfrin_session_(NULL),
- memory_datasrc_class_(RRClass::IN()),
+ memory_client_class_(RRClass::IN()),
statistics_timer_(io_service_),
- counters_(verbose_mode_),
+ counters_(),
+ keyring_(NULL),
xfrout_connected_(false),
xfrout_client_(xfrout_client)
{
@@ -241,7 +258,9 @@ public:
void
makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
- const Rcode& rcode, const bool verbose_mode)
+ const Rcode& rcode,
+ std::auto_ptr<TSIGContext> tsig_context =
+ std::auto_ptr<TSIGContext>())
{
// extract the parameters that should be kept.
// XXX: with the current implementation, it's not easy to set EDNS0
@@ -272,25 +291,16 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
message->setRcode(rcode);
MessageRenderer renderer(*buffer);
- message->toWire(renderer);
-
- if (verbose_mode) {
- cerr << "[b10-auth] sending an error response (" <<
- renderer.getLength() << " bytes):\n" << message->toText() << endl;
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
}
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
+ .arg(renderer.getLength()).arg(*message);
}
}
-void
-AuthSrv::setVerbose(const bool on) {
- impl_->verbose_mode_ = on;
-}
-
-bool
-AuthSrv::getVerbose() const {
- return (impl_->verbose_mode_);
-}
-
IOService&
AuthSrv::getIOService() {
return (impl_->io_service_);
@@ -314,6 +324,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
void
AuthSrv::setConfigSession(ModuleCCSession* config_session) {
impl_->config_session_ = config_session;
+ impl_->registerStatisticsValidator();
}
void
@@ -326,37 +337,34 @@ AuthSrv::getConfigSession() const {
return (impl_->config_session_);
}
-AuthSrv::MemoryDataSrcPtr
-AuthSrv::getMemoryDataSrc(const RRClass& rrclass) {
+AuthSrv::InMemoryClientPtr
+AuthSrv::getInMemoryClient(const RRClass& rrclass) {
// XXX: for simplicity, we only support the IN class right now.
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
}
- return (impl_->memory_datasrc_);
+ return (impl_->memory_client_);
}
void
-AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc)
+AuthSrv::setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client)
{
// XXX: see above
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
- }
- if (impl_->verbose_mode_) {
- if (!impl_->memory_datasrc_ && memory_datasrc) {
- cerr << "[b10-auth] Memory data source is enabled for class "
- << rrclass << endl;
- } else if (impl_->memory_datasrc_ && !memory_datasrc) {
- cerr << "[b10-auth] Memory data source is disabled for class "
- << rrclass << endl;
- }
- }
- impl_->memory_datasrc_ = memory_datasrc;
+ } else if (!impl_->memory_client_ && memory_client) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
+ .arg(rrclass);
+ } else if (impl_->memory_client_ && !memory_client) {
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
+ .arg(rrclass);
+ }
+ impl_->memory_client_ = memory_client;
}
uint32_t
@@ -376,18 +384,13 @@ AuthSrv::setStatisticsTimerInterval(uint32_t interval) {
}
if (interval == 0) {
impl_->statistics_timer_.cancel();
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_DISABLED);
} else {
impl_->statistics_timer_.setup(boost::bind(&AuthSrv::submitStatistics,
this),
interval * 1000);
- }
- if (impl_->verbose_mode_) {
- if (interval == 0) {
- cerr << "[b10-auth] Disabled statistics timer" << endl;
- } else {
- cerr << "[b10-auth] Set statistics timer to " << interval
- << " seconds" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_STATS_TIMER_SET)
+ .arg(interval);
}
}
@@ -404,17 +407,13 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Ignore all responses.
if (message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received unexpected response, ignoring"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_RESPONSE_RECEIVED);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] DNS packet exception: " << ex.what() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_HEADER_PARSE_FAIL)
+ .arg(ex.what());
server->resume(false);
return;
}
@@ -423,52 +422,63 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
// Parse the message.
message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning " << error.getRcode().toText()
- << ": " << error.what() << endl;
- }
- makeErrorMessage(message, buffer, error.getRcode(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+ .arg(error.getRcode().toText()).arg(error.what());
+ makeErrorMessage(message, buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] returning SERVFAIL: " << ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+ .arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
server->resume(true);
return;
} // other exceptions will be handled at a higher layer.
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] received a message:\n" << message->toText() << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_PACKET_RECEIVED)
+ .arg(message->toText());
// Perform further protocol-level validation.
+ // TSIG first
+ // If tsig_context ends up being set, we know we need to answer with TSIG as well
+ std::auto_ptr<TSIGContext> tsig_context;
+ const TSIGRecord* tsig_record(message->getTSIGRecord());
+ TSIGError tsig_error(TSIGError::NOERROR());
+
+ // Do we do TSIG?
+ // The keyring can be null if we're in test
+ if (impl_->keyring_ != NULL && tsig_record != NULL) {
+ tsig_context.reset(new TSIGContext(tsig_record->getName(),
+ tsig_record->getRdata().
+ getAlgorithm(),
+ **impl_->keyring_));
+ tsig_error = tsig_context->verify(tsig_record, io_message.getData(),
+ io_message.getDataSize());
+ }
bool sendAnswer = true;
- if (message->getOpcode() == Opcode::NOTIFY()) {
- sendAnswer = impl_->processNotify(io_message, message, buffer);
+ if (tsig_error != TSIGError::NOERROR()) {
+ makeErrorMessage(message, buffer, tsig_error.toRcode(), tsig_context);
+ } else if (message->getOpcode() == Opcode::NOTIFY()) {
+ sendAnswer = impl_->processNotify(io_message, message, buffer,
+ tsig_context);
} else if (message->getOpcode() != Opcode::QUERY()) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] unsupported opcode" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_UNSUPPORTED_OPCODE)
+ .arg(message->getOpcode().toText());
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- makeErrorMessage(message, buffer, Rcode::FORMERR(),
- impl_->verbose_mode_);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
} else {
ConstQuestionPtr question = *message->beginQuestion();
const RRType &qtype = question->getType();
if (qtype == RRType::AXFR()) {
- sendAnswer = impl_->processAxfrQuery(io_message, message, buffer);
+ sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
+ tsig_context);
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(message, buffer, Rcode::NOTIMP(),
- impl_->verbose_mode_);
+ makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
} else {
- sendAnswer = impl_->processNormalQuery(io_message, message, buffer);
+ sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
+ tsig_context);
}
}
@@ -477,7 +487,8 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
bool
AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
ConstEDNSPtr remote_edns = message->getEDNS();
const bool dnssec_ok = remote_edns && remote_edns->getDNSSECAwareness();
@@ -502,20 +513,17 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
// If a memory data source is configured call the separate
// Query::process()
const ConstQuestionPtr question = *message->beginQuestion();
- if (memory_datasrc_ && memory_datasrc_class_ == question->getClass()) {
+ if (memory_client_ && memory_client_class_ == question->getClass()) {
const RRType& qtype = question->getType();
const Name& qname = question->getName();
- auth::Query(*memory_datasrc_, qname, qtype, *message).process();
+ auth::Query(*memory_client_, qname, qtype, *message).process();
} else {
datasrc::Query query(*message, cache_, dnssec_ok);
data_sources_.doQuery(query);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] Internal error, returning SERVFAIL: " <<
- ex.what() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+ LOG_ERROR(auth_logger, AUTH_PROCESS_FAIL).arg(ex.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL());
return (true);
}
@@ -523,29 +531,28 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
const bool udp_buffer =
(io_message.getSocket().getProtocol() == IPPROTO_UDP);
renderer.setLengthLimit(udp_buffer ? remote_bufsize : 65535);
- message->toWire(renderer);
-
- if (verbose_mode_) {
- cerr << "[b10-auth] sending a response ("
- << renderer.getLength()
- << " bytes):\n" << message->toText() << endl;
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
}
+ LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_NORMAL_RESPONSE)
+ .arg(renderer.getLength()).arg(message->toText());
return (true);
}
bool
AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ auto_ptr<TSIGContext> tsig_context)
{
// Increment query counter.
incCounter(io_message.getSocket().getProtocol());
if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- if (verbose_mode_) {
- cerr << "[b10-auth] AXFR query over UDP isn't allowed" << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_UDP);
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -568,11 +575,9 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
xfrout_connected_ = false;
}
- if (verbose_mode_) {
- cerr << "[b10-auth] Error in handling XFR request: " << err.what()
- << endl;
- }
- makeErrorMessage(message, buffer, Rcode::SERVFAIL(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+ .arg(err.what());
+ makeErrorMessage(message, buffer, Rcode::SERVFAIL(), tsig_context);
return (true);
}
@@ -581,25 +586,22 @@ AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
bool
AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
- OutputBufferPtr buffer)
+ OutputBufferPtr buffer,
+ std::auto_ptr<TSIGContext> tsig_context)
{
// The incoming notify must contain exactly one question for SOA of the
// zone name.
if (message->getRRCount(Message::SECTION_QUESTION) != 1) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid number of questions in notify: "
- << message->getRRCount(Message::SECTION_QUESTION) << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_QUESTIONS)
+ .arg(message->getRRCount(Message::SECTION_QUESTION));
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
ConstQuestionPtr question = *message->beginQuestion();
if (question->getType() != RRType::SOA()) {
- if (verbose_mode_) {
- cerr << "[b10-auth] invalid question RR type in notify: "
- << question->getType() << endl;
- }
- makeErrorMessage(message, buffer, Rcode::FORMERR(), verbose_mode_);
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NOTIFY_RRTYPE)
+ .arg(question->getType().toText());
+ makeErrorMessage(message, buffer, Rcode::FORMERR(), tsig_context);
return (true);
}
@@ -615,10 +617,7 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
// silent about such cases, but there doesn't seem to be anything we can
// improve at the primary server side by sending an error anyway.
if (xfrin_session_ == NULL) {
- if (verbose_mode_) {
- cerr << "[b10-auth] "
- "session interface for xfrin is not available" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_NO_XFRIN);
return (false);
}
@@ -644,16 +643,12 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
int rcode;
parsed_answer = parseAnswer(rcode, answer);
if (rcode != 0) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: "
- << parsed_answer->str() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_ERROR)
+ .arg(parsed_answer->str());
return (false);
}
} catch (const Exception& ex) {
- if (verbose_mode_) {
- cerr << "[b10-auth] failed to notify Zonemgr: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_ZONEMGR_COMMS).arg(ex.what());
return (false);
}
@@ -662,7 +657,11 @@ AuthSrvImpl::processNotify(const IOMessage& io_message, MessagePtr message,
message->setRcode(Rcode::NOERROR());
MessageRenderer renderer(*buffer);
- message->toWire(renderer);
+ if (tsig_context.get() != NULL) {
+ message->toWire(renderer, *tsig_context);
+ } else {
+ message->toWire(renderer);
+ }
return (true);
}
@@ -679,6 +678,22 @@ AuthSrvImpl::incCounter(const int protocol) {
}
}
+void
+AuthSrvImpl::registerStatisticsValidator() {
+ counters_.registerStatisticsValidator(
+ boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+ if (config_session_ == NULL) {
+ return (false);
+ }
+ return (
+ config_session_->getModuleSpec().validateStatistics(
+ data, true));
+}
+
ConstElementPtr
AuthSrvImpl::setDbFile(ConstElementPtr config) {
ConstElementPtr answer = isc::config::createAnswer();
@@ -709,10 +724,7 @@ AuthSrvImpl::setDbFile(ConstElementPtr config) {
} else {
return (answer);
}
-
- if (verbose_mode_) {
- cerr << "[b10-auth] Data source database file: " << db_file_ << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_DATA_SOURCE).arg(db_file_);
// create SQL data source
// Note: the following step is tricky to be exception-safe and to ensure
@@ -742,9 +754,7 @@ AuthSrv::updateConfig(ConstElementPtr new_config) {
}
return (impl_->setDbFile(new_config));
} catch (const isc::Exception& error) {
- if (impl_->verbose_mode_) {
- cerr << "[b10-auth] error: " << error.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_CONFIG_UPDATE_FAIL).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
@@ -772,3 +782,8 @@ void
AuthSrv::setDNSService(isc::asiodns::DNSService& dnss) {
dnss_ = &dnss;
}
+
+void
+AuthSrv::setTSIGKeyRing(const shared_ptr<TSIGKeyRing>* keyring) {
+ impl_->keyring_ = keyring;
+}
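
Taken together, the processMessage()/makeErrorMessage() changes above implement a
verify-then-sign pattern: a TSIGContext is built from the request's TSIG record and
the configured keyring, the request wire data is verified, and the same context is
later handed to Message::toWire() so the response is signed consistently. A condensed
sketch of that pattern (not part of the patch; keyring here is a plain
const TSIGKeyRing*, and message, renderer, wire_data and wire_len stand in for
objects the server already holds):

    std::auto_ptr<TSIGContext> tsig_context;
    const TSIGRecord* tsig_record = message->getTSIGRecord();
    TSIGError tsig_error = TSIGError::NOERROR();
    if (keyring != NULL && tsig_record != NULL) {
        // Build a context from the key name and algorithm in the request.
        tsig_context.reset(new TSIGContext(tsig_record->getName(),
                                           tsig_record->getRdata().getAlgorithm(),
                                           *keyring));
        // Verify the request exactly as it arrived on the wire.
        tsig_error = tsig_context->verify(tsig_record, wire_data, wire_len);
    }
    // ... error handling based on tsig_error ...
    // When rendering the response, sign it with the same context if present.
    if (tsig_context.get() != NULL) {
        message->toWire(renderer, *tsig_context);
    } else {
        message->toWire(renderer);
    }
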
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 88f00c1..f2259a2 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -17,7 +17,7 @@
#include <string>
-// For MemoryDataSrcPtr below. This should be a temporary definition until
+// For InMemoryClientPtr below. This should be a temporary definition until
// we reorganize the data source framework.
#include <boost/shared_ptr.hpp>
@@ -39,11 +39,14 @@
namespace isc {
namespace datasrc {
-class MemoryDataSrc;
+class InMemoryClient;
}
namespace xfr {
class AbstractXfroutClient;
}
+namespace dns {
+class TSIGKeyRing;
+}
}
@@ -121,27 +124,6 @@ public:
isc::util::OutputBufferPtr buffer,
isc::asiodns::DNSServer* server);
- /// \brief Set verbose flag
- ///
- /// \param on The new value of the verbose flag
-
- /// \brief Enable or disable verbose logging.
- ///
- /// This method never throws an exception.
- ///
- /// \param on \c true to enable verbose logging; \c false to disable
- /// verbose logging.
- void setVerbose(const bool on);
-
- /// \brief Returns the logging verbosity of the \c AuthSrv object.
- ///
- /// This method never throws an exception.
- ///
- /// \return \c true if verbose logging is enabled; otherwise \c false.
-
- /// \brief Get the current value of the verbose flag
- bool getVerbose() const;
-
/// \brief Updates the data source for the \c AuthSrv object.
///
/// This method installs or replaces the data source that the \c AuthSrv
@@ -151,7 +133,7 @@ public:
/// If there is a data source installed, it will be replaced with the
/// new one.
///
- /// In the current implementation, the SQLite data source and MemoryDataSrc
+ /// In the current implementation, the SQLite data source and InMemoryClient
/// are assumed.
/// We can enable memory data source and get the path of SQLite database by
/// the \c config parameter. If we disabled memory data source, the SQLite
@@ -251,16 +233,16 @@ public:
///
void setXfrinSession(isc::cc::AbstractSession* xfrin_session);
- /// A shared pointer type for \c MemoryDataSrc.
+ /// A shared pointer type for \c InMemoryClient.
///
/// This is defined inside the \c AuthSrv class as it's supposed to be
/// a short term interface until we integrate the in-memory and other
/// data source frameworks.
- typedef boost::shared_ptr<isc::datasrc::MemoryDataSrc> MemoryDataSrcPtr;
+ typedef boost::shared_ptr<isc::datasrc::InMemoryClient> InMemoryClientPtr;
- /// An immutable shared pointer type for \c MemoryDataSrc.
- typedef boost::shared_ptr<const isc::datasrc::MemoryDataSrc>
- ConstMemoryDataSrcPtr;
+ /// An immutable shared pointer type for \c InMemoryClient.
+ typedef boost::shared_ptr<const isc::datasrc::InMemoryClient>
+ ConstInMemoryClientPtr;
/// Returns the in-memory data source configured for the \c AuthSrv,
/// if any.
@@ -278,11 +260,11 @@ public:
/// \param rrclass The RR class of the requested in-memory data source.
/// \return A pointer to the in-memory data source, if configured;
/// otherwise NULL.
- MemoryDataSrcPtr getMemoryDataSrc(const isc::dns::RRClass& rrclass);
+ InMemoryClientPtr getInMemoryClient(const isc::dns::RRClass& rrclass);
/// Sets or replaces the in-memory data source of the specified RR class.
///
- /// As noted in \c getMemoryDataSrc(), some RR classes may not be
+ /// As noted in \c getInMemoryClient(), some RR classes may not be
/// supported, in which case an exception of class \c InvalidParameter
/// will be thrown.
/// This method never throws an exception otherwise.
@@ -293,9 +275,9 @@ public:
/// in-memory data source.
///
/// \param rrclass The RR class of the in-memory data source to be set.
- /// \param memory_datasrc A (shared) pointer to \c MemoryDataSrc to be set.
- void setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc);
+ /// \param memory_datasrc A (shared) pointer to \c InMemoryClient to be set.
+ void setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client);
/// \brief Set the communication session with Statistics.
///
@@ -374,6 +356,14 @@ public:
/// \brief Assign an ASIO DNS Service queue to this Auth object
void setDNSService(isc::asiodns::DNSService& dnss);
+ /// \brief Sets the keyring used for verifying and signing
+ ///
+ /// The parameter is a pointer to a shared pointer because the automatic
+ /// TSIG key reloading routines replace the actual keyring object.
+ /// The pointer is expected to point to a statically-allocated object;
+ /// this method does not take ownership of it.
+ void setTSIGKeyRing(const boost::shared_ptr<isc::dns::TSIGKeyRing>*
+ keyring);
private:
AuthSrvImpl* impl_;
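
The setTSIGKeyRing() contract documented above is easy to get wrong: the server
stores only the address of a shared pointer that somebody else owns, so resetting
that shared pointer (as the key-reloading code does) is picked up automatically and
no ownership is transferred. A minimal sketch under those assumptions (auth_server
and the caller-owned keyring variable are illustrative; main.cc below passes
&isc::server_common::keyring in the same way):

    // Caller-owned keyring; expected to outlive the server object.
    boost::shared_ptr<isc::dns::TSIGKeyRing> keyring(new isc::dns::TSIGKeyRing);
    auth_server->setTSIGKeyRing(&keyring);   // the server keeps the address only

    // A later key reload can replace the ring in place; the server picks up
    // the new object the next time it dereferences the stored pointer.
    keyring.reset(new isc::dns::TSIGKeyRing);
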
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index a569147..53c019f 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -12,8 +12,18 @@ query_bench_SOURCES += ../query.h ../query.cc
query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
+query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+query_bench_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
+nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+query_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
query_bench_LDADD += $(top_builddir)/src/lib/bench/libbench.la
query_bench_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
@@ -26,3 +36,4 @@ query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
query_bench_LDADD += $(SQLITE_LIBS)
+
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index eafcae8..940d57b 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -27,16 +27,18 @@
#include <config/ccsession.h>
+#include <auth/auth_log.h>
#include <auth/auth_srv.h>
#include <auth/command.h>
-using namespace std;
-using boost::shared_ptr;
using boost::scoped_ptr;
-using namespace isc::dns;
+using boost::shared_ptr;
+using namespace isc::auth;
+using namespace isc::config;
using namespace isc::data;
using namespace isc::datasrc;
-using namespace isc::config;
+using namespace isc::dns;
+using namespace std;
namespace {
/// An exception that is thrown if an error occurs while handling a command
@@ -115,9 +117,7 @@ public:
class SendStatsCommand : public AuthCommand {
public:
virtual void exec(AuthSrv& server, isc::data::ConstElementPtr) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] command 'sendstats' received" << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_SENDSTATS);
server.submitStatistics();
}
};
@@ -136,22 +136,21 @@ public:
// that doesn't block other server operations.
// TODO: we may (should?) want to check the "last load time" and
// the timestamp of the file and skip loading if the file isn't newer.
- shared_ptr<MemoryZone> newzone(new MemoryZone(oldzone->getClass(),
- oldzone->getOrigin()));
- newzone->load(oldzone->getFileName());
- oldzone->swap(*newzone);
-
- if (server.getVerbose()) {
- cerr << "[b10-auth] Loaded zone '" << newzone->getOrigin()
- << "'/" << newzone->getClass() << endl;
- }
+ shared_ptr<InMemoryZoneFinder> zone_finder(
+ new InMemoryZoneFinder(old_zone_finder->getClass(),
+ old_zone_finder->getOrigin()));
+ zone_finder->load(old_zone_finder->getFileName());
+ old_zone_finder->swap(*zone_finder);
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
+ .arg(zone_finder->getOrigin()).arg(zone_finder->getClass());
}
private:
- shared_ptr<MemoryZone> oldzone; // zone to be updated with the new file.
+ // zone finder to be updated with the new file.
+ shared_ptr<InMemoryZoneFinder> old_zone_finder;
// A helper private method to parse and validate command parameters.
- // On success, it sets 'oldzone' to the zone to be updated.
+ // On success, it sets 'old_zone_finder' to the zone to be updated.
// It returns true if everything is okay; and false if the command is
// valid but there's no need for further process.
bool validate(AuthSrv& server, isc::data::ConstElementPtr args) {
@@ -164,10 +163,7 @@ private:
ConstElementPtr datasrc_elem = args->get("datasrc");
if (datasrc_elem) {
if (datasrc_elem->stringValue() == "sqlite3") {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Nothing to do for loading sqlite3"
- << endl;
- }
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_SQLITE3);
return (false);
} else if (datasrc_elem->stringValue() != "memory") {
// (note: at this point it's guaranteed that datasrc_elem
@@ -182,7 +178,7 @@ private:
const RRClass zone_class = class_elem ?
RRClass(class_elem->stringValue()) : RRClass::IN();
- AuthSrv::MemoryDataSrcPtr datasrc(server.getMemoryDataSrc(zone_class));
+ AuthSrv::InMemoryClientPtr datasrc(server.getInMemoryClient(zone_class));
if (datasrc == NULL) {
isc_throw(AuthCommandError, "Memory data source is disabled");
}
@@ -194,13 +190,14 @@ private:
const Name origin(origin_elem->stringValue());
// Get the current zone
- const MemoryDataSrc::FindResult result = datasrc->findZone(origin);
+ const InMemoryClient::FindResult result = datasrc->findZone(origin);
if (result.code != result::SUCCESS) {
isc_throw(AuthCommandError, "Zone " << origin <<
" is not found in data source");
}
- oldzone = boost::dynamic_pointer_cast<MemoryZone>(result.zone);
+ old_zone_finder = boost::dynamic_pointer_cast<InMemoryZoneFinder>(
+ result.zone_finder);
return (true);
}
@@ -233,18 +230,13 @@ ConstElementPtr
execAuthServerCommand(AuthSrv& server, const string& command_id,
ConstElementPtr args)
{
- if (server.getVerbose()) {
- cerr << "[b10-auth] Received '" << command_id << "' command" << endl;
- }
-
+ LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_RECEIVED_COMMAND).arg(command_id);
try {
scoped_ptr<AuthCommand>(createAuthCommand(command_id))->exec(server,
args);
} catch (const isc::Exception& ex) {
- if (server.getVerbose()) {
- cerr << "[b10-auth] Command '" << command_id
- << "' execution failed: " << ex.what() << endl;
- }
+ LOG_ERROR(auth_logger, AUTH_COMMAND_FAILED).arg(command_id)
+ .arg(ex.what());
return (createAnswer(1, ex.what()));
}
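
For reference, the reload path above is reached through execAuthServerCommand(); a
hypothetical invocation looks like the following (the argument keys match those read
in validate(); the "loadzone" command id is an assumption here and is not shown in
this diff):

    using isc::data::Element;
    using isc::data::ConstElementPtr;

    // Build the command arguments the validate() method expects.
    ConstElementPtr args = Element::fromJSON(
        "{\"origin\": \"example.com\", \"class\": \"IN\", \"datasrc\": \"memory\"}");
    // Swaps in a freshly loaded InMemoryZoneFinder on success and returns a
    // createAnswer()-style result element.
    ConstElementPtr answer = execAuthServerCommand(*auth_server, "loadzone", args);
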
diff --git a/src/bin/auth/common.cc b/src/bin/auth/common.cc
index 35381a1..a7031f3 100644
--- a/src/bin/auth/common.cc
+++ b/src/bin/auth/common.cc
@@ -12,22 +12,25 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <string>
+
#include <auth/common.h>
#include <auth/spec_config.h>
#include <stdlib.h>
using std::string;
-string getXfroutSocketPath() {
+string
+getXfroutSocketPath() {
if (getenv("B10_FROM_BUILD") != NULL) {
- if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) {
+ if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR") != NULL) {
return (string(getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) +
"/auth_xfrout_conn");
} else {
return (string(getenv("B10_FROM_BUILD")) + "/auth_xfrout_conn");
}
} else {
- if (getenv("BIND10_XFROUT_SOCKET_FILE")) {
+ if (getenv("BIND10_XFROUT_SOCKET_FILE") != NULL) {
return (getenv("BIND10_XFROUT_SOCKET_FILE"));
} else {
return (UNIX_SOCKET_FILE);
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 480c2f7..c8f6762 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,24 +44,26 @@
#include <auth/command.h>
#include <auth/change_user.h>
#include <auth/auth_srv.h>
+#include <auth/auth_log.h>
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
+#include <server_common/keyring.h>
using namespace std;
-using namespace isc::data;
+using namespace isc::asiodns;
+using namespace isc::asiolink;
+using namespace isc::auth;
using namespace isc::cc;
using namespace isc::config;
+using namespace isc::data;
using namespace isc::dns;
+using namespace isc::log;
using namespace isc::util;
using namespace isc::xfr;
-using namespace isc::asiolink;
-using namespace isc::asiodns;
namespace {
-bool verbose_mode = false;
-
/* need global var for config/command handlers.
* todo: turn this around, and put handlers in the authserver
* class itself? */
@@ -87,6 +89,7 @@ usage() {
cerr << "\t-v: verbose output" << endl;
exit(1);
}
+
} // end of anonymous namespace
int
@@ -94,6 +97,7 @@ main(int argc, char* argv[]) {
int ch;
const char* uid = NULL;
bool cache = true;
+ bool verbose = false;
while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
switch (ch) {
@@ -104,8 +108,7 @@ main(int argc, char* argv[]) {
uid = optarg;
break;
case 'v':
- verbose_mode = true;
- isc::log::denabled = true;
+ verbose = true;
break;
case '?':
default:
@@ -117,6 +120,11 @@ main(int argc, char* argv[]) {
usage();
}
+ // Initialize logging. If verbose, we'll use maximum verbosity.
+ isc::log::initLogger("b10-auth",
+ (verbose ? isc::log::DEBUG : isc::log::INFO),
+ isc::log::MAX_DEBUG_LEVEL, NULL);
+
int ret = 0;
// XXX: we should eventually pass io_service here.
@@ -137,8 +145,7 @@ main(int argc, char* argv[]) {
}
auth_server = new AuthSrv(cache, xfrout_client);
- auth_server->setVerbose(verbose_mode);
- cout << "[b10-auth] Server created." << endl;
+ LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
SimpleCallback* checkin = auth_server->getCheckinProvider();
IOService& io_service = auth_server->getIOService();
@@ -147,27 +154,32 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
auth_server->setDNSService(dns_service);
- cout << "[b10-auth] DNSServices created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
cc_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Configuration session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
+ // We delay listening for new commands/config until just before we
+ // enter the main loop, to avoid confusion caused by the mixture of
+ // synchronous and asynchronous operations (this would otherwise
+ // happen while initializing the TSIG keys below). Until then all
+ // operations on the CC session take place synchronously.
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
- my_command_handler);
- cout << "[b10-auth] Configuration channel established." << endl;
+ my_command_handler, false);
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
xfrin_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Xfrin session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
xfrin_session->establish(NULL);
xfrin_session_established = true;
- cout << "[b10-auth] Xfrin session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
statistics_session = new Session(io_service.get_io_service());
- cout << "[b10-auth] Statistics session channel created." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_CREATED);
statistics_session->establish(NULL);
statistics_session_established = true;
- cout << "[b10-auth] Statistics session channel established." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_STATS_CHANNEL_ESTABLISHED);
auth_server->setXfrinSession(xfrin_session);
auth_server->setStatisticsSession(statistics_session);
@@ -176,25 +188,34 @@ main(int argc, char* argv[]) {
// all initial configurations, but as a short term workaround we
// handle the traditional "database_file" setup by directly calling
// updateConfig().
- // if server load configure failed, we won't exit, give user second chance
- // to correct the configure.
+ // If loading the server configuration fails we won't exit; the user
+ // gets a second chance to correct the configuration.
auth_server->setConfigSession(config_session);
try {
configureAuthServer(*auth_server, config_session->getFullConfig());
auth_server->updateConfig(ElementPtr());
} catch (const AuthConfigError& ex) {
- cout << "[bin10-auth] Server load config failed:" << ex.what() << endl;
+ LOG_ERROR(auth_logger, AUTH_CONFIG_LOAD_FAIL).arg(ex.what());
}
if (uid != NULL) {
changeUser(uid);
}
- cout << "[b10-auth] Server started." << endl;
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_LOAD_TSIG);
+ isc::server_common::initKeyring(*config_session);
+ auth_server->setTSIGKeyRing(&isc::server_common::keyring);
+
+ // Now start asynchronous read.
+ config_session->start();
+ LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_STARTED);
+
+ // Successfully initialized.
+ LOG_INFO(auth_logger, AUTH_SERVER_STARTED);
io_service.run();
} catch (const std::exception& ex) {
- cerr << "[b10-auth] Server failed: " << ex.what() << endl;
+ LOG_FATAL(auth_logger, AUTH_SERVER_FAILED).arg(ex.what());
ret = 1;
}
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 323f890..b2e0234 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -12,6 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <algorithm> // for std::max
#include <vector>
#include <boost/foreach.hpp>
@@ -19,7 +20,7 @@
#include <dns/rcode.h>
#include <dns/rdataclass.h>
-#include <datasrc/memory_datasrc.h>
+#include <datasrc/client.h>
#include <auth/query.h>
@@ -31,24 +32,24 @@ namespace isc {
namespace auth {
void
-Query::getAdditional(const Zone& zone, const RRset& rrset) const {
+Query::addAdditional(ZoneFinder& zone, const RRset& rrset) {
RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
const Rdata& rdata(rdata_iterator->getCurrent());
if (rrset.getType() == RRType::NS()) {
// Need to perform the search in the "GLUE OK" mode.
const generic::NS& ns = dynamic_cast<const generic::NS&>(rdata);
- findAddrs(zone, ns.getNSName(), Zone::FIND_GLUE_OK);
+ addAdditionalAddrs(zone, ns.getNSName(), ZoneFinder::FIND_GLUE_OK);
} else if (rrset.getType() == RRType::MX()) {
const generic::MX& mx(dynamic_cast<const generic::MX&>(rdata));
- findAddrs(zone, mx.getMXName());
+ addAdditionalAddrs(zone, mx.getMXName());
}
}
}
void
-Query::findAddrs(const Zone& zone, const Name& qname,
- const Zone::FindOptions options) const
+Query::addAdditionalAddrs(ZoneFinder& zone, const Name& qname,
+ const ZoneFinder::FindOptions options)
{
// Out of zone name
NameComparisonResult result = zone.getOrigin().compare(qname);
@@ -66,32 +67,33 @@ Query::findAddrs(const Zone& zone, const Name& qname,
// Find A rrset
if (qname_ != qname || qtype_ != RRType::A()) {
- Zone::FindResult a_result = zone.find(qname, RRType::A(), NULL,
- options);
- if (a_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult a_result = zone.find(qname, RRType::A(), NULL,
+ options | dnssec_opt_);
+ if (a_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(a_result.rrset));
+ boost::const_pointer_cast<RRset>(a_result.rrset), dnssec_);
}
}
// Find AAAA rrset
if (qname_ != qname || qtype_ != RRType::AAAA()) {
- Zone::FindResult aaaa_result =
- zone.find(qname, RRType::AAAA(), NULL, options);
- if (aaaa_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult aaaa_result =
+ zone.find(qname, RRType::AAAA(), NULL, options | dnssec_opt_);
+ if (aaaa_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(aaaa_result.rrset));
+ boost::const_pointer_cast<RRset>(aaaa_result.rrset),
+ dnssec_);
}
}
}
void
-Query::putSOA(const Zone& zone) const {
- Zone::FindResult soa_result(zone.find(zone.getOrigin(),
- RRType::SOA()));
- if (soa_result.code != Zone::SUCCESS) {
+Query::addSOA(ZoneFinder& finder) {
+ ZoneFinder::FindResult soa_result(finder.find(finder.getOrigin(),
+ RRType::SOA(), NULL, dnssec_opt_));
+ if (soa_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoSOA, "There's no SOA record in zone " <<
- zone.getOrigin().toText());
+ finder.getOrigin().toText());
} else {
/*
* FIXME:
@@ -99,34 +101,116 @@ Query::putSOA(const Zone& zone) const {
* to insist.
*/
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(soa_result.rrset));
+ boost::const_pointer_cast<RRset>(soa_result.rrset), dnssec_);
}
}
+// Note: unless the data source client implementation or the zone content
+// is broken, 'nsec' should be a valid NSEC RR. Likewise, the call to
+// find() in this method should result in NXDOMAIN and an NSEC RR that proves
+// the non-existence of a matching wildcard. If these assumptions aren't met
+// due to a buggy data source implementation or a broken zone, we'll let
+// underlying libdns++ modules throw an exception, which would result in
+// either a SERVFAIL response or just ignoring the query. We at least prevent
+// a complete crash due to such broken behavior.
void
-Query::getAuthAdditional(const Zone& zone) const {
+Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+ if (nsec->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
+ return;
+ }
+
+ // Add the NSEC proving NXDOMAIN to the authority section.
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(nsec), dnssec_);
+
+ // Next, identify the best possible wildcard name that would match
+ // the query name. It's the longer of the common suffixes that the
+ // qname shares with the owner name and with the next name of the
+ // NSEC that proves NXDOMAIN, prefixed by the wildcard label, "*".
+ // For example, for query name
+ // a.b.example.com, if the NXDOMAIN NSEC is
+ // b.example.com. NSEC c.example.com., the longer suffix is b.example.com.,
+ // and the best possible wildcard is *.b.example.com. If the NXDOMAIN
+ // NSEC is a.example.com. NSEC c.b.example.com., the longer suffix
+ // is the next domain of the NSEC, and we get the same wildcard name.
+ const int qlabels = qname_.getLabelCount();
+ const int olabels = qname_.compare(nsec->getName()).getCommonLabels();
+ const int nlabels = qname_.compare(
+ dynamic_cast<const generic::NSEC&>(nsec->getRdataIterator()->
+ getCurrent()).
+ getNextName()).getCommonLabels();
+ const int common_labels = std::max(olabels, nlabels);
+ const Name wildname(Name("*").concatenate(qname_.split(qlabels -
+ common_labels)));
+
+ // Confirm the wildcard doesn't exist (this should result in NXDOMAIN;
+ // otherwise we shouldn't have got NXDOMAIN for the original query in
+ // the first place).
+ const ZoneFinder::FindResult fresult = finder.find(wildname,
+ RRType::NSEC(), NULL,
+ dnssec_opt_);
+ if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+ fresult.rrset->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
+ return;
+ }
+
+ // Add the (no-) wildcard proof only when it's different from the NSEC
+ // that proves NXDOMAIN; sometimes they can be the same.
+ // Note: name comparison is relatively expensive. When we are at the
+ // stage of performance optimization, we should consider optimizing this
+ // for some optimized data source implementations.
+ if (nsec->getName() != fresult.rrset->getName()) {
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(fresult.rrset),
+ dnssec_);
+ }
+}
+
+void
+Query::addWildcardProof(ZoneFinder& finder) {
+ // The query name shouldn't exist in the zone if there were no wildcard
+ // substitution. Confirm that by specifying NO_WILDCARD. It should result
+ // in NXDOMAIN and an NSEC RR that proves it should be returned.
+ const ZoneFinder::FindResult fresult =
+ finder.find(qname_, RRType::NSEC(), NULL,
+ dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+ if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+ fresult.rrset->getRdataCount() == 0) {
+ isc_throw(BadNSEC, "Unexpected result for wildcard proof");
+ return;
+ }
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(fresult.rrset),
+ dnssec_);
+}
+
+void
+Query::addAuthAdditional(ZoneFinder& finder) {
// Fill in authority and additional sections.
- Zone::FindResult ns_result = zone.find(zone.getOrigin(), RRType::NS());
+ ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
+ RRType::NS(), NULL,
+ dnssec_opt_);
// zone origin name should have NS records
- if (ns_result.code != Zone::SUCCESS) {
+ if (ns_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoApexNS, "There's no apex NS records in zone " <<
- zone.getOrigin().toText());
+ finder.getOrigin().toText());
} else {
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(ns_result.rrset));
+ boost::const_pointer_cast<RRset>(ns_result.rrset), dnssec_);
// Handle additional for authority section
- getAdditional(zone, *ns_result.rrset);
+ addAdditional(finder, *ns_result.rrset);
}
}
void
-Query::process() const {
+Query::process() {
bool keep_doing = true;
const bool qtype_is_any = (qtype_ == RRType::ANY());
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
- const MemoryDataSrc::FindResult result =
- memory_datasrc_.findZone(qname_);
+ const DataSourceClient::FindResult result =
+ datasrc_client_.findZone(qname_);
// If we have no matching authoritative zone for the query name, return
// REFUSED. In short, this is to be compatible with BIND 9, but the
@@ -138,6 +222,7 @@ Query::process() const {
response_.setRcode(Rcode::REFUSED());
return;
}
+ ZoneFinder& zfinder = *result.zone_finder;
// Found a zone which is the nearest ancestor to QNAME, set the AA bit
response_.setHeaderFlag(Message::HEADERFLAG_AA);
@@ -145,14 +230,14 @@ Query::process() const {
while (keep_doing) {
keep_doing = false;
std::auto_ptr<RRsetList> target(qtype_is_any ? new RRsetList : NULL);
- const Zone::FindResult db_result(result.zone->find(qname_, qtype_,
- target.get()));
-
+ const ZoneFinder::FindResult db_result(
+ zfinder.find(qname_, qtype_, target.get(), dnssec_opt_));
switch (db_result.code) {
- case Zone::DNAME: {
+ case ZoneFinder::DNAME: {
// First, put the dname into the answer
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
/*
* Empty DNAME should never get in, as it is impossible to
* create one in master file.
@@ -188,10 +273,11 @@ Query::process() const {
qname_.getLabelCount() -
db_result.rrset->getName().getLabelCount()).
concatenate(dname.getDname())));
- response_.addRRset(Message::SECTION_ANSWER, cname);
+ response_.addRRset(Message::SECTION_ANSWER, cname, dnssec_);
break;
}
- case Zone::CNAME:
+ case ZoneFinder::CNAME:
+ case ZoneFinder::WILDCARD_CNAME:
/*
* We don't do chaining yet. Therefore handling a CNAME is
* mostly the same as handling SUCCESS, but we didn't get
@@ -202,48 +288,78 @@ Query::process() const {
* So, just put it there.
*/
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
+
+ // If the answer is a result of wildcard substitution,
+ // add a proof that there's no closer name.
+ if (dnssec_ && db_result.code == ZoneFinder::WILDCARD_CNAME) {
+ addWildcardProof(*result.zone_finder);
+ }
break;
- case Zone::SUCCESS:
+ case ZoneFinder::SUCCESS:
+ case ZoneFinder::WILDCARD:
if (qtype_is_any) {
// If query type is ANY, insert all RRs under the domain
// into answer section.
BOOST_FOREACH(RRsetPtr rrset, *target) {
- response_.addRRset(Message::SECTION_ANSWER, rrset);
+ response_.addRRset(Message::SECTION_ANSWER, rrset,
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *rrset.get());
+ addAdditional(*result.zone_finder, *rrset.get());
}
} else {
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *db_result.rrset);
+ addAdditional(*result.zone_finder, *db_result.rrset);
}
// If apex NS records haven't been provided in the answer
// section, insert apex NS records into the authority section
// and AAAA/A RRS of each of the NS RDATA into the additional
// section.
- if (qname_ != result.zone->getOrigin() ||
- db_result.code != Zone::SUCCESS ||
+ if (qname_ != result.zone_finder->getOrigin() ||
+ db_result.code != ZoneFinder::SUCCESS ||
(qtype_ != RRType::NS() && !qtype_is_any))
{
- getAuthAdditional(*result.zone);
+ addAuthAdditional(*result.zone_finder);
+ }
+
+ // If the answer is a result of wildcard substitution,
+ // add a proof that there's no closer name.
+ if (dnssec_ && db_result.code == ZoneFinder::WILDCARD) {
+ addWildcardProof(*result.zone_finder);
}
break;
- case Zone::DELEGATION:
+ case ZoneFinder::DELEGATION:
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(db_result.rrset));
- getAdditional(*result.zone, *db_result.rrset);
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
+ addAdditional(*result.zone_finder, *db_result.rrset);
break;
- case Zone::NXDOMAIN:
- // Just empty answer with SOA in authority section
+ case ZoneFinder::NXDOMAIN:
response_.setRcode(Rcode::NXDOMAIN());
- putSOA(*result.zone);
+ addSOA(*result.zone_finder);
+ if (dnssec_ && db_result.rrset) {
+ addNXDOMAINProof(zfinder, db_result.rrset);
+ }
+ break;
+ case ZoneFinder::NXRRSET:
+ addSOA(*result.zone_finder);
+ if (dnssec_ && db_result.rrset) {
+ response_.addRRset(Message::SECTION_AUTHORITY,
+ boost::const_pointer_cast<RRset>(
+ db_result.rrset),
+ dnssec_);
+ }
break;
- case Zone::NXRRSET:
- // Just empty answer with SOA in authority section
- putSOA(*result.zone);
+ default:
+ // This is basically a bug of the data source implementation,
+ // but could also happen in the middle of development where
+ // we try to add a new result code.
+ isc_throw(isc::NotImplemented, "Unknown result code");
break;
}
}
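
The best-wildcard computation in addNXDOMAINProof() is just label arithmetic on the
query name and the NXDOMAIN NSEC. A worked sketch with concrete names, using only
the libdns++ Name calls already used above (illustrative, not part of the patch):

    using namespace isc::dns;

    const Name qname("a.b.example.com");
    const Name nsec_owner("b.example.com");   // owner of the NXDOMAIN NSEC

    const int qlabels = qname.getLabelCount();                      // 5, incl. root
    const int common = qname.compare(nsec_owner).getCommonLabels(); // 4: b.example.com.
    // Drop the labels the qname does not share, then prepend the "*" label.
    const Name wildname = Name("*").concatenate(qname.split(qlabels - common));
    // wildname is *.b.example.com., the name whose non-existence the second
    // find() call in addNXDOMAINProof() then has to prove.
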
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index e0c6323..3282c0d 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -26,7 +26,7 @@ class RRset;
}
namespace datasrc {
-class MemoryDataSrc;
+class DataSourceClient;
}
namespace auth {
@@ -36,10 +36,8 @@ namespace auth {
///
/// Many of the design details for this class are still in flux.
/// We'll revisit and update them as we add more functionality, for example:
-/// - memory_datasrc parameter of the constructor. It is a data source that
-/// uses in memory dedicated backend.
/// - as a related point, we may have to pass the RR class of the query.
-/// in the initial implementation the RR class is an attribute of memory
+/// in the initial implementation the RR class is an attribute of
/// datasource and omitted. It's not clear if this assumption holds with
/// generic data sources. On the other hand, it will help keep
/// implementation simpler, and we might rather want to modify the design
@@ -51,7 +49,7 @@ namespace auth {
/// separate attribute setter.
/// - likewise, we'll eventually need to do per zone access control, for which
/// we need querier's information such as its IP address.
-/// - memory_datasrc and response may better be parameters to process() instead
+/// - datasrc_client and response may better be parameters to process() instead
/// of the constructor.
///
/// <b>Note:</b> The class name is intentionally the same as the one used in
@@ -71,10 +69,21 @@ private:
/// Adds a SOA of the zone into the authority zone of response_.
/// Can throw NoSOA.
///
- void putSOA(const isc::datasrc::Zone& zone) const;
+ void addSOA(isc::datasrc::ZoneFinder& finder);
+
+ /// Add NSEC RRs that prove an NXDOMAIN result.
+ ///
+ /// This corresponds to Section 3.1.3.2 of RFC 4035.
+ void addNXDOMAINProof(isc::datasrc::ZoneFinder& finder,
+ isc::dns::ConstRRsetPtr nsec);
+
+ /// Add NSEC RRs that prove a wildcard answer is the best one.
+ ///
+ /// This corresponds to Section 3.1.3.3 of RFC 4035.
+ void addWildcardProof(isc::datasrc::ZoneFinder& finder);
/// \brief Look up additional data (i.e., address records for the names
- /// included in NS or MX records).
+ /// included in NS or MX records) and add them to the additional section.
///
/// Note: Any additional data which has already been provided in the
/// answer section (i.e., if the original query happened to be for the
@@ -83,12 +92,12 @@ private:
/// This method may throw an exception because its underlying methods may
/// throw exceptions.
///
- /// \param zone The Zone wherein the additional data to the query is bo be
- /// found.
+ /// \param zone The ZoneFinder through which the additional data for the
+ /// query is to be found.
/// \param rrset The RRset (i.e., NS or MX rrset) which require additional
/// processing.
- void getAdditional(const isc::datasrc::Zone& zone,
- const isc::dns::RRset& rrset) const;
+ void addAdditional(isc::datasrc::ZoneFinder& zone,
+ const isc::dns::RRset& rrset);
/// \brief Find address records for a specified name.
///
@@ -102,18 +111,19 @@ private:
/// The glue records must exactly match the name in the NS RDATA, without
/// CNAME or wildcard processing.
///
- /// \param zone The \c Zone wherein the address records is to be found.
+ /// \param zone The \c ZoneFinder through which the address records are to
+ /// be found.
/// \param qname The name in rrset RDATA.
/// \param options The search options.
- void findAddrs(const isc::datasrc::Zone& zone,
- const isc::dns::Name& qname,
- const isc::datasrc::Zone::FindOptions options
- = isc::datasrc::Zone::FIND_DEFAULT) const;
+ void addAdditionalAddrs(isc::datasrc::ZoneFinder& zone,
+ const isc::dns::Name& qname,
+ const isc::datasrc::ZoneFinder::FindOptions options
+ = isc::datasrc::ZoneFinder::FIND_DEFAULT);
- /// \brief Look up \c Zone's NS and address records for the NS RDATA
- /// (domain name) for authoritative answer.
+ /// \brief Look up a zone's NS RRset and their address records for an
+ /// authoritative answer, and add them to the additional section.
///
- /// On returning an authoritative answer, insert the \c Zone's NS into the
+ /// On returning an authoritative answer, insert a zone's NS into the
/// authority section and AAAA/A RRs of each of the NS RDATA into the
/// additional section.
///
@@ -126,25 +136,29 @@ private:
/// include AAAA/A RRs under a zone cut in additional section. (BIND 9
/// excludes under-cut RRs; NSD include them.)
///
- /// \param zone The \c Zone wherein the additional data to the query is to
- /// be found.
- void getAuthAdditional(const isc::datasrc::Zone& zone) const;
+ /// \param finder The \c ZoneFinder through which the NS and additional
+ /// data for the query are to be found.
+ void addAuthAdditional(isc::datasrc::ZoneFinder& finder);
public:
/// Constructor from query parameters.
///
/// This constructor never throws an exception.
///
- /// \param memory_datasrc The memory datasource wherein the answer to the query is
+ /// \param datasrc_client The datasource wherein the answer to the query is
/// to be found.
/// \param qname The query name
/// \param qtype The RR type of the query
/// \param response The response message to store the answer to the query.
- Query(const isc::datasrc::MemoryDataSrc& memory_datasrc,
+ /// \param dnssec If the answer should include signatures and NSEC/NSEC3 if
+ /// possible.
+ Query(const isc::datasrc::DataSourceClient& datasrc_client,
const isc::dns::Name& qname, const isc::dns::RRType& qtype,
- isc::dns::Message& response) :
- memory_datasrc_(memory_datasrc), qname_(qname), qtype_(qtype),
- response_(response)
+ isc::dns::Message& response, bool dnssec = false) :
+ datasrc_client_(datasrc_client), qname_(qname), qtype_(qtype),
+ response_(response), dnssec_(dnssec),
+ dnssec_opt_(dnssec ? isc::datasrc::ZoneFinder::FIND_DNSSEC :
+ isc::datasrc::ZoneFinder::FIND_DEFAULT)
{}
/// Process the query.
@@ -157,7 +171,7 @@ public:
/// successful search would result in adding a corresponding RRset to
/// the answer section of the response.
///
- /// If no matching zone is found in the memory datasource, the RCODE of
+ /// If no matching zone is found in the datasource, the RCODE of
/// SERVFAIL will be set in the response.
/// <b>Note:</b> this is different from the error code that BIND 9 returns
/// by default when it's configured as an authoritative-only server (and
@@ -173,7 +187,7 @@ public:
/// This might throw BadZone or any of its specific subclasses, but that
/// shouldn't happen in real-life (as BadZone means wrong data, it should
/// have been rejected upon loading).
- void process() const;
+ void process();
/// \short Bad zone data encountered.
///
@@ -207,11 +221,24 @@ public:
{}
};
+ /// An invalid result is given when a valid NSEC is expected
+ ///
+ /// This can only happen when the underlying data source implementation or
+ /// the zone is broken. By throwing an exception we treat such cases
+ /// as SERVFAIL.
+ struct BadNSEC : public BadZone {
+ BadNSEC(const char* file, size_t line, const char* what) :
+ BadZone(file, line, what)
+ {}
+ };
+
private:
- const isc::datasrc::MemoryDataSrc& memory_datasrc_;
+ const isc::datasrc::DataSourceClient& datasrc_client_;
const isc::dns::Name& qname_;
const isc::dns::RRType& qtype_;
isc::dns::Message& response_;
+ const bool dnssec_;
+ const isc::datasrc::ZoneFinder::FindOptions dnssec_opt_;
};
}
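
To make the reworked interface concrete, here is a minimal sketch of how a caller might drive the new Query class. The answerQuery() wrapper and the way the DataSourceClient and response Message are obtained are illustrative assumptions, not taken from this patch:

    // Illustrative sketch only: exercising the reworked Query interface.
    // 'client' may be any DataSourceClient implementation; 'response' is a
    // render-mode Message whose header/question the caller has prepared.
    #include <auth/query.h>
    #include <datasrc/client.h>
    #include <dns/message.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>

    void
    answerQuery(const isc::datasrc::DataSourceClient& client,
                const isc::dns::Name& qname, const isc::dns::RRType& qtype,
                isc::dns::Message& response)
    {
        // The trailing 'true' enables DNSSEC processing (FIND_DNSSEC), so
        // RRSIGs and NSEC proofs are added where the data source has them.
        isc::auth::Query query(client, qname, qtype, response, true);
        query.process();
    }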
diff --git a/src/bin/auth/spec_config.h.pre.in b/src/bin/auth/spec_config.h.pre.in
index 52581dd..1b1df19 100644
--- a/src/bin/auth/spec_config.h.pre.in
+++ b/src/bin/auth/spec_config.h.pre.in
@@ -1,16 +1,16 @@
-// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#define AUTH_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/auth.spec"
-#define UNIX_SOCKET_FILE "@@LOCALSTATEDIR@@/auth_xfrout_conn"
+// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define AUTH_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/auth.spec"
+#define UNIX_SOCKET_FILE "@@LOCALSTATEDIR@@/@PACKAGE@/auth_xfrout_conn"
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 415aa14..e62719f 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <auth/statistics.h>
+#include <auth/auth_log.h>
#include <cc/data.h>
#include <cc/session.h>
@@ -20,6 +21,8 @@
#include <sstream>
#include <iostream>
+using namespace isc::auth;
+
// TODO: We need a namespace ("auth_server"?) to hold
// AuthSrv and AuthCounters.
@@ -29,28 +32,26 @@ private:
AuthCountersImpl(const AuthCountersImpl& source);
AuthCountersImpl& operator=(const AuthCountersImpl& source);
public:
- // References verbose_mode flag in AuthSrvImpl
- // TODO: Fix this short term workaround for logging
- // after we have logging framework
- AuthCountersImpl(const bool& verbose_mode);
+ AuthCountersImpl();
~AuthCountersImpl();
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+ void registerStatisticsValidator
+ (AuthCounters::validator_type validator);
// Currently for testing purpose only
uint64_t getCounter(const AuthCounters::CounterType type) const;
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
- const bool& verbose_mode_;
+ AuthCounters::validator_type validator_;
};
-AuthCountersImpl::AuthCountersImpl(const bool& verbose_mode) :
+AuthCountersImpl::AuthCountersImpl() :
// initialize counter
// size: AuthCounters::COUNTER_TYPES, initial value: 0
counters_(AuthCounters::COUNTER_TYPES, 0),
- statistics_session_(NULL),
- verbose_mode_(verbose_mode)
+ statistics_session_(NULL)
{}
AuthCountersImpl::~AuthCountersImpl()
@@ -64,25 +65,30 @@ AuthCountersImpl::inc(const AuthCounters::CounterType type) {
bool
AuthCountersImpl::submitStatistics() const {
if (statistics_session_ == NULL) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "session interface for statistics"
- << " is not available" << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_NO_STATS_SESSION);
return (false);
}
std::stringstream statistics_string;
statistics_string << "{\"command\": [\"set\","
- << "{ \"stats_data\": "
- << "{ \"auth.queries.udp\": "
+ << "{ \"owner\": \"Auth\","
+ << " \"data\":"
+ << "{ \"queries.udp\": "
<< counters_.at(AuthCounters::COUNTER_UDP_QUERY)
- << ", \"auth.queries.tcp\": "
+ << ", \"queries.tcp\": "
<< counters_.at(AuthCounters::COUNTER_TCP_QUERY)
<< " }"
<< "}"
<< "]}";
isc::data::ConstElementPtr statistics_element =
isc::data::Element::fromJSON(statistics_string);
+ // validate the statistics data before sending it
+ if (validator_) {
+ if (!validator_(
+ statistics_element->get("command")->get(1)->get("data"))) {
+ LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+ return (false);
+ }
+ }
try {
// group_{send,recv}msg() can throw an exception when encountering
// an error, and group_recvmsg() will throw an exception on timeout.
@@ -95,18 +101,10 @@ AuthCountersImpl::submitStatistics() const {
// currently it just returns empty message
statistics_session_->group_recvmsg(env, answer, false, seq);
} catch (const isc::cc::SessionError& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "communication error in sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_COMMS).arg(ex.what());
return (false);
} catch (const isc::cc::SessionTimeout& ex) {
- if (verbose_mode_) {
- std::cerr << "[b10-auth] "
- << "timeout happened while sending statistics data: "
- << ex.what() << std::endl;
- }
+ LOG_ERROR(auth_logger, AUTH_STATS_TIMEOUT).arg(ex.what());
return (false);
}
return (true);
@@ -119,14 +117,20 @@ AuthCountersImpl::setStatisticsSession
statistics_session_ = statistics_session;
}
+void
+AuthCountersImpl::registerStatisticsValidator
+ (AuthCounters::validator_type validator)
+{
+ validator_ = validator;
+}
+
// Currently for testing purpose only
uint64_t
AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
return (counters_.at(type));
}
-AuthCounters::AuthCounters(const bool& verbose_mode) :
- impl_(new AuthCountersImpl(verbose_mode))
+AuthCounters::AuthCounters() : impl_(new AuthCountersImpl())
{}
AuthCounters::~AuthCounters() {
@@ -154,3 +158,10 @@ uint64_t
AuthCounters::getCounter(const AuthCounters::CounterType type) const {
return (impl_->getCounter(type));
}
+
+void
+AuthCounters::registerStatisticsValidator
+ (AuthCounters::validator_type validator) const
+{
+ return (impl_->registerStatisticsValidator(validator));
+}
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 9e5240e..c930414 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -61,15 +61,10 @@ public:
};
/// The constructor.
///
- /// \param verbose_mode reference to verbose_mode_ of AuthSrvImpl
- ///
/// This constructor is mostly exception free. But it may still throw
/// a standard exception if memory allocation fails inside the method.
///
- /// \todo Fix this short term workaround for logging
- /// after we have logging framework.
- ///
- AuthCounters(const bool& verbose_mode);
+ AuthCounters();
/// The destructor.
///
/// This method never throws an exception.
@@ -136,6 +131,26 @@ public:
/// \return the value of the counter specified by \a type.
///
uint64_t getCounter(const AuthCounters::CounterType type) const;
+
+ /// \brief A type of validation function for the specification in
+ /// isc::config::ModuleSpec.
+ ///
+ /// This type might be useful not only for statistics
+ /// specification but also for config_data specification and for
+ /// commands.
+ ///
+ typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+ validator_type;
+
+ /// \brief Register a statistics validation function for
+ /// AuthCounters.
+ ///
+ /// This method never throws an exception.
+ ///
+ /// \param validator A function that validates statistics data
+ /// against the statistics specification.
+ ///
+ void registerStatisticsValidator(AuthCounters::validator_type validator) const;
};
#endif // __STATISTICS_H
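
A minimal sketch of how the new hook might be used; validateStatistics() below is a hypothetical stand-in for a real specification-based check, and only the names appearing in the diff above (AuthCounters, validator_type, registerStatisticsValidator) are taken as given:

    // Illustrative sketch only: registering a statistics validator.
    #include <auth/statistics.h>
    #include <cc/data.h>

    // Hypothetical validator: accept any map-typed element.  A real one
    // would check the data against the statistics part of the module spec.
    bool
    validateStatistics(const isc::data::ConstElementPtr& data) {
        return (data && data->getType() == isc::data::Element::map);
    }

    void
    setupCounters(AuthCounters& counters) {
        // Once registered, submitStatistics() refuses to send data the
        // validator rejects (logging AUTH_INVALID_STATISTICS_DATA).
        counters.registerStatisticsValidator(validateStatistics);
    }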
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 050373a..d27386e 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -22,6 +22,7 @@ TESTS += run_unittests
run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += ../auth_srv.h ../auth_srv.cc
+run_unittests_SOURCES += ../auth_log.h ../auth_log.cc
run_unittests_SOURCES += ../query.h ../query.cc
run_unittests_SOURCES += ../change_user.h ../change_user.cc
run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
@@ -36,6 +37,16 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
+
+nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
@@ -43,6 +54,7 @@ run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
@@ -52,6 +64,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index a77f7e6..4698588 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -16,6 +16,8 @@
#include <vector>
+#include <boost/shared_ptr.hpp>
+
#include <gtest/gtest.h>
#include <dns/message.h>
@@ -25,8 +27,10 @@
#include <dns/rrtype.h>
#include <dns/rrttl.h>
#include <dns/rdataclass.h>
+#include <dns/tsig.h>
#include <server_common/portconfig.h>
+#include <server_common/keyring.h>
#include <datasrc/memory_datasrc.h>
#include <auth/auth_srv.h>
@@ -50,6 +54,7 @@ using namespace isc::asiolink;
using namespace isc::testutils;
using namespace isc::server_common::portconfig;
using isc::UnitTestUtil;
+using boost::shared_ptr;
namespace {
const char* const CONFIG_TESTDB =
@@ -185,15 +190,6 @@ TEST_F(AuthSrvTest, unsupportedRequest) {
unsupportedRequest();
}
-// Simple API check
-TEST_F(AuthSrvTest, verbose) {
- EXPECT_FALSE(server.getVerbose());
- server.setVerbose(true);
- EXPECT_TRUE(server.getVerbose());
- server.setVerbose(false);
- EXPECT_FALSE(server.getVerbose());
-}
-
// Multiple questions. Should result in FORMERR.
TEST_F(AuthSrvTest, multiQuestion) {
multiQuestion();
@@ -242,6 +238,139 @@ TEST_F(AuthSrvTest, AXFRSuccess) {
EXPECT_TRUE(xfrout.isConnected());
}
+// Try giving the server a TSIG signed request and see that it can answer
+// signed as well
+TEST_F(AuthSrvTest, TSIGSigned) {
+ // Prepare key, the client message, etc
+ const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Run the message through the server
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(key);
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ // What did we get?
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::NOERROR(),
+ opcode.getCode(), QR_FLAG | AA_FLAG, 1, 1, 1, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) << "Missing TSIG signature";
+ TSIGError error(context.verify(tsig, response_obuffer->getData(),
+ response_obuffer->getLength()));
+ EXPECT_EQ(TSIGError::NOERROR(), error) <<
+ "The server signed the response, but it doesn't seem to be valid";
+}
+
+// Give the server a signed request, but don't give it the key. It will
+// not be able to verify it, returning BADKEY
+TEST_F(AuthSrvTest, TSIGSignedBadKey) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_KEY().toRcode(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_KEY_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
+// Give the server a signed request, but signed by a different key
+// (with the same name). It should return BADSIG
+TEST_F(AuthSrvTest, TSIGBadSig) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("version.bind"), RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_SIG().toRcode(),
+ opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_SIG_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
+// Give the server a signed unsupported request with a bad signature.
+// This checks that the server verifies the signature before doing anything
+// else.
+TEST_F(AuthSrvTest, TSIGCheckFirst) {
+ TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
+ TSIGContext context(key);
+ // Pass a wrong opcode there. The server shouldn't know what to do
+ // about it.
+ UnitTestUtil::createRequestMessage(request_message, Opcode::RESERVED14(),
+ default_qid, Name("version.bind"),
+ RRClass::CH(), RRType::TXT());
+ createRequestPacket(request_message, IPPROTO_UDP, &context);
+
+ // Process the message, but use a different key there
+ shared_ptr<TSIGKeyRing> keyring(new TSIGKeyRing);
+ keyring->add(TSIGKey("key:QkFECg==:hmac-sha1"));
+ server.setTSIGKeyRing(&keyring);
+ server.processMessage(*io_message, parse_message, response_obuffer,
+ &dnsserv);
+
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, TSIGError::BAD_SIG().toRcode(),
+ Opcode::RESERVED14().getCode(), QR_FLAG, 0, 0, 0, 0);
+ // We need to parse the message ourself, or getTSIGRecord won't work
+ InputBuffer ib(response_obuffer->getData(), response_obuffer->getLength());
+ Message m(Message::PARSE);
+ m.fromWire(ib);
+
+ const TSIGRecord* tsig = m.getTSIGRecord();
+ ASSERT_TRUE(tsig != NULL) <<
+ "Missing TSIG signature (we should have one even at error)";
+ EXPECT_EQ(TSIGError::BAD_SIG_CODE, tsig->getRdata().getError());
+ EXPECT_EQ(0, tsig->getRdata().getMACSize()) <<
+ "It should be unsigned with this error";
+}
+
TEST_F(AuthSrvTest, AXFRConnectFail) {
EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
xfrout.disableConnect();
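
The verification half of the new TSIG tests above always follows the same pattern; the helper below is a condensed sketch of that pattern (the function name is ours), assuming 'context' is the TSIGContext that signed the request:

    // Illustrative sketch only: check a rendered response the way the TSIG
    // tests above do.  'data'/'len' are the wire-format response bytes.
    #include <dns/message.h>
    #include <dns/tsig.h>
    #include <util/buffer.h>

    bool
    responseSignatureIsValid(isc::dns::TSIGContext& context,
                             const void* data, size_t len)
    {
        // getTSIGRecord() only works on a message parsed from wire, so
        // re-parse the rendered data first.
        isc::util::InputBuffer ib(data, len);
        isc::dns::Message msg(isc::dns::Message::PARSE);
        msg.fromWire(ib);

        const isc::dns::TSIGRecord* tsig = msg.getTSIGRecord();
        if (tsig == NULL) {
            return (false);         // the response isn't signed at all
        }
        return (context.verify(tsig, data, len) ==
                isc::dns::TSIGError::NOERROR());
    }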
@@ -522,17 +651,17 @@ TEST_F(AuthSrvTest, updateConfigFail) {
QR_FLAG | AA_FLAG, 1, 1, 1, 0);
}
-TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, updateWithInMemoryClient) {
// Test configuring memory data source. Detailed test cases are covered
// in the configuration tests. We only check the AuthSrv interface here.
// By default memory data source isn't enabled
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
updateConfig(&server,
"{\"datasources\": [{\"type\": \"memory\"}]}", true);
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
// The memory data source is empty, should return REFUSED rcode.
createDataFromFile("examplequery_fromWire.wire");
@@ -543,7 +672,7 @@ TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
}
-TEST_F(AuthSrvTest, chQueryWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, chQueryWithInMemoryClient) {
// Configure memory data source for class IN
updateConfig(&server, "{\"datasources\": "
"[{\"class\": \"IN\", \"type\": \"memory\"}]}", true);
diff --git a/src/bin/auth/tests/command_unittest.cc b/src/bin/auth/tests/command_unittest.cc
index 3fdd086..8a82367 100644
--- a/src/bin/auth/tests/command_unittest.cc
+++ b/src/bin/auth/tests/command_unittest.cc
@@ -48,9 +48,9 @@ using namespace isc::datasrc;
using namespace isc::config;
namespace {
-class AuthConmmandTest : public ::testing::Test {
+class AuthCommandTest : public ::testing::Test {
protected:
- AuthConmmandTest() : server(false, xfrout), rcode(-1) {
+ AuthCommandTest() : server(false, xfrout), rcode(-1) {
server.setStatisticsSession(&statistics_session);
}
void checkAnswer(const int expected_code) {
@@ -60,21 +60,20 @@ protected:
MockSession statistics_session;
MockXfroutClient xfrout;
AuthSrv server;
- AuthSrv::ConstMemoryDataSrcPtr memory_datasrc;
ConstElementPtr result;
int rcode;
public:
void stopServer(); // need to be public for boost::bind
};
-TEST_F(AuthConmmandTest, unknownCommand) {
+TEST_F(AuthCommandTest, unknownCommand) {
result = execAuthServerCommand(server, "no_such_command",
ConstElementPtr());
parseAnswer(rcode, result);
EXPECT_EQ(1, rcode);
}
-TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
+TEST_F(AuthCommandTest, DISABLED_unexpectedException) {
// execAuthServerCommand() won't catch standard exceptions.
// Skip this test for now: ModuleCCSession doesn't seem to validate
// commands.
@@ -83,7 +82,7 @@ TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
runtime_error);
}
-TEST_F(AuthConmmandTest, sendStatistics) {
+TEST_F(AuthCommandTest, sendStatistics) {
result = execAuthServerCommand(server, "sendstats", ConstElementPtr());
// Just check some message has been sent. Detailed tests specific to
// statistics are done in its own tests.
@@ -92,15 +91,15 @@ TEST_F(AuthConmmandTest, sendStatistics) {
}
void
-AuthConmmandTest::stopServer() {
+AuthCommandTest::stopServer() {
result = execAuthServerCommand(server, "shutdown", ConstElementPtr());
parseAnswer(rcode, result);
assert(rcode == 0); // make sure the test stops when something is wrong
}
-TEST_F(AuthConmmandTest, shutdown) {
+TEST_F(AuthCommandTest, shutdown) {
isc::asiolink::IntervalTimer itimer(server.getIOService());
- itimer.setup(boost::bind(&AuthConmmandTest::stopServer, this), 1);
+ itimer.setup(boost::bind(&AuthCommandTest::stopServer, this), 1);
server.getIOService().run();
EXPECT_EQ(0, rcode);
}
@@ -110,18 +109,18 @@ TEST_F(AuthConmmandTest, shutdown) {
// zones, and checks the zones are correctly loaded.
void
zoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
@@ -147,25 +146,25 @@ configureZones(AuthSrv& server) {
void
newZoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
// now test1.example should have ns/AAAA
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
// test2.example shouldn't change
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
-TEST_F(AuthConmmandTest, loadZone) {
+TEST_F(AuthCommandTest, loadZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -182,7 +181,7 @@ TEST_F(AuthConmmandTest, loadZone) {
newZoneChecks(server);
}
-TEST_F(AuthConmmandTest, loadBrokenZone) {
+TEST_F(AuthCommandTest, loadBrokenZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -195,7 +194,7 @@ TEST_F(AuthConmmandTest, loadBrokenZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadUnreadableZone) {
+TEST_F(AuthCommandTest, loadUnreadableZone) {
configureZones(server);
// install the zone file as unreadable
@@ -209,7 +208,7 @@ TEST_F(AuthConmmandTest, loadUnreadableZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
+TEST_F(AuthCommandTest, loadZoneWithoutDataSrc) {
// try to execute load command without configuring the zone beforehand.
// it should fail.
result = execAuthServerCommand(server, "loadzone",
@@ -218,7 +217,7 @@ TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
checkAnswer(1);
}
-TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
+TEST_F(AuthCommandTest, loadSqlite3DataSrc) {
// For sqlite3 data source we don't have to do anything (the data source
// (re)loads itself automatically)
result = execAuthServerCommand(server, "loadzone",
@@ -228,7 +227,7 @@ TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
checkAnswer(0);
}
-TEST_F(AuthConmmandTest, loadZoneInvalidParams) {
+TEST_F(AuthCommandTest, loadZoneInvalidParams) {
configureZones(server);
// null arg
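
All of the zoneChecks()/newZoneChecks() assertions above reduce to one lookup chain through the renamed API; a condensed sketch (the helper name is ours):

    // Illustrative sketch only: the in-memory lookup chain used by the
    // checks above, going through the renamed AuthSrv/ZoneFinder interfaces.
    #include <auth/auth_srv.h>
    #include <datasrc/memory_datasrc.h>
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    isc::datasrc::ZoneFinder::Result
    lookupCode(AuthSrv& server, const isc::dns::Name& name,
               const isc::dns::RRType& type)
    {
        // getInMemoryClient() replaces getMemoryDataSrc(), and findZone()
        // now hands back a zone_finder rather than a zone.
        return (server.getInMemoryClient(isc::dns::RRClass::IN())->
                findZone(name).zone_finder->find(name, type).code);
    }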
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 7658b84..dadb0ee 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -57,12 +57,12 @@ protected:
TEST_F(AuthConfigTest, datasourceConfig) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
configureAuthServer(server, Element::fromJSON(
"{\"datasources\": [{\"type\": \"memory\"}]}"));
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(AuthConfigTest, databaseConfig) {
@@ -74,8 +74,15 @@ TEST_F(AuthConfigTest, databaseConfig) {
"{\"database_file\": \"should_be_ignored\"}")));
}
+TEST_F(AuthConfigTest, versionConfig) {
+ // make sure it does not throw on 'version'
+ EXPECT_NO_THROW(configureAuthServer(
+ server,
+ Element::fromJSON("{\"version\": 0}")));
+}
+
TEST_F(AuthConfigTest, exceptionGuarantee) {
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
// This configuration contains an invalid item, which will trigger
// an exception.
EXPECT_THROW(configureAuthServer(
@@ -85,7 +92,7 @@ TEST_F(AuthConfigTest, exceptionGuarantee) {
" \"no_such_config_var\": 1}")),
AuthConfigError);
// The server state shouldn't change
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(AuthConfigTest, exceptionConversion) {
@@ -147,22 +154,22 @@ protected:
TEST_F(MemoryDatasrcConfigTest, addZeroDataSrc) {
parser->build(Element::fromJSON("[]"));
parser->commit();
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, addEmpty) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
parser->build(Element::fromJSON("[{\"type\": \"memory\"}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addZeroZone) {
parser->build(Element::fromJSON("[{\"type\": \"memory\","
" \"zones\": []}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addOneZone) {
@@ -172,10 +179,10 @@ TEST_F(MemoryDatasrcConfigTest, addOneZone) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
// Check it actually loaded something
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(rrclass)->findZone(
- Name("ns.example.com.")).zone->find(Name("ns.example.com."),
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(rrclass)->findZone(
+ Name("ns.example.com.")).zone_finder->find(Name("ns.example.com."),
RRType::A()).code);
}
@@ -192,7 +199,7 @@ TEST_F(MemoryDatasrcConfigTest, addMultiZones) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(3, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(3, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, replace) {
@@ -202,9 +209,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and install a new set of configuration. It
@@ -220,9 +227,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(2, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(2, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::NOTFOUND,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -234,9 +241,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and try to load something. It will throw,
@@ -255,9 +262,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
// commit it
// The original should be untouched
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -268,13 +275,13 @@ TEST_F(MemoryDatasrcConfigTest, remove) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
delete parser;
parser = createAuthConfigParser(server, "datasources");
EXPECT_NO_THROW(parser->build(Element::fromJSON("[]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
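
Reduced to its essentials, the configuration round-trip these tests exercise looks roughly like the sketch below; the loadOneMemoryZone() wrapper and the zone file path are placeholders, while configureAuthServer() and the JSON shape follow the test code above:

    // Illustrative sketch only: configure a single in-memory zone the way
    // the tests above do.  After this, the client for class IN should
    // report getZoneCount() == 1.
    #include <auth/auth_config.h>
    #include <auth/auth_srv.h>
    #include <cc/data.h>

    void
    loadOneMemoryZone(AuthSrv& server) {
        configureAuthServer(server, isc::data::Element::fromJSON(
            "{\"datasources\": ["
            " {\"type\": \"memory\","
            "  \"zones\": [{\"origin\": \"example.com\","
            "               \"file\": \"/path/to/example.zone\"}]}]}"));
    }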
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index c68b672..16a2409 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -17,6 +17,7 @@
#include <map>
#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
#include <dns/masterload.h>
#include <dns/message.h>
@@ -91,11 +92,67 @@ const char* const other_zone_rrs =
"cnamemailer.example.com. 3600 IN CNAME www.example.com.\n"
"cnamemx.example.com. 3600 IN MX 10 cnamemailer.example.com.\n"
"mx.delegation.example.com. 3600 IN A 192.0.2.100\n";
+// Wildcards
+const char* const wild_txt = "*.wild.example.com. 3600 IN A 192.0.2.7\n";
+const char* const nsec_wild_txt =
+ "*.wild.example.com. 3600 IN NSEC www.example.com. A NSEC RRSIG\n";
+const char* const cnamewild_txt =
+ "*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
+const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
+ "3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
+// Used in NXDOMAIN proof test. We are going to test some unusual case where
+// the best possible wildcard is below the "next domain" of the NSEC RR that
+// proves the NXDOMAIN, i.e.,
+// mx.example.com. (exist)
+// (.no.example.com. (qname, NXDOMAIN)
+// ).no.example.com. (exist)
+// *.no.example.com. (best possible wildcard, not exist)
+const char* const no_txt =
+ ").no.example.com. 3600 IN AAAA 2001:db8::53\n";
+// NSEC records.
+const char* const nsec_apex_txt =
+ "example.com. 3600 IN NSEC cname.example.com. NS SOA NSEC RRSIG\n";
+const char* const nsec_mx_txt =
+ "mx.example.com. 3600 IN NSEC ).no.example.com. MX NSEC RRSIG\n";
+const char* const nsec_no_txt =
+ ").no.example.com. 3600 IN NSEC nz.no.example.com. AAAA NSEC RRSIG\n";
+
+// We'll also test the case where a single NSEC proves both NXDOMAIN and the
+// non existence of wildcard. The following records will be used for that
+// test.
+// ).no.example.com. (exist, whose NSEC proves everything)
+// *.no.example.com. (best possible wildcard, not exist)
+// nx.no.example.com. (NXDOMAIN)
+// nz.no.example.com. (exist)
+const char* const nz_txt =
+ "nz.no.example.com. 3600 IN AAAA 2001:db8::5300\n";
+const char* const nsec_nz_txt =
+ "nz.no.example.com. 3600 IN NSEC noglue.example.com. AAAA NSEC RRSIG\n";
+const char* const nsec_nxdomain_txt =
+ "noglue.example.com. 3600 IN NSEC nonsec.example.com. A\n";
+
+// NSEC for the normal NXRRSET case
+const char* const nsec_www_txt =
+ "www.example.com. 3600 IN NSEC example.com. A NSEC RRSIG\n";
+
+// Authoritative data without NSEC
+const char* const nonsec_a_txt = "nonsec.example.com. 3600 IN A 192.0.2.0\n";
+
+// A helper function that generates a textual representation of RRSIG RDATA
+// for the given covered type. The resulting RRSIG may not necessarily make
+// sense in terms of the DNSSEC protocol, but for our testing purposes it's
+// okay.
+string
+getCommonRRSIGText(const string& type) {
+ return (type +
+ string(" 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE"));
+}
-// This is a mock Zone class for testing.
-// It is a derived class of Zone for the convenient of tests.
+// This is a mock Zone Finder class for testing.
+// It is a derived class of ZoneFinder for the convenience of tests.
// Its find() method emulates the common behavior of protocol compliant
-// zone classes, but simplifies some minor cases and also supports broken
+// ZoneFinder classes, but simplifies some minor cases and also supports broken
// behavior.
// For simplicity, most names are assumed to be "in zone"; there's only
// one zone cut at the point of name "delegation.example.com".
@@ -103,31 +160,41 @@ const char* const other_zone_rrs =
// will result in DNAME.
// This mock zone doesn't handle empty non terminal nodes (if we need to test
// such cases find() should have specialized code for it).
-class MockZone : public Zone {
+class MockZoneFinder : public ZoneFinder {
public:
- MockZone() :
+ MockZoneFinder() :
origin_(Name("example.com")),
delegation_name_("delegation.example.com"),
dname_name_("dname.example.com"),
has_SOA_(true),
has_apex_NS_(true),
- rrclass_(RRClass::IN())
+ rrclass_(RRClass::IN()),
+ include_rrsig_anyway_(false),
+ nsec_name_(origin_)
{
stringstream zone_stream;
zone_stream << soa_txt << zone_ns_txt << ns_addrs_txt <<
delegation_txt << mx_txt << www_a_txt << cname_txt <<
cname_nxdom_txt << cname_out_txt << dname_txt << dname_a_txt <<
- other_zone_rrs;
+ other_zone_rrs << no_txt << nz_txt <<
+ nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
+ nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
+ wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt;
masterLoad(zone_stream, origin_, rrclass_,
- boost::bind(&MockZone::loadRRset, this, _1));
+ boost::bind(&MockZoneFinder::loadRRset, this, _1));
+
+ empty_nsec_rrset_ = ConstRRsetPtr(new RRset(Name::ROOT_NAME(),
+ RRClass::IN(),
+ RRType::NSEC(),
+ RRTTL(3600)));
}
- virtual const isc::dns::Name& getOrigin() const { return (origin_); }
- virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+ virtual isc::dns::Name getOrigin() const { return (origin_); }
+ virtual isc::dns::RRClass getClass() const { return (rrclass_); }
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
// If false is passed, it makes the zone broken as if it didn't have the
// SOA.
@@ -137,11 +204,32 @@ public:
// the apex NS.
void setApexNSFlag(bool on) { has_apex_NS_ = on; }
+ // Turn this on if you want it to return RRSIGs regardless of FIND_GLUE_OK
+ void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+
+ // Once called, this "faked" result will be returned when NSEC is expected
+ // for the specified query name.
+ void setNSECResult(const Name& nsec_name, Result code,
+ ConstRRsetPtr rrset)
+ {
+ nsec_name_ = nsec_name;
+ nsec_result_.reset(new ZoneFinder::FindResult(code, rrset));
+ }
+
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+ }
+
+public:
+ // We allow the tests to use these for convenience
+ ConstRRsetPtr delegation_rrset_;
+ ConstRRsetPtr empty_nsec_rrset_;
+
private:
typedef map<RRType, ConstRRsetPtr> RRsetStore;
typedef map<Name, RRsetStore> Domains;
Domains domains_;
- void loadRRset(ConstRRsetPtr rrset) {
+ void loadRRset(RRsetPtr rrset) {
domains_[rrset->getName()][rrset->getType()] = rrset;
if (rrset->getName() == delegation_name_ &&
rrset->getType() == RRType::NS()) {
@@ -149,6 +237,20 @@ private:
} else if (rrset->getName() == dname_name_ &&
rrset->getType() == RRType::DNAME()) {
dname_rrset_ = rrset;
+ // Add some signatures
+ } else if (rrset->getName() == Name("example.com.") &&
+ rrset->getType() == RRType::NS()) {
+ // For NS, we only have RRSIG for the origin name.
+ rrset->addRRsig(RdataPtr(new generic::RRSIG(
+ getCommonRRSIGText("NS"))));
+ } else {
+ // For others generate RRSIG unconditionally. Technically this
+ // is wrong because we shouldn't have it for names under a zone
+ // cut. But in our tests that doesn't matter, so we add them
+ // just for simplicity.
+ rrset->addRRsig(RdataPtr(new generic::RRSIG(
+ getCommonRRSIGText(rrset->getType().
+ toText()))));
}
}
@@ -158,14 +260,35 @@ private:
const Name dname_name_;
bool has_SOA_;
bool has_apex_NS_;
- ConstRRsetPtr delegation_rrset_;
ConstRRsetPtr dname_rrset_;
const RRClass rrclass_;
+ bool include_rrsig_anyway_;
+ // The following two will be used for faked NSEC cases
+ Name nsec_name_;
+ boost::scoped_ptr<ZoneFinder::FindResult> nsec_result_;
};
-Zone::FindResult
-MockZone::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+// A helper function that generates a new RRset based on "wild_rrset",
+// replacing its owner name with 'real_name'.
+ConstRRsetPtr
+substituteWild(const RRset& wild_rrset, const Name& real_name) {
+ RRsetPtr rrset(new RRset(real_name, wild_rrset.getClass(),
+ wild_rrset.getType(), wild_rrset.getTTL()));
+ // For simplicity we only consider the case with one RDATA (for now)
+ rrset->addRdata(wild_rrset.getRdataIterator()->getCurrent());
+ ConstRRsetPtr wild_sig = wild_rrset.getRRsig();
+ if (wild_sig) {
+ RRsetPtr sig(new RRset(real_name, wild_sig->getClass(),
+ wild_sig->getType(), wild_sig->getTTL()));
+ sig->addRdata(wild_sig->getRdataIterator()->getCurrent());
+ rrset->addRRsig(sig);
+ }
+ return (rrset);
+}
+
+ZoneFinder::FindResult
+MockZoneFinder::find(const Name& name, const RRType& type,
+ RRsetList* target, const FindOptions options)
{
// Emulating a broken zone: mandatory apex RRs are missing if specifically
// configured so (which are rare cases).
@@ -195,7 +318,26 @@ MockZone::find(const Name& name, const RRType& type,
RRsetStore::const_iterator found_rrset =
found_domain->second.find(type);
if (found_rrset != found_domain->second.end()) {
- return (FindResult(SUCCESS, found_rrset->second));
+ ConstRRsetPtr rrset;
+ // Strip whatever signature there is in case DNSSEC is not required
+ // Just to make sure the Query asks for it when it is needed
+ if (options & ZoneFinder::FIND_DNSSEC ||
+ include_rrsig_anyway_ ||
+ !found_rrset->second->getRRsig()) {
+ rrset = found_rrset->second;
+ } else {
+ RRsetPtr noconst(new RRset(found_rrset->second->getName(),
+ found_rrset->second->getClass(),
+ found_rrset->second->getType(),
+ found_rrset->second->getTTL()));
+ for (RdataIteratorPtr
+ i(found_rrset->second->getRdataIterator());
+ !i->isLast(); i->next()) {
+ noconst->addRdata(i->getCurrent());
+ }
+ rrset = noconst;
+ }
+ return (FindResult(SUCCESS, rrset));
}
// If not found but we have a target, fill it with all RRsets here
@@ -216,10 +358,94 @@ MockZone::find(const Name& name, const RRType& type,
}
// Otherwise it's NXRRSET case.
+ if ((options & FIND_DNSSEC) != 0) {
+ found_rrset = found_domain->second.find(RRType::NSEC());
+ if (found_rrset != found_domain->second.end()) {
+ return (FindResult(NXRRSET, found_rrset->second));
+ }
+ }
return (FindResult(NXRRSET, RRsetPtr()));
}
- // query name isn't found in our domains. returns NXDOMAIN.
+ // query name isn't found in our domains.
+ // We first check if the query name is an empty non terminal name
+ // of the zone by naive linear search.
+ Domains::const_iterator domain;
+ for (domain = domains_.begin(); domain != domains_.end(); ++domain) {
+ if (name.compare((*domain).first).getRelation() ==
+ NameComparisonResult::SUPERDOMAIN) {
+ break;
+ }
+ }
+ if (domain != domains_.end()) {
+ // The query name is in an empty non terminal node followed by 'domain'
+ // (for simplicity we ignore the pathological case where 'domain' is
+ // the origin of the zone)
+ --domain; // reset domain to the "previous name"
+ if ((options & FIND_DNSSEC) != 0) {
+ RRsetStore::const_iterator found_rrset =
+ (*domain).second.find(RRType::NSEC());
+ if (found_rrset != (*domain).second.end()) {
+ return (FindResult(NXRRSET, found_rrset->second));
+ }
+ }
+ return (FindResult(NXRRSET, RRsetPtr()));
+ }
+
+ // Another possibility is a wildcard match. For simplicity we only check
+ // hardcoded specific cases, ignoring other details such as the wildcard
+ // being canceled by the existence of a closer name.
+ if ((options & NO_WILDCARD) == 0) {
+ const Name wild_suffix("wild.example.com");
+ if (name.compare(wild_suffix).getRelation() ==
+ NameComparisonResult::SUBDOMAIN) {
+ domain = domains_.find(Name("*").concatenate(wild_suffix));
+ assert(domain != domains_.end());
+ RRsetStore::const_iterator found_rrset = domain->second.find(type);
+ assert(found_rrset != domain->second.end());
+ return (FindResult(WILDCARD,
+ substituteWild(*found_rrset->second, name)));
+ }
+ const Name cnamewild_suffix("cnamewild.example.com");
+ if (name.compare(cnamewild_suffix).getRelation() ==
+ NameComparisonResult::SUBDOMAIN) {
+ domain = domains_.find(Name("*").concatenate(cnamewild_suffix));
+ assert(domain != domains_.end());
+ RRsetStore::const_iterator found_rrset =
+ domain->second.find(RRType::CNAME());
+ assert(found_rrset != domain->second.end());
+ return (FindResult(WILDCARD_CNAME,
+ substituteWild(*found_rrset->second, name)));
+ }
+ }
+
+ // This is an NXDOMAIN case.
+ // If we need DNSSEC proof, find the "previous name" that has an NSEC RR
+ // and return NXDOMAIN with the found NSEC. Otherwise, just return the
+ // NXDOMAIN code and NULL. If DNSSEC proof is requested but no NSEC is
+ // found, we return NULL, too. (For simplicity under the test conditions
+ // we don't care about pathological cases such as the name being "smaller"
+ // than the origin)
+ if ((options & FIND_DNSSEC) != 0) {
+ // Emulate a broken DataSourceClient for some special names.
+ if (nsec_result_ && nsec_name_ == name) {
+ return (*nsec_result_);
+ }
+
+ // Normal case
+ // XXX: some older g++ complains about operator!= if we use
+ // const_reverse_iterator
+ for (Domains::reverse_iterator it = domains_.rbegin();
+ it != domains_.rend();
+ ++it) {
+ RRsetStore::const_iterator nsec_it;
+ if ((*it).first < name &&
+ (nsec_it = (*it).second.find(RRType::NSEC()))
+ != (*it).second.end()) {
+ return (FindResult(NXDOMAIN, (*nsec_it).second));
+ }
+ }
+ }
return (FindResult(NXDOMAIN, RRsetPtr()));
}
@@ -233,11 +459,15 @@ protected:
response.setRcode(Rcode::NOERROR());
response.setOpcode(Opcode::QUERY());
// create and add a matching zone.
- mock_zone = new MockZone();
- memory_datasrc.addZone(ZonePtr(mock_zone));
+ mock_finder = new MockZoneFinder();
+ memory_client.addZone(ZoneFinderPtr(mock_finder));
}
- MockZone* mock_zone;
- MemoryDataSrc memory_datasrc;
+ MockZoneFinder* mock_finder;
+ // We use InMemoryClient here. We could have some kind of mock client
+ // here, but historically, the Query supported only InMemoryClient
+ // (originally named MemoryDataSrc) and was tested with it, so we keep
+ // it like this for now.
+ InMemoryClient memory_client;
const Name qname;
const RRClass qclass;
const RRType qtype;
@@ -286,24 +516,76 @@ responseCheck(Message& response, const isc::dns::Rcode& rcode,
TEST_F(QueryTest, noZone) {
// There's no zone in the memory datasource. So the response should have
// REFUSED.
- MemoryDataSrc empty_memory_datasrc;
- Query nozone_query(empty_memory_datasrc, qname, qtype, response);
+ InMemoryClient empty_memory_client;
+ Query nozone_query(empty_memory_client, qname, qtype, response);
EXPECT_NO_THROW(nozone_query.process());
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
TEST_F(QueryTest, exactMatch) {
- Query query(memory_datasrc, qname, qtype, response);
+ Query query(memory_client, qname, qtype, response);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
+ www_a_txt, zone_ns_txt, ns_addrs_txt);
+}
+
+TEST_F(QueryTest, exactMatchIgnoreSIG) {
+ // Check that we do not include the RRSIG when not requested even when
+ // we receive it from the data source.
+ mock_finder->setIncludeRRSIGAnyway(true);
+ Query query(memory_client, qname, qtype, response);
EXPECT_NO_THROW(query.process());
// find match rrset
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
www_a_txt, zone_ns_txt, ns_addrs_txt);
}
+TEST_F(QueryTest, dnssecPositive) {
+ // Just like exactMatch, but the signatures should be included as well
+ Query query(memory_client, qname, qtype, response, true);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ // We can't let responseCheck to check the additional section as well,
+ // it gets confused by the two RRs for glue.delegation.../RRSIG due
+ // to it's design and fixing it would be hard. Therefore we simply
+ // check manually this one time.
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 4, 6,
+ (www_a_txt + std::string("www.example.com. 3600 IN RRSIG "
+ "A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. "
+ "FAKEFAKEFAKE\n")).c_str(),
+ (zone_ns_txt + std::string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n")).
+ c_str(), NULL);
+ RRsetIterator iterator(response.beginSection(Message::SECTION_ADDITIONAL));
+ const char* additional[] = {
+ "glue.delegation.example.com. 3600 IN A 192.0.2.153\n",
+ "glue.delegation.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n",
+ "glue.delegation.example.com. 3600 IN RRSIG AAAA 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "noglue.example.com. 3600 IN A 192.0.2.53\n",
+ "noglue.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ NULL
+ };
+ for (const char** rr(additional); *rr != NULL; ++ rr) {
+ ASSERT_FALSE(iterator ==
+ response.endSection(Message::SECTION_ADDITIONAL));
+ EXPECT_EQ(*rr, (*iterator)->toText());
+ iterator ++;
+ }
+ EXPECT_TRUE(iterator == response.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(QueryTest, exactAddrMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -315,7 +597,7 @@ TEST_F(QueryTest, exactAddrMatch) {
TEST_F(QueryTest, apexNSMatch) {
// find match rrset, omit authority data which has already been provided
// in the answer section from the authority section.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"), RRType::NS(),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"), RRType::NS(),
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 0, 3,
@@ -326,11 +608,12 @@ TEST_F(QueryTest, apexNSMatch) {
TEST_F(QueryTest, exactAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"),
RRType::ANY(), response).process());
- responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
- "noglue.example.com. 3600 IN A 192.0.2.53\n",
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 3, 2,
+ (string("noglue.example.com. 3600 IN A 192.0.2.53\n") +
+ string(nsec_nxdomain_txt)).c_str(),
zone_ns_txt,
"glue.delegation.example.com. 3600 IN A 192.0.2.153\n"
"glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n");
@@ -339,36 +622,34 @@ TEST_F(QueryTest, exactAnyMatch) {
TEST_F(QueryTest, apexAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"),
RRType::ANY(), response).process());
- responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 0, 3,
- "example.com. 3600 IN SOA . . 0 0 0 0 0\n"
- "example.com. 3600 IN NS glue.delegation.example.com.\n"
- "example.com. 3600 IN NS noglue.example.com.\n"
- "example.com. 3600 IN NS example.net.\n",
- NULL, ns_addrs_txt, mock_zone->getOrigin());
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 5, 0, 3,
+ (string(soa_txt) + string(zone_ns_txt) +
+ string(nsec_apex_txt)).c_str(),
+ NULL, ns_addrs_txt, mock_finder->getOrigin());
}
TEST_F(QueryTest, mxANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("mx.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("mx.example.com"),
RRType::ANY(), response).process());
- responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
- mx_txt, zone_ns_txt,
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 3, 4,
+ (string(mx_txt) + string(nsec_mx_txt)).c_str(), zone_ns_txt,
(string(ns_addrs_txt) + string(www_a_txt)).c_str());
}
TEST_F(QueryTest, glueANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
NULL, delegation_txt, ns_addrs_txt);
}
TEST_F(QueryTest, nodomainANY) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
// This tests that when we need to look up Zone's apex NS records for
@@ -376,15 +657,15 @@ TEST_F(QueryTest, nodomainANY) {
// throw in that case.
TEST_F(QueryTest, noApexNS) {
// Disable apex NS record
- mock_zone->setApexNSFlag(false);
+ mock_finder->setApexNSFlag(false);
- EXPECT_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process(), Query::NoApexNS);
// We don't look into the response, as it threw
}
TEST_F(QueryTest, delegation) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
qtype, response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
@@ -392,18 +673,255 @@ TEST_F(QueryTest, delegation) {
}
TEST_F(QueryTest, nxdomain) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSEC) {
+ // NXDOMAIN with DNSSEC proof. We should have SOA, NSEC that proves
+ // NXDOMAIN and NSEC that proves nonexistence of matching wildcard,
+ // as well as their RRSIGs.
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+ response, true).process());
+ responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
+ NULL, (string(soa_txt) +
+ string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_nxdomain_txt) + "\n" +
+ string("noglue.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n" +
+ string(nsec_apex_txt) + "\n" +
+ string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSEC2) {
+ // See comments about no_txt. In this case the best possible wildcard
+ // is derived from the next domain of the NSEC that proves NXDOMAIN, and
+ // the NSEC to provide the non existence of wildcard is different from
+ // the first NSEC.
+ Query(memory_client, Name("(.no.example.com"), qtype,
+ response, true).process();
+ responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
+ NULL, (string(soa_txt) +
+ string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_mx_txt) + "\n" +
+ string("mx.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n" +
+ string(nsec_no_txt) + "\n" +
+ string(").no.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSECDuplicate) {
+ // See comments about nz_txt. In this case we only need one NSEC,
+ // which proves both NXDOMAIN and the non existence of wildcard.
+ Query(memory_client, Name("nx.no.example.com"), qtype,
+ response, true).process();
+ responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 4, 0,
+ NULL, (string(soa_txt) +
+ string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_no_txt) + "\n" +
+ string(").no.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC1) {
+ // ZoneFinder::find() returns NXDOMAIN with non NSEC RR.
+ mock_finder->setNSECResult(Name("badnsec.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->delegation_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("badnsec.example.com"), qtype,
+ response, true).process(),
+ std::bad_cast);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC2) {
+ // ZoneFinder::find() returns NXDOMAIN with an empty NSEC RR.
+ mock_finder->setNSECResult(Name("emptynsec.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->empty_nsec_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("emptynsec.example.com"), qtype,
+ response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC3) {
+ // "no-wildcard proof" returns SUCCESS. it should be NXDOMAIN.
+ mock_finder->setNSECResult(Name("*.example.com"),
+ ZoneFinder::SUCCESS,
+ mock_finder->delegation_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+ response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC4) {
+ // "no-wildcard proof" doesn't return RRset.
+ mock_finder->setNSECResult(Name("*.example.com"),
+ ZoneFinder::NXDOMAIN, ConstRRsetPtr());
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+ response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC5) {
+ // "no-wildcard proof" returns non NSEC.
+ mock_finder->setNSECResult(Name("*.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->delegation_rrset_);
+ // This is a bit odd, but we'll simply include the returned RRset.
+ Query(memory_client, Name("nxdomain.example.com"), qtype,
+ response, true).process();
+ responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 8, 0,
+ NULL, (string(soa_txt) +
+ string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_nxdomain_txt) + "\n" +
+ string("noglue.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n" +
+ delegation_txt).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC6) {
+ // "no-wildcard proof" returns empty NSEC.
+ mock_finder->setNSECResult(Name("*.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->empty_nsec_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+ response, true).process(),
+ Query::BadNSEC);
}
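The BadNSEC series above pins down how Query reacts when ZoneFinder::find() misbehaves while collecting these proofs: a result code other than NXDOMAIN, a missing RRset, or an empty NSEC is treated as a broken zone and surfaces as Query::BadNSEC, while a non-NSEC RRset at the no-wildcard step is simply included in the answer (nxdomainBadNSEC5). A simplified sketch of that kind of sanity check, with stand-in types rather than the real ZoneFinder interface:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum FindResult { SUCCESS, NXDOMAIN, NXRRSET };   // stand-in for ZoneFinder codes

    struct FindContext {              // stand-in for what find() hands back
        FindResult code;
        bool has_rrset;
        bool is_empty;
    };

    struct BadNSEC : std::runtime_error {
        explicit BadNSEC(const std::string& m) : std::runtime_error(m) {}
    };

    void checkNoWildcardProof(const FindContext& ctx) {
        if (ctx.code != NXDOMAIN)
            throw BadNSEC("no-wildcard proof: unexpected result code");
        if (!ctx.has_rrset)
            throw BadNSEC("no-wildcard proof: no RRset returned");
        if (ctx.is_empty)
            throw BadNSEC("no-wildcard proof: empty NSEC");
        // Note: a non-NSEC RRset is not rejected here; nxdomainBadNSEC5 above
        // shows it simply ends up in the authority section.
    }

    int main() {
        const FindContext broken = {NXDOMAIN, true, true};  // empty NSEC
        try {
            checkNoWildcardProof(broken);
        } catch (const BadNSEC& e) {
            std::cout << "rejected: " << e.what() << "\n";
        }
    }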
TEST_F(QueryTest, nxrrset) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("www.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("www.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxrrsetWithNSEC) {
+ // NXRRSET with DNSSEC proof. We should have SOA, NSEC that proves the
+ // NXRRSET and their RRSIGs.
+ Query(memory_client, Name("www.example.com"), RRType::TXT(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_www_txt) + "\n" +
+ string("www.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, emptyNameWithNSEC) {
+ // Empty non terminal with DNSSEC proof. This is one of the cases of
+ // Section 3.1.3.2 of RFC4035.
+ // mx.example.com. NSEC ).no.example.com. proves no.example.com. is an
+ // empty non-terminal node. Note that it also implicitly proves there
+ // should be no closer wildcard match (because the empty name is an
+ // exact match), so we only need one NSEC.
+ // From the point of view of Query::process(), this is actually no
+ // different from the other NXRRSET case, but we check it explicitly
+ // just in case.
+ Query(memory_client, Name("no.example.com"), RRType::A(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n" +
+ string(nsec_mx_txt) + "\n" +
+ string("mx.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC")).c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxrrsetWithoutNSEC) {
+ // NXRRSET with DNSSEC proof requested, but there's no NSEC at that node.
+ // This is an unexpected event (if the zone is supposed to be properly
+ // signed with NSECs), but we accept and ignore the oddity.
+ Query(memory_client, Name("nonsec.example.com"), RRType::TXT(), response,
+ true).process();
+
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 2, 0, NULL,
+ (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("SOA") + "\n").c_str(),
+ NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNSEC) {
+ // The qname matches *.wild.example.com. The response should contain
+ // an NSEC that proves the non existence of a closer name.
+ Query(memory_client, Name("www.wild.example.com"), RRType::A(), response,
+ true).process();
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 6, 6,
+ (string(wild_txt).replace(0, 1, "www") +
+ string("www.wild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("A") + "\n").c_str(),
+ (zone_ns_txt + string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n") +
+ string(nsec_wild_txt) +
+ string("*.wild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n").c_str(),
+ NULL, // we are not interested in additionals in this test
+ mock_finder->getOrigin());
+}
+
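The expected answer in the wildcard tests is synthesized the same way a wildcard match is expanded: the test builds it with string(wild_txt).replace(0, 1, "www"), i.e. the leading "*" label is swapped for the query's leftmost label. A self-contained sketch of that single-label substitution (real wildcard expansion substitutes every label to the left of the match point, but one label is all these tests need):

    #include <cassert>
    #include <string>

    // Replace the leading "*" of a wildcard owner name with the query's
    // leftmost label -- the same trick as replace(0, 1, "www") above.
    std::string expandWildcard(const std::string& wildcard_owner,  // "*.wild.example.com."
                               const std::string& qname) {         // "www.wild.example.com."
        const std::string first_label = qname.substr(0, qname.find('.'));
        std::string expanded = wildcard_owner;
        expanded.replace(0, 1, first_label);
        return expanded;
    }

    int main() {
        assert(expandWildcard("*.wild.example.com.", "www.wild.example.com.")
               == "www.wild.example.com.");
        return 0;
    }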
+TEST_F(QueryTest, CNAMEwildNSEC) {
+ // Similar to the previous case, but the matching wildcard record is
+ // CNAME.
+ Query(memory_client, Name("www.cnamewild.example.com"), RRType::A(),
+ response, true).process();
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 2, 0,
+ (string(cnamewild_txt).replace(0, 1, "www") +
+ string("www.cnamewild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("CNAME") + "\n").c_str(),
+ (string(nsec_cnamewild_txt) +
+ string("*.cnamewild.example.com. 3600 IN RRSIG ") +
+ getCommonRRSIGText("NSEC") + "\n").c_str(),
+ NULL, // we are not interested in additionals in this test
+ mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, badWildcardProof1) {
+ // Unexpected case in wildcard proof: ZoneFinder::find() returns SUCCESS
+ // when NXDOMAIN is expected.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::SUCCESS,
+ mock_finder->delegation_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof2) {
+ // "wildcard proof" doesn't return RRset.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::NXDOMAIN, ConstRRsetPtr());
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof3) {
+ // "wildcard proof" returns empty NSEC.
+ mock_finder->setNSECResult(Name("www.wild.example.com"),
+ ZoneFinder::NXDOMAIN,
+ mock_finder->empty_nsec_rrset_);
+ EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+ RRType::A(), response, true).process(),
+ Query::BadNSEC);
}
/*
@@ -412,22 +930,22 @@ TEST_F(QueryTest, nxrrset) {
*/
TEST_F(QueryTest, noSOA) {
// disable zone's SOA RR.
- mock_zone->setSOAFlag(false);
+ mock_finder->setSOAFlag(false);
// The NX Domain
- EXPECT_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"),
qtype, response).process(), Query::NoSOA);
// Of course, we don't look into the response, as it threw
// NXRRSET
- EXPECT_THROW(Query(memory_datasrc, Name("nxrrset.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxrrset.example.com"),
qtype, response).process(), Query::NoSOA);
}
TEST_F(QueryTest, noMatchZone) {
// there's a zone in the memory datasource but it doesn't match the qname.
// should result in REFUSED.
- Query(memory_datasrc, Name("example.org"), qtype, response).process();
+ Query(memory_client, Name("example.org"), qtype, response).process();
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
@@ -438,7 +956,7 @@ TEST_F(QueryTest, noMatchZone) {
* A record, other to unknown out of zone one.
*/
TEST_F(QueryTest, MX) {
- Query(memory_datasrc, Name("mx.example.com"), RRType::MX(),
+ Query(memory_client, Name("mx.example.com"), RRType::MX(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
@@ -452,7 +970,7 @@ TEST_F(QueryTest, MX) {
* This should not trigger the additional processing for the exchange.
*/
TEST_F(QueryTest, MXAlias) {
- Query(memory_datasrc, Name("cnamemx.example.com"), RRType::MX(),
+ Query(memory_client, Name("cnamemx.example.com"), RRType::MX(),
response).process();
// there shouldn't be any additional RRs for the exchanges (we have 3
@@ -472,7 +990,7 @@ TEST_F(QueryTest, MXAlias) {
* returned.
*/
TEST_F(QueryTest, CNAME) {
- Query(memory_datasrc, Name("cname.example.com"), RRType::A(),
+ Query(memory_client, Name("cname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -482,7 +1000,7 @@ TEST_F(QueryTest, CNAME) {
TEST_F(QueryTest, explicitCNAME) {
// same owner name as the CNAME test but explicitly query for CNAME RR.
// expect the same response as we don't provide a full chain yet.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -494,7 +1012,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
// note: with chaining, what should be expected is not trivial:
// BIND 9 returns the CNAME in answer and SOA in authority, no additional.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional.
- Query(memory_datasrc, Name("cname.example.com"), RRType::TXT(),
+ Query(memory_client, Name("cname.example.com"), RRType::TXT(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -503,7 +1021,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
TEST_F(QueryTest, explicitCNAME_NX_RRSET) {
// same owner name as the NXRRSET test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -517,7 +1035,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
// RCODE being NXDOMAIN.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional,
// RCODE being NOERROR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::A(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -526,7 +1044,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
TEST_F(QueryTest, explicitCNAME_NX_DOMAIN) {
// same owner name as the NXDOMAIN test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -542,7 +1060,7 @@ TEST_F(QueryTest, CNAME_OUT) {
* Then the same test should be done with .org included there and
* see what it does (depends on what we want to do)
*/
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::A(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -551,7 +1069,7 @@ TEST_F(QueryTest, CNAME_OUT) {
TEST_F(QueryTest, explicitCNAME_OUT) {
// same owner name as the OUT test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -567,7 +1085,7 @@ TEST_F(QueryTest, explicitCNAME_OUT) {
* pointing to NXRRSET and NXDOMAIN cases (similarly as with CNAME).
*/
TEST_F(QueryTest, DNAME) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::A(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -583,7 +1101,7 @@ TEST_F(QueryTest, DNAME) {
* DNAME.
*/
TEST_F(QueryTest, DNAME_ANY) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::ANY(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::ANY(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -592,7 +1110,7 @@ TEST_F(QueryTest, DNAME_ANY) {
// Test that when we ask for DNAME explicitly, it does no synthesizing.
TEST_F(QueryTest, explicitDNAME) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::DNAME(),
+ Query(memory_client, Name("dname.example.com"), RRType::DNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -604,7 +1122,7 @@ TEST_F(QueryTest, explicitDNAME) {
* the CNAME, it should return the RRset.
*/
TEST_F(QueryTest, DNAME_A) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::A(),
+ Query(memory_client, Name("dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -616,11 +1134,11 @@ TEST_F(QueryTest, DNAME_A) {
* It should not synthesize the CNAME.
*/
TEST_F(QueryTest, DNAME_NX_RRSET) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("dname.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("dname.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -636,7 +1154,7 @@ TEST_F(QueryTest, LongDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
responseCheck(response, Rcode::YXDOMAIN(), AA_FLAG, 1, 0, 0,
@@ -655,7 +1173,7 @@ TEST_F(QueryTest, MaxLenDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
// Check the answer is OK
diff --git a/src/bin/auth/tests/run_unittests.cc b/src/bin/auth/tests/run_unittests.cc
index 6ae848d..d3bbab7 100644
--- a/src/bin/auth/tests/run_unittests.cc
+++ b/src/bin/auth/tests/run_unittests.cc
@@ -13,6 +13,8 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
@@ -21,6 +23,7 @@ main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
+ isc::log::initLogger();
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 062b70d..98e573b 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
#include <cc/data.h>
#include <cc/session.h>
@@ -69,14 +71,20 @@ private:
};
protected:
- AuthCountersTest() : verbose_mode_(false), counters(verbose_mode_) {
+ AuthCountersTest() : counters() {
counters.setStatisticsSession(&statistics_session_);
}
~AuthCountersTest() {
}
MockSession statistics_session_;
- bool verbose_mode_;
AuthCounters counters;
+ // No need to inherit from the original class here.
+ class MockModuleSpec {
+ public:
+ bool validateStatistics(ConstElementPtr, const bool valid) const
+ { return (valid); }
+ };
+ MockModuleSpec module_spec_;
};
void
@@ -182,7 +190,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
statistics_session_.setThrowSessionTimeout(false);
}
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
// Submit statistics data.
// Validate if it submits correct data.
@@ -202,12 +210,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
// Command is "set".
EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
ConstElementPtr statistics_data = statistics_session_.sent_msg
->get("command")->get(1)
- ->get("stats_data");
+ ->get("data");
// UDP query counter is 2 and TCP query counter is 1.
- EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
- EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
}
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+ // a validator for the unittest
+ AuthCounters::validator_type validator;
+ ConstElementPtr el;
+
+ // Submit statistics data with correct statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, true);
+
+ EXPECT_TRUE(validator(el));
+
+ // register validator to AuthCounters
+ counters.registerStatisticsValidator(validator);
+
+ // Counters should be initialized to 0.
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+
+ // UDP query counter is set to 2.
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ // TCP query counter is set to 1.
+ counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+
+ // checks the value returned by submitStatistics
+ EXPECT_TRUE(counters.submitStatistics());
+
+ // Destination is "Stats".
+ EXPECT_EQ("Stats", statistics_session_.msg_destination);
+ // Command is "set".
+ EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+ ->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
+ ConstElementPtr statistics_data = statistics_session_.sent_msg
+ ->get("command")->get(1)
+ ->get("data");
+ // UDP query counter is 2 and TCP query counter is 1.
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+ // Submit statistics data with incorrect statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, false);
+
+ EXPECT_FALSE(validator(el));
+
+ counters.registerStatisticsValidator(validator);
+
+ // checks the value returned by submitStatistics
+ EXPECT_FALSE(counters.submitStatistics());
+}
}
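The new submitStatisticsWithValidator test turns a two-argument member function (the data plus a canned pass/fail flag) into the one-argument validator callback by binding the mock object and the flag. The same shape in standard C++, shown only to illustrate the binding pattern (the names below are made up; the branch itself uses boost::bind together with AuthCounters::validator_type):

    #include <functional>
    #include <iostream>

    struct MockModuleSpec {
        bool validateStatistics(const void* /*data*/, bool valid) const {
            return valid;   // canned result, as in the test fixture
        }
    };

    typedef std::function<bool(const void*)> validator_type;

    int main() {
        MockModuleSpec spec;
        const validator_type good = std::bind(&MockModuleSpec::validateStatistics,
                                              &spec, std::placeholders::_1, true);
        const validator_type bad  = std::bind(&MockModuleSpec::validateStatistics,
                                              &spec, std::placeholders::_1, false);
        std::cout << good(nullptr) << " " << bad(nullptr) << "\n";  // prints "1 0"
    }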
diff --git a/src/bin/auth/tests/testdata/Makefile.am b/src/bin/auth/tests/testdata/Makefile.am
index f6f1f27..c86722f 100644
--- a/src/bin/auth/tests/testdata/Makefile.am
+++ b/src/bin/auth/tests/testdata/Makefile.am
@@ -23,4 +23,4 @@ EXTRA_DIST += example.com
EXTRA_DIST += example.sqlite3
.spec.wire:
- $(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 254875f..69ea256 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,16 +1,23 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10.pyc
+CLEANFILES = bind10 bind10_src.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+noinst_SCRIPTS = run_bind10.sh
+
bind10dir = $(pkgdatadir)
bind10_DATA = bob.spec
EXTRA_DIST = bob.spec
man_MANS = bind10.8
-EXTRA_DIST += $(man_MANS) bind10.xml
+EXTRA_DIST += $(man_MANS) bind10.xml bind10_messages.mes
if ENABLE_MAN
@@ -19,11 +26,21 @@ bind10.8: bind10.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10.py
+bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10.py >$@
+ -e "s|@@LIBDIR@@|$(libdir)|" \
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
pytest:
$(SHELL) tests/bind10_test
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..0adcb70 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,21 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 31, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -22,7 +31,7 @@
bind10 \- BIND 10 boss process
.SH "SYNOPSIS"
.HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
.SH "DESCRIPTION"
.PP
The
@@ -107,6 +116,23 @@ Display more about what is going on for
\fBbind10\fR
and its child processes\&.
.RE
+.PP
+\fB\-w\fR \fIwait_time\fR, \fB\-\-wait\fR \fIwait_time\fR
+.RS 4
+Sets the amount of time that BIND 10 will wait for the configuration manager (a key component of BIND 10) to initialize itself before abandoning the start up and terminating with an error\&. The wait_time is specified in seconds and has a default value of 10\&.
+.RE
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
.SH "SEE ALSO"
.PP
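The bind10.boot_time value documented above is produced with the strftime pattern "%Y-%m-%dT%H:%M:%SZ" (visible in the boss source removed further down), i.e. a UTC timestamp in ISO 8601 form such as 2011-08-11T19:29:15Z. A minimal C++ equivalent of that formatting, for reference:

    #include <ctime>
    #include <iostream>

    int main() {
        const std::time_t now = std::time(nullptr);
        char buf[sizeof "2011-08-11T19:29:15Z"];   // 20 characters plus NUL
        std::strftime(buf, sizeof buf, "%Y-%m-%dT%H:%M:%SZ", std::gmtime(&now));
        std::cout << buf << "\n";                  // e.g. 2011-11-09T19:29:15Z
    }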
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
deleted file mode 100755
index 648d085..0000000
--- a/src/bin/bind10/bind10.py.in
+++ /dev/null
@@ -1,1039 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-
-import isc.cc
-import isc.util.process
-import isc.net.parse
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for bind10.boottime of stats module
-_BASETIME = time.gmtime()
-
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process has been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
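The docstring above boils down to a simple scheduling rule: a dead process may be restarted no earlier than about restart_frequency seconds after it was last started, with normal-distribution jitter (sigma = 5% of the frequency) so that components don't fall into lockstep. A compact C++ sketch of the same rule; the removed Python uses random.normalvariate with the same parameters:

    #include <iostream>
    #include <random>

    // Earliest time a process may be restarted, given when it was started.
    double nextRestartTime(double run_start_time, double restart_frequency = 10.0) {
        static std::mt19937 gen{std::random_device{}()};
        std::normal_distribution<double> jitter(restart_frequency,
                                                restart_frequency * 0.05);
        return run_start_time + jitter(gen);
    }

    int main() {
        // A process started at t = 100 s becomes restartable around t = 110 s.
        std::cout << nextRestartTime(100.0) << "\n";
    }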
-class ProcessInfoError(Exception): pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False, uid=None, username=None):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
- self.uid = uid
- self.username = username
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everything down by
- # other means).
- os.setpgrp()
- # Second, set the user ID if one has been specified
- if self.uid is not None:
- try:
- posix.setuid(self.uid)
- except OSError as e:
- if e.errno == errno.EPERM:
- # if we failed to change user due to permission report that
- raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
- else:
- # otherwise simply re-raise whatever error we found
- raise
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = os.environ
- spawn_env.update(self.env)
- if 'B10_FROM_SOURCE' not in os.environ:
- spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed through to the config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specifies on which port it
- should listen.
- """
- self.cc_session = None
- self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.started_auth_family = False
- self.started_resolver_family = False
- self.curproc = None
- self.dead_processes = {}
- self.msgq_socket_file = msgq_socket_file
- self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
- self.runnable = False
- self.uid = setuid
- self.username = username
- self.verbose = verbose
- self.data_path = data_path
- self.config_filename = config_filename
- self.cmdctl_port = cmdctl_port
- self.brittle = brittle
-
- def config_handler(self, new_config):
- # If this is initial update, don't do anything now, leave it to startup
- if not self.runnable:
- return
- # Now we declare a few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not 'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- sys.stderr.write("[bind10] Starting " + name + " as " +
- "a user, not root. This might fail.\n")
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
- if self.verbose:
- sys.stdout.write("[bind10] Handling new configuration: " +
- str(new_config) + "\n")
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
-
- def get_processes(self):
- pids = list(self.processes.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.processes[pid].name])
- return process_list
-
- def command_handler(self, command, args):
- if self.verbose:
- sys.stdout.write("[bind10] Boss got command: " + str(command) + "\n")
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "sendstats":
- # send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }})
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- self.cc_session.group_recvmsg(True, seq)
- answer = isc.config.ccsession.create_answer(0)
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_processes(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- if self.verbose:
- sys.stdout.write("[bind10] killing started processes:\n")
-
- for pid in self.processes:
- if self.verbose:
- sys.stdout.write("[bind10] - %s\n" % self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
-
- def read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
- """
- if self.verbose:
- sys.stdout.write("[bind10] Reading Boss configuration:\n")
-
- config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- if self.verbose:
- sys.stdout.write("[bind10] - start_auth: %s\n" %
- str(self.cfg_start_auth))
- sys.stdout.write("[bind10] - start_resolver: %s\n" %
- str(self.cfg_start_resolver))
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- verbose option is set. Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if self.verbose:
- sys.stdout.write("[bind10] Starting %s" % self.curproc)
- if port is not None:
- sys.stdout.write(" on port %d" % port)
- if address is not None:
- sys.stdout.write(" (address %s)" % str(address))
- sys.stdout.write("\n")
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with log_starting(), this ensures a consistent
- format.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Started %s" % self.curproc)
- if pid is not None:
- sys.stdout.write(" (PID %d)" % pid)
- sys.stdout.write("\n")
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def start_msgq(self, c_channel_env):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
- True, not self.verbose, uid=self.uid,
- username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > 5:
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- def start_cfgmgr(self, c_channel_env):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
- username=self.username)
- bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
- self.log_started(bind_cfgd.pid)
-
- # sleep until b10-cfgmgr is fully up and running, this is a good place
- # to have a (short) timeout on synchronized groupsend/receive
- # TODO: replace the sleep by a listen for ConfigManager started
- # message
- time.sleep(1)
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
- """
- self.log_starting("ccsession")
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler, self.command_handler)
- self.ccs.start()
- self.log_started()
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = ProcessInfo(name, args, c_channel_env)
- newproc.spawn()
- self.processes[newproc.pid] = newproc
- self.log_started(newproc.pid)
-
- def start_simple(self, name, c_channel_env, port=None, address=None):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
-
- The port and address arguments are for log messages only.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self, c_channel_env):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.nocache:
- authargs += ['-n']
- if self.uid:
- authargs += ['-u', str(self.uid)]
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
-
- def start_resolver(self, c_channel_env):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.uid:
- resargs += ['-u', str(self.uid)]
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
-
- def start_xfrout(self, c_channel_env):
- self.start_simple("b10-xfrout", c_channel_env)
-
- def start_xfrin(self, c_channel_env):
- self.start_simple("b10-xfrin", c_channel_env)
-
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
-
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
-
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
-
- def start_cmdctl(self, c_channel_env):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
-
- def start_all_processes(self):
- """
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
- """
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise a string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- if self.verbose:
- sys.stdout.write("[bind10] Checking for already running b10-msgq\n")
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- return "b10-msgq already running, or socket file not cleaned , cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_processes()
- except Exception as e:
- self.kill_started_processes()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- return None
-
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
-
- def stop_process(self, process, recipient):
- """
- Stop the given process, friendly-like. The process is the name it has
- (in logs, etc), the recipient is the address on msgq.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Asking %s to terminate\n" % process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
- self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
- recipient)
-
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
-
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
-
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
-
- def shutdown(self):
- """Stop the BoB instance."""
- if self.verbose:
- sys.stdout.write("[bind10] Stopping the server.\n")
- # first try using the BIND 10 request to stop
- try:
- self.stop_all_processes()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometime it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
- # next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGTERM to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.terminate()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- # finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGKILL to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.kill()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- if self.verbose:
- sys.stdout.write("[bind10] All processes ended, server done.\n")
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD: break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0: break
- if pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) died: exit status not available" %
- (proc_info.name, proc_info.pid))
- else:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) terminated, exit status = %d\n" %
- (proc_info.name, proc_info.pid, exit_status))
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- sys.stdout.write(
- "[bind10] The b10-msgq process died, shutting down.\n")
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
- else:
- sys.stdout.write("[bind10] Unknown child pid %d exited.\n" % pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
- """
- next_restart = None
- # if we're shutting down, then don't restart
- if not self.runnable:
- return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
- now = time.time()
- for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
- else:
- if self.verbose:
- sys.stdout.write("[bind10] Resurrecting dead %s process...\n" %
- proc_info.name)
- try:
- proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
- sys.stdout.write("[bind10] Resurrected %s (PID %d)\n" %
- (proc_info.name, proc_info.pid))
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- if options.verbose:
- sys.stdout.write("[bind10] Received %s.\n" % get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
- default=False, help="disable hot-spot cache in authoritative DNS server")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
- created by dump_pid(). The specified file may or may not exist; if it
- doesn't this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno is not errno.ENOENT:
- raise
-
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Check user ID.
- setuid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- sys.stderr.write("bind10: invalid user: '%s'\n" % options.user)
- sys.exit(1)
-
- # Announce startup.
- if options.verbose:
- sys.stdout.write("%s\n" % VERSION)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle)
- startup_result = boss_of_bind.startup()
- if startup_result:
- sys.stderr.write("[bind10] Error on startup: %s\n" % startup_result)
- sys.exit(1)
- sys.stdout.write("[bind10] BIND 10 started\n")
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- sys.stderr.write("[bind10] Error with select(); %s\n" % err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- if options.verbose:
- sys.stderr.write("[bind10] msgq channel disappeared.\n")
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- sys.stdout.write("[bind10] BIND 10 exiting\n");
- unlink_pid_file(options.pid_file)
- sys.exit(0)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..6de0947 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -50,6 +50,7 @@
<arg><option>-p <replaceable>data_path</replaceable></option></arg>
<arg><option>-u <replaceable>user</replaceable></option></arg>
<arg><option>-v</option></arg>
+ <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
<arg><option>--brittle</option></arg>
<arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
<arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
@@ -60,6 +61,7 @@
<arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
<arg><option>--user <replaceable>user</replaceable></option></arg>
<arg><option>--verbose</option></arg>
+ <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
</cmdsynopsis>
</refsynopsisdiv>
@@ -211,12 +213,48 @@ The default is the basename of ARG 0.
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
+ <listitem>
+ <para>Sets the amount of time that BIND 10 will wait for
+ the configuration manager (a key component of BIND 10) to
+ initialize itself before abandoning the startup and
+ terminating with an error. The wait_time is specified in
+ seconds and has a default value of 10.
+ </para>
+ </listitem>
+ </varlistentry>
+
</variablelist>
</refsect1>
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
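
As an illustration of the new -w/--wait behaviour documented above, the value
simply bounds a polling loop in which the boss waits for the configuration
manager to announce itself. A minimal sketch (using a hypothetical
is_cfgmgr_running() check in place of the real message-bus handshake shown
later in this commit):

    import time

    def wait_for_cfgmgr(is_cfgmgr_running, wait_time=10):
        # Poll once per second until the configuration manager reports itself
        # as running, or until the wait_time budget is exhausted.
        time_remaining = wait_time
        while time_remaining > 0 and not is_cfgmgr_running():
            time.sleep(1)
            time_remaining -= 1
        if not is_cfgmgr_running():
            raise RuntimeError("Configuration manager process has not started")
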
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
new file mode 100644
index 0000000..2769aa9
--- /dev/null
+++ b/src/bin/bind10/bind10_messages.mes
@@ -0,0 +1,235 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the bind10 messages python module.
+
+% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+
+% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+
+% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
+This message shows whether or not the resolver should be
+started according to the configuration.
+
+% BIND10_INVALID_STATISTICS_DATA invalid statistics data specified
+An error was encountered when the boss module specified
+statistics data that is not valid according to the boss specification file.
+
+% BIND10_INVALID_USER invalid user: %1
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
+% BIND10_KILL_PROCESS killing process %1
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES.
+
+% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+
+% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+
+% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
+While the boss process was listening for messages on the message bus
+channel, the channel suddenly disappeared. The msgq daemon may have died.
+This might leave the system in an inconsistent state, so BIND 10 will now
+shut down.
+
+% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+
+% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+
+% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+
+% BIND10_RECEIVED_COMMAND received command: %1
+The boss module received a command and shall now process it. The command
+is printed.
+
+% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+
+% BIND10_RECEIVED_SIGNAL received signal %1
+The boss module received the given signal.
+
+% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
+The given process has been restarted successfully, and is now running
+with the given process id.
+
+% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
+The given process has ended unexpectedly and will now be restarted.
+
+% BIND10_SELECT_ERROR error in select() call: %1
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+
+% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
+The boss module is sending a SIGKILL signal to the given process.
+
+% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
+The boss module is sending a SIGTERM signal to the given process.
+
+% BIND10_SHUTDOWN stopping the server
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+
+% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
+All child processes have been stopped, and the boss process will now
+stop itself.
+
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket, but the function
+that failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The boss module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not normally happen.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The boss module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on the local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket; it has
+the given file descriptor number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with the given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The boss forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_CC started configuration/command session
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+
+% BIND10_STARTED_PROCESS started %1
+The given process has successfully been started.
+
+% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
+The given process has successfully been started, and has the given PID.
+
+% BIND10_STARTING starting BIND10: %1
+Informational message on startup that shows the full version.
+
+% BIND10_STARTING_CC starting configuration/command session
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+
+% BIND10_STARTING_PROCESS starting process %1
+The boss module is starting the given process.
+
+% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
+The boss module is starting the given process, which will listen on the
+given port number.
+
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+The boss module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+
+% BIND10_STARTUP_COMPLETE BIND 10 started
+All modules have been successfully started, and BIND 10 is now running.
+
+% BIND10_STARTUP_ERROR error during startup: %1
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+
+% BIND10_STARTUP_UNEXPECTED_MESSAGE unexpected startup message %1
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+
+% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts. This error is output when a
+message received by the Boss process is not recognised.
+
+% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_STOP_PROCESS asking %1 to shut down
+The boss module is sending a shutdown command to the given module over
+the message channel.
+
+% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+
+% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+
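
The message IDs defined in this file are turned into Python constants
(isc.log_messages.bind10_messages) and used with the isc.log logger; the
pattern, mirroring the boss code later in this commit, is roughly:

    import isc.log
    from isc.log_messages.bind10_messages import *

    isc.log.init("b10-boss")
    logger = isc.log.Logger("boss")

    # Informational message; the %1 placeholder is filled from the argument.
    VERSION = "bind10 20110223 (BIND 10 ...)"  # real string substitutes @PACKAGE_VERSION@
    logger.info(BIND10_STARTING, VERSION)

    # Debug message, only emitted at or above the given debug level.
    logger.debug(logger.DBGLVL_TRACE_BASIC, BIND10_WAIT_CFGMGR)
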
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
new file mode 100755
index 0000000..4bcd778
--- /dev/null
+++ b/src/bin/bind10/bind10_src.py.in
@@ -0,0 +1,1168 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the Boss of Bind (BoB, or bob) program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the BoB class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
+ ADD_LIBEXEC_PATH = False
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+ ADD_LIBEXEC_PATH = True
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+import copy
+
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+from isc.log_messages.bind10_messages import *
+import isc.bind10.sockcreator
+
+isc.log.init("b10-boss")
+logger = isc.log.Logger("boss")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
+# Assign this process some longer name
+isc.util.process.rename(sys.argv[0])
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for boot_time of Boss
+_BASETIME = time.gmtime()
+
+class RestartSchedule:
+ """
+Keeps state when restarting something (in this case, a process).
+
+When a process dies unexpectedly, we need to restart it. However, if
+it fails to restart for some reason, then we should not simply keep
+restarting it at high speed.
+
+A more sophisticated algorithm can be developed, but for now we choose
+a simple set of rules:
+
+ * If a process has been running for >=10 seconds, we restart it
+ right away.
+ * If a process was running for <10 seconds, we wait until 10 seconds
+ after it was started.
+
+To avoid programs getting into lockstep, we add jitter drawn from a normal
+distribution, so that processes are not all restarted at exactly 10 seconds."""
+
+ def __init__(self, restart_frequency=10.0):
+ self.restart_frequency = restart_frequency
+ self.run_start_time = None
+ self.run_stop_time = None
+ self.restart_time = None
+
+ def set_run_start_time(self, when=None):
+ if when is None:
+ when = time.time()
+ self.run_start_time = when
+ sigma = self.restart_frequency * 0.05
+ self.restart_time = when + random.normalvariate(self.restart_frequency,
+ sigma)
+
+ def set_run_stop_time(self, when=None):
+ """We don't actually do anything with stop time now, but it
+ might be useful for future algorithms."""
+ if when is None:
+ when = time.time()
+ self.run_stop_time = when
+
+ def get_restart_time(self, when=None):
+ if when is None:
+ when = time.time()
+ return max(when, self.restart_time)
+
+class ProcessInfoError(Exception): pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False, uid=None, username=None):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.restart_schedule = RestartSchedule()
+ self.uid = uid
+ self.username = username
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+ # Second, set the user ID if one has been specified
+ if self.uid is not None:
+ try:
+ posix.setuid(self.uid)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # if we failed to change user due to a permission problem, report that
+ raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
+ else:
+ # otherwise simply re-raise whatever error we found
+ raise
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the boss process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = copy.deepcopy(os.environ)
+ spawn_env.update(self.env)
+ if ADD_LIBEXEC_PATH:
+ spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+ self.restart_schedule.set_run_start_time()
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class ProcessStartError(Exception): pass
+
+class BoB:
+ """Boss of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, nocache=False, verbose=False, setuid=None,
+ username=None, cmdctl_port=None, brittle=False, wait_time=10):
+ """
+ Initialize the Boss of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then the boss reports
+ what it is doing.
+
+ Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specifies on which port it
+ should listen.
+
+ brittle is a debug option that controls whether the Boss shuts down
+ after any process dies.
+
+ wait_time controls the amount of time (in seconds) that Boss waits
+ for selected processes to initialize before continuing with the
+ initialization. Currently this is only the configuration manager.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.cfg_start_auth = True
+ self.cfg_start_resolver = False
+ self.cfg_start_dhcp6 = False
+ self.cfg_start_dhcp4 = False
+ self.started_auth_family = False
+ self.started_resolver_family = False
+ self.curproc = None
+ self.dead_processes = {}
+ self.msgq_socket_file = msgq_socket_file
+ self.nocache = nocache
+ self.processes = {}
+ self.expected_shutdowns = {}
+ self.runnable = False
+ self.uid = setuid
+ self.username = username
+ self.verbose = verbose
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.cmdctl_port = cmdctl_port
+ self.brittle = brittle
+ self.wait_time = wait_time
+ self.sockcreator = None
+
+ # If -v was set, enable full debug logging.
+ if self.verbose:
+ logger.set_severity("DEBUG", 99)
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ # Now we declare a few functions used only internally here. Besides the
+ # benefit of not polluting the namespace, they are closures, so we
+ # don't need to pass some variables around
+ def start_stop(name, started, start, stop):
+ if not 'start_' + name in new_config:
+ return
+ if new_config['start_' + name]:
+ if not started:
+ if self.uid is not None:
+ logger.info(BIND10_START_AS_NON_ROOT, name)
+ start()
+ else:
+ stop()
+ # These four functions are passed to start_stop (smells like functional
+ # programming a little bit)
+ def resolver_on():
+ self.start_resolver(self.c_channel_env)
+ self.started_resolver_family = True
+ def resolver_off():
+ self.stop_resolver()
+ self.started_resolver_family = False
+ def auth_on():
+ self.start_auth(self.c_channel_env)
+ self.start_xfrout(self.c_channel_env)
+ self.start_xfrin(self.c_channel_env)
+ self.start_zonemgr(self.c_channel_env)
+ self.started_auth_family = True
+ def auth_off():
+ self.stop_zonemgr()
+ self.stop_xfrin()
+ self.stop_xfrout()
+ self.stop_auth()
+ self.started_auth_family = False
+
+ # The real code of the config handler function follows here
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ start_stop('resolver', self.started_resolver_family, resolver_on,
+ resolver_off)
+ start_stop('auth', self.started_auth_family, auth_on, auth_off)
+
+ answer = isc.config.ccsession.create_answer(0)
+ return answer
+
+ def get_processes(self):
+ pids = list(self.processes.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.processes[pid].name])
+ return process_list
+
+ def _get_stats_data(self):
+ return { "owner": "Boss",
+ "data": { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+ }
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
+ elif command == "sendstats":
+ # send statistics data to the stats daemon immediately
+ stats_data = self._get_stats_data()
+ valid = self.ccs.get_module_spec().validate_statistics(
+ True, stats_data["data"])
+ if valid:
+ cmd = isc.config.ccsession.create_command('set', stats_data)
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes an orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ else:
+ logger.fatal(BIND10_INVALID_STATISTICS_DATA)
+ answer = isc.config.ccsession.create_answer(
+ 1, "specified statistics data is invalid")
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def start_creator(self):
+ self.curproc = 'b10-sockcreator'
+ creator_path = os.environ['PATH']
+ if ADD_LIBEXEC_PATH:
+ creator_path = "@@LIBEXECDIR@@:" + creator_path
+ self.sockcreator = isc.bind10.sockcreator.Creator(creator_path)
+
+ def stop_creator(self, kill=False):
+ if self.sockcreator is None:
+ return
+ if kill:
+ self.sockcreator.kill()
+ else:
+ self.sockcreator.terminate()
+ self.sockcreator = None
+
+ def kill_started_processes(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+
+ self.stop_creator(True)
+
+ for pid in self.processes:
+ logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
+ self.processes[pid].process.kill()
+ self.processes = {}
+
+ def read_bind10_config(self):
+ """
+ Reads the parameters associated with the BoB module itself.
+
+ At present these are the components to start although arguably this
+ information should be in the configuration for the appropriate
+ module itself. (However, this would cause difficulty in the case of
+ xfrin/xfrout and zone manager as we don't need to start those if we
+ are not running the authoritative server.)
+ """
+ logger.info(BIND10_READING_BOSS_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.cfg_start_auth = config_data.get("start_auth")
+ self.cfg_start_resolver = config_data.get("start_resolver")
+
+ logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
+ logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+
+ def log_starting(self, process, port = None, address = None):
+ """
+ A convenience function to output a "Starting xxx" message if the
+ logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
+ Putting this into a separate method ensures
+ that the output form is consistent across all processes.
+
+ The process name (passed as the first argument) is put into
+ self.curproc, and is used to indicate which process failed to
+ start if there is an error (and is used in the "Started" message
+ on success). The optional port and address information are
+ appended to the message (if present).
+ """
+ self.curproc = process
+ if port is None and address is None:
+ logger.info(BIND10_STARTING_PROCESS, self.curproc)
+ elif address is None:
+ logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+ port)
+ else:
+ logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+ self.curproc, address, port)
+
+ def log_started(self, pid = None):
+ """
+ A convenience function to output a 'Started xxxx (PID yyyy)'
+ message. As with log_starting(), this ensures a consistent
+ format.
+ """
+ if pid is None:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+ else:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ def process_running(self, msg, who):
+ """
+ Some processes return a message to the Boss after they have
+ started to indicate that they are running. The form of the
+ message is a dictionary with contents {"running": "<process>"}.
+ This method checks the passed message and returns True if the
+ "who" process is contained in the message (so is presumably
+ running). It returns False for all other conditions and will
+ log an error if appropriate.
+ """
+ if msg is not None:
+ try:
+ if msg["running"] == who:
+ return True
+ else:
+ logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
+ except:
+ logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
+
+ return False
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
+ def start_msgq(self, c_channel_env):
+ """
+ Start the message queue and connect to the command channel.
+ """
+ self.log_starting("b10-msgq")
+ c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
+ True, not self.verbose, uid=self.uid,
+ username=self.username)
+ c_channel.spawn()
+ self.processes[c_channel.pid] = c_channel
+ self.log_started(c_channel.pid)
+
+ # Now connect to the c-channel
+ cc_connect_start = time.time()
+ while self.cc_session is None:
+ # if we have been trying for "a while" give up
+ if (time.time() - cc_connect_start) > 5:
+ raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
+
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ except isc.cc.session.SessionError:
+ time.sleep(0.1)
+
+ # Subscribe to the message queue. The only messages we expect to receive
+ # on this channel are ones relating to process startup.
+ self.cc_session.group_subscribe("Boss")
+
+ def start_cfgmgr(self, c_channel_env):
+ """
+ Starts the configuration manager process
+ """
+ self.log_starting("b10-cfgmgr")
+ args = ["b10-cfgmgr"]
+ if self.data_path is not None:
+ args.append("--data-path=" + self.data_path)
+ if self.config_filename is not None:
+ args.append("--config-filename=" + self.config_filename)
+ bind_cfgd = ProcessInfo("b10-cfgmgr", args,
+ c_channel_env, uid=self.uid,
+ username=self.username)
+ bind_cfgd.spawn()
+ self.processes[bind_cfgd.pid] = bind_cfgd
+ self.log_started(bind_cfgd.pid)
+
+ # Wait for the configuration manager to start up as subsequent initialization
+ # cannot proceed without it. The time to wait can be set on the command line.
+ time_remaining = self.wait_time
+ msg, env = self.cc_session.group_recvmsg()
+ while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
+ logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
+ time.sleep(1)
+ time_remaining = time_remaining - 1
+ msg, env = self.cc_session.group_recvmsg()
+
+ if not self.process_running(msg, "ConfigManager"):
+ raise ProcessStartError("Configuration manager process has not started")
+
+ def start_ccsession(self, c_channel_env):
+ """
+ Start the CC Session
+
+ The argument c_channel_env is unused but is supplied to keep the
+ argument list the same for all start_xxx methods.
+
+ With regard to logging, note that as the CC session is not a
+ process, the log_starting/log_started methods are not used.
+ """
+ logger.info(BIND10_STARTING_CC)
+ self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler,
+ socket_file = self.msgq_socket_file)
+ self.ccs.start()
+ logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
+
+ # A couple of utility methods for starting processes...
+
+ def start_process(self, name, args, c_channel_env, port=None, address=None):
+ """
+ Given a set of command arguments, start the process and output
+ appropriate log messages. If the start is successful, the process
+ is added to the list of started processes.
+
+ The port and address arguments are for log messages only.
+ """
+ self.log_starting(name, port, address)
+ newproc = ProcessInfo(name, args, c_channel_env)
+ newproc.spawn()
+ self.processes[newproc.pid] = newproc
+ self.log_started(newproc.pid)
+
+ def start_simple(self, name, c_channel_env, port=None, address=None):
+ """
+ Most of the BIND-10 processes are started with the command:
+
+ <process-name> [-v]
+
+ ... where -v is appended if verbose is enabled. This method
+ generates the arguments from the name and starts the process.
+
+ The port and address arguments are for log messages only.
+ """
+ # Set up the command arguments.
+ args = [name]
+ if self.verbose:
+ args += ['-v']
+
+ # ... and start the process
+ self.start_process(name, args, c_channel_env, port, address)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
+ def start_auth(self, c_channel_env):
+ """
+ Start the Authoritative server
+ """
+ authargs = ['b10-auth']
+ if self.nocache:
+ authargs += ['-n']
+ if self.uid:
+ authargs += ['-u', str(self.uid)]
+ if self.verbose:
+ authargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-auth", authargs, c_channel_env)
+
+ def start_resolver(self, c_channel_env):
+ """
+ Start the Resolver. At present, all these arguments and switches
+ are pure speculation. As with the auth daemon, they should be
+ read from the configuration database.
+ """
+ self.curproc = "b10-resolver"
+ # XXX: this must be read from the configuration manager in the future
+ resargs = ['b10-resolver']
+ if self.uid:
+ resargs += ['-u', str(self.uid)]
+ if self.verbose:
+ resargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-resolver", resargs, c_channel_env)
+
+ def start_xfrout(self, c_channel_env):
+ self.start_simple("b10-xfrout", c_channel_env)
+
+ def start_xfrin(self, c_channel_env):
+ # XXX: a quick-hack workaround. xfrin will implicitly use dynamically
+ # loadable data source modules, which will be installed in $(libdir).
+ # On some OSes (including MacOS X and *BSDs) the main process (python)
+ # cannot find the modules unless they are located in a common shared
+ # object path or a path in the (DY)LD_LIBRARY_PATH. We should seek
+ # a cleaner solution, but for a short term workaround we specify the
+ # path here, unconditionally, and without even bothering about which
+ # environment variable should be used.
+ #
+ # We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
+ # do this, as the conditions that make this workaround needed are
+ # the same as for the libexec path addition
+ if ADD_LIBEXEC_PATH:
+ cur_path = os.getenv('DYLD_LIBRARY_PATH')
+ cur_path = '' if cur_path is None else ':' + cur_path
+ c_channel_env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+
+ cur_path = os.getenv('LD_LIBRARY_PATH')
+ cur_path = '' if cur_path is None else ':' + cur_path
+ c_channel_env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+ self.start_simple("b10-xfrin", c_channel_env)
+
+ def start_zonemgr(self, c_channel_env):
+ self.start_simple("b10-zonemgr", c_channel_env)
+
+ def start_stats(self, c_channel_env):
+ self.start_simple("b10-stats", c_channel_env)
+
+ def start_stats_httpd(self, c_channel_env):
+ self.start_simple("b10-stats-httpd", c_channel_env)
+
+ def start_dhcp6(self, c_channel_env):
+ self.start_simple("b10-dhcp6", c_channel_env)
+
+ def start_cmdctl(self, c_channel_env):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
+ self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+
+ def start_all_processes(self):
+ """
+ Starts up all the processes. Any exception generated during the
+ starting of the processes is handled by the caller.
+ """
+ # The socket creator first, as it is the only thing that needs root
+ self.start_creator()
+ # TODO: Once everything uses the socket creator, we can drop root
+ # privileges right now
+
+ c_channel_env = self.c_channel_env
+ self.start_msgq(c_channel_env)
+ self.start_cfgmgr(c_channel_env)
+ self.start_ccsession(c_channel_env)
+
+ # Extract the parameters associated with Bob. This can only be
+ # done after the CC Session is started. Note that the logging
+ # configuration may override the "-v" switch set on the command line.
+ self.read_bind10_config()
+
+ # Continue starting the processes. The authoritative server (if
+ # selected):
+ if self.cfg_start_auth:
+ self.start_auth(c_channel_env)
+
+ # ... and resolver (if selected):
+ if self.cfg_start_resolver:
+ self.start_resolver(c_channel_env)
+ self.started_resolver_family = True
+
+ # Everything after the main components can run as non-root.
+ # TODO: this is only temporary - once the privileged socket creator is
+ # fully working, nothing else will run as root.
+ if self.uid is not None:
+ posix.setuid(self.uid)
+
+ # xfrin/xfrout and the zone manager are only meaningful if the
+ # authoritative server has been started.
+ if self.cfg_start_auth:
+ self.start_xfrout(c_channel_env)
+ self.start_xfrin(c_channel_env)
+ self.start_zonemgr(c_channel_env)
+ self.started_auth_family = True
+
+ # ... and finally start the remaining processes
+ self.start_stats(c_channel_env)
+ self.start_stats_httpd(c_channel_env)
+ self.start_cmdctl(c_channel_env)
+
+ if self.cfg_start_dhcp6:
+ self.start_dhcp6(c_channel_env)
+
+ def startup(self):
+ """
+ Start the BoB instance.
+
+ Returns None if successful, otherwise a string describing the
+ problem.
+ """
+ # Try to connect to the c-channel daemon, to see if it is already
+ # running
+ c_channel_env = {}
+ if self.msgq_socket_file is not None:
+ c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
+ logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
+ return "b10-msgq already running, or socket file not cleaned , cannot start"
+ except isc.cc.session.SessionError:
+ # this is the case we want, where the msgq is not running
+ pass
+
+ # Start all processes. If any one fails to start, kill all started
+ # processes and exit with an error indication.
+ try:
+ self.c_channel_env = c_channel_env
+ self.start_all_processes()
+ except Exception as e:
+ self.kill_started_processes()
+ return "Unable to start " + self.curproc + ": " + str(e)
+
+ # Started successfully
+ self.runnable = True
+ return None
+
+ def stop_all_processes(self):
+ """Stop all processes."""
+ cmd = { "command": ['shutdown']}
+
+ self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
+ self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
+ self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
+ self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
+ self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
+ self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
+ self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
+ self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
+ self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
+ # Terminate the creator last
+ self.stop_creator()
+
+ def stop_process(self, process, recipient):
+ """
+ Stop the given process in a friendly manner. The process is the name it
+ has (in logs, etc.); the recipient is its address on msgq.
+ """
+ logger.info(BIND10_STOP_PROCESS, process)
+ # TODO: Some timeout to solve processes that don't want to die would
+ # help. We can even store it in the dict; it is used only as a set
+ self.expected_shutdowns[process] = 1
+ # Ask the process to die willingly
+ self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
+ recipient)
+
+ # Series of stop_process wrappers
+ def stop_resolver(self):
+ self.stop_process('b10-resolver', 'Resolver')
+
+ def stop_auth(self):
+ self.stop_process('b10-auth', 'Auth')
+
+ def stop_xfrout(self):
+ self.stop_process('b10-xfrout', 'Xfrout')
+
+ def stop_xfrin(self):
+ self.stop_process('b10-xfrin', 'Xfrin')
+
+ def stop_zonemgr(self):
+ self.stop_process('b10-zonemgr', 'Zonemgr')
+
+ def shutdown(self):
+ """Stop the BoB instance."""
+ logger.info(BIND10_SHUTDOWN)
+ # first try using the BIND 10 request to stop
+ try:
+ self.stop_all_processes()
+ except:
+ pass
+ # XXX: some delay probably useful... how much is uncertain
+ # I have changed the delay from 0.5 to 1, but sometimes it's
+ # still not enough.
+ time.sleep(1)
+ self.reap_children()
+ # next try sending a SIGTERM
+ processes_to_stop = list(self.processes.values())
+ for proc_info in processes_to_stop:
+ logger.info(BIND10_SEND_SIGTERM, proc_info.name,
+ proc_info.pid)
+ try:
+ proc_info.process.terminate()
+ except OSError:
+ # ignore these (usually ESRCH because the child
+ # finally exited)
+ pass
+ # finally, send SIGKILL (unmaskable termination) until everybody dies
+ while self.processes:
+ # XXX: some delay probably useful... how much is uncertain
+ time.sleep(0.1)
+ self.reap_children()
+ processes_to_stop = list(self.processes.values())
+ for proc_info in processes_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, proc_info.name,
+ proc_info.pid)
+ try:
+ proc_info.process.kill()
+ except OSError:
+ # ignore these (usually ESRCH because the child
+ # finally exited)
+ pass
+ logger.info(BIND10_SHUTDOWN_COMPLETE)
+
+ def _get_process_exit_status(self):
+ return os.waitpid(-1, os.WNOHANG)
+
+ def reap_children(self):
+ """Check to see if any of our child processes have exited,
+ and note this for later handling.
+ """
+ while True:
+ try:
+ (pid, exit_status) = self._get_process_exit_status()
+ except OSError as o:
+ if o.errno == errno.ECHILD: break
+ # XXX: should be impossible to get any other error here
+ raise
+ if pid == 0: break
+ if self.sockcreator is not None and self.sockcreator.pid() == pid:
+ # This is the socket creator, started and terminated
+ # differently. This can't be restarted.
+ if self.runnable:
+ logger.fatal(BIND10_SOCKCREATOR_CRASHED)
+ self.sockcreator = None
+ self.runnable = False
+ elif pid in self.processes:
+ # One of the processes we know about. Get information on it.
+ proc_info = self.processes.pop(pid)
+ proc_info.restart_schedule.set_run_stop_time()
+ self.dead_processes[proc_info.pid] = proc_info
+
+ # Write out message, but only if in the running state:
+ # During startup and shutdown, these messages are handled
+ # elsewhere.
+ if self.runnable:
+ if exit_status is None:
+ logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
+ proc_info.name, proc_info.pid)
+ else:
+ logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
+ proc_info.name, proc_info.pid,
+ exit_status)
+
+ # Was it a special process?
+ if proc_info.name == "b10-msgq":
+ logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
+ self.runnable = False
+
+ # If we're in 'brittle' mode, we want to shutdown after
+ # any process dies.
+ if self.brittle:
+ self.runnable = False
+ else:
+ logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+ def restart_processes(self):
+ """
+ Restart any dead processes:
+
+ * Returns the time when the next process is ready to be restarted.
+ * If the server is shutting down, returns 0.
+ * If there are no processes, returns None.
+
+ The values returned can be safely passed into select() as the
+ timeout value.
+ """
+ next_restart = None
+ # if we're shutting down, then don't restart
+ if not self.runnable:
+ return 0
+ # otherwise look through each dead process and try to restart
+ still_dead = {}
+ now = time.time()
+ for proc_info in self.dead_processes.values():
+ if proc_info.name in self.expected_shutdowns:
+ # We don't restart, we wanted it to die
+ del self.expected_shutdowns[proc_info.name]
+ continue
+ restart_time = proc_info.restart_schedule.get_restart_time(now)
+ if restart_time > now:
+ if (next_restart is None) or (next_restart > restart_time):
+ next_restart = restart_time
+ still_dead[proc_info.pid] = proc_info
+ else:
+ logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
+ try:
+ proc_info.respawn()
+ self.processes[proc_info.pid] = proc_info
+ logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
+ except:
+ still_dead[proc_info.pid] = proc_info
+ # remember any processes that refuse to be resurrected
+ self.dead_processes = still_dead
+ # return the time when the next process is ready to be restarted
+ return next_restart
+
+# global variables, needed for signal handlers
+options = None
+boss_of_bind = None
+
+def reaper(signal_number, stack_frame):
+ """A child process has died (SIGCHLD received)."""
+ # don't do anything...
+ # the Python signal handler has been set up to write
+ # down a pipe, waking up our select() bit
+ pass
+
+def get_signame(signal_number):
+ """Return the symbolic name for a signal."""
+ for sig in dir(signal):
+ if sig.startswith("SIG") and sig[3].isalnum():
+ if getattr(signal, sig) == signal_number:
+ return sig
+ return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
+def fatal_signal(signal_number, stack_frame):
+ """We need to exit (SIGINT or SIGTERM received)."""
+ global options
+ global boss_of_bind
+ logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.runnable = False
+
+def process_rename(option, opt_str, value, parser):
+ """Function that renames the process if it is requested by a option."""
+ isc.util.process.rename(value)
+
+def parse_args(args=sys.argv[1:], Parser=OptionParser):
+ """
+ Function for parsing command line arguments. Returns the
+ options object from OptionParser.
+ """
+ parser = Parser(version=VERSION)
+ parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
+ type="string", default=None,
+ help="UNIX domain socket file the b10-msgq daemon will use")
+ parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
+ default=False, help="disable hot-spot cache in authoritative DNS server")
+ parser.add_option("-u", "--user", dest="user", type="string", default=None,
+ help="Change user after startup (must run as root)")
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
+ parser.add_option("--pretty-name", type="string", action="callback",
+ callback=process_rename,
+ help="Set the process name (displayed in ps, top, ...)")
+ parser.add_option("-c", "--config-file", action="store",
+ dest="config_file", default=None,
+ help="Configuration database filename")
+ parser.add_option("-p", "--data-path", dest="data_path",
+ help="Directory to search for configuration files",
+ default=None)
+ parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
+ default=None, help="Port of command control")
+ parser.add_option("--pid-file", dest="pid_file", type="string",
+ default=None,
+ help="file to dump the PID of the BIND 10 process")
+ parser.add_option("--brittle", dest="brittle", action="store_true",
+ help="debugging flag: exit if any component dies")
+ parser.add_option("-w", "--wait", dest="wait_time", type="int",
+ default=10, help="Time (in seconds) to wait for config manager to start up")
+
+ (options, args) = parser.parse_args(args)
+
+ if options.cmdctl_port is not None:
+ try:
+ isc.net.parse.port_parse(options.cmdctl_port)
+ except ValueError as e:
+ parser.error(e)
+
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ return options
+
+def dump_pid(pid_file):
+ """
+ Dump the PID of the current process to the specified file. If the given
+ file is None this function does nothing. If the file already exists,
+ the existing content will be removed. If a system error happens in
+ creating or writing to the file, the corresponding exception will be
+ propagated to the caller.
+ """
+ if pid_file is None:
+ return
+ f = open(pid_file, "w")
+ f.write('%d\n' % os.getpid())
+ f.close()
+
+def unlink_pid_file(pid_file):
+ """
+ Remove the given file, which is basically expected to be the PID file
+ created by dump_pid(). The specified file may or may not exist; if it
+ doesn't this function does nothing. Other system level errors in removing
+ the file will be propagated as the corresponding exception.
+ """
+ if pid_file is None:
+ return
+ try:
+ os.unlink(pid_file)
+ except OSError as error:
+ if error.errno is not errno.ENOENT:
+ raise
+
+
+def main():
+ global options
+ global boss_of_bind
+ # Enforce line buffering on stdout, even when not a TTY
+ sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
+
+ options = parse_args()
+
+ # Check user ID.
+ setuid = None
+ username = None
+ if options.user:
+ # Try getting information about the user, assuming UID passed.
+ try:
+ pw_ent = pwd.getpwuid(int(options.user))
+ setuid = pw_ent.pw_uid
+ username = pw_ent.pw_name
+ except ValueError:
+ pass
+ except KeyError:
+ pass
+
+ # Next try getting information about the user, assuming user name
+ # passed.
+ # If the information is both a valid user name and user number, we
+ # prefer the name because we try it second. A minor point, hopefully.
+ try:
+ pw_ent = pwd.getpwnam(options.user)
+ setuid = pw_ent.pw_uid
+ username = pw_ent.pw_name
+ except KeyError:
+ pass
+
+ if setuid is None:
+ logger.fatal(BIND10_INVALID_USER, options.user)
+ sys.exit(1)
+
+ # Announce startup.
+ logger.info(BIND10_STARTING, VERSION)
+
+ # Create wakeup pipe for signal handlers
+ wakeup_pipe = os.pipe()
+ signal.set_wakeup_fd(wakeup_pipe[1])
+
+ # Set signal handlers for catching child termination, as well
+ # as our own demise.
+ signal.signal(signal.SIGCHLD, reaper)
+ signal.siginterrupt(signal.SIGCHLD, False)
+ signal.signal(signal.SIGINT, fatal_signal)
+ signal.signal(signal.SIGTERM, fatal_signal)
+
+ # Block SIGPIPE, as we don't want it to end this process
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+
+ # Go bob!
+ boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+ options.config_file, options.nocache, options.verbose,
+ setuid, username, options.cmdctl_port, options.brittle,
+ options.wait_time)
+ startup_result = boss_of_bind.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # In our main loop, we check for dead processes or messages
+ # on the c-channel.
+ wakeup_fd = wakeup_pipe[0]
+ ccs_fd = boss_of_bind.ccs.get_socket().fileno()
+ while boss_of_bind.runnable:
+ # clean up any processes that exited
+ boss_of_bind.reap_children()
+ next_restart = boss_of_bind.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ boss_of_bind.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ boss_of_bind.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.shutdown()
+ unlink_pid_file(options.pid_file)
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
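
A quick usage sketch of the new -w/--wait command-line option added above
(assuming parse_args() is importable, e.g. from a test harness; the default
of 10 seconds matches the man page):

    # parse_args() accepts an explicit argument list, so it can be exercised
    # without touching sys.argv:
    options = parse_args(['-w', '30'])
    assert options.wait_time == 30

    options = parse_args([])
    assert options.wait_time == 10   # documented default
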
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..b4cfac6 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -37,6 +37,17 @@
"command_description": "List the running BIND 10 processes",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
]
}
}
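
The boot_time statistics item declared above is filled in by the boss's
getstats/sendstats command handlers; the value is produced from the process
start time, mirroring _get_stats_data() in bind10_src.py.in:

    import time

    _BASETIME = time.gmtime()   # captured once, when the boss process starts

    def get_stats_data():
        # ISO 8601 "date-time" format, matching item_format in bob.spec.
        return {"owner": "Boss",
                "data": {"boot_time":
                         time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)}}
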
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API is between Boss and other modules to allow them to request sockets.
+For simplicity, we will use the socket creator for all (even non-privileged)
+ports for now, but we should add an abstraction for this later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP, bound to a given
+ port and address (sockets that are not bound to anything can be created
+ without privileges, therefore are not requested from the socket creator).
+* Allow providing the same socket to multiple modules (e.g. multiple running
+ auth servers).
+* Allow releasing the sockets (in case all modules using a socket give it up,
+ terminate or crash).
+* Allow restricting the sharing (don't allow a socket to be shared between auth
+ and recursive, as the packets would often get to the wrong application;
+ show an error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with the current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+ information (like the parameters to be used); we would have to do it
+ manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+ permissions are not enough, for example if they are not honored on
+ socket files, as indicated in the first paragraph of:
+ http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+ The socket would have to be secured separately. With the tokens,
+ there's some level of security already - someone not having the
+ token can't request a privileged socket.
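+
+To illustrate the hand-over described above, the client side could look
+roughly like the sketch below. This is only a sketch: the socket path, the
+token format and the helper name are assumptions, not part of any protocol
+defined here, and it uses the recvmsg() interface of newer Python versions.
+
+import socket, struct, array
+
+def fetch_socket(token, path):
+    # Connect to the unix-domain socket Boss listens on and present the
+    # token received earlier over the command channel.
+    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    conn.connect(path)
+    conn.sendall(token)
+    # The socket itself arrives as SCM_RIGHTS ancillary data.
+    fdsize = struct.calcsize("i")
+    msg, ancdata, flags, addr = conn.recvmsg(1, socket.CMSG_LEN(fdsize))
+    conn.close()
+    for level, ctype, data in ancdata:
+        if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
+            return array.array("i", data[:fdsize])[0]
+    raise RuntimeError("no file descriptor received")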
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process
+will hold a cache. Each socket that is created and sent is also kept open in
+Boss. A reference count is kept for each of them.
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of being created again by the creator.
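+
+As an illustration only (this is not the actual Boss code, just a toy model
+of the bookkeeping described above, with a made-up key layout):
+
+class SocketCache:
+    """Reference-counted cache keyed by (address, port, protocol)."""
+    def __init__(self):
+        self._entries = {}          # key -> [socket, reference count]
+
+    def get(self, key, create):
+        if key not in self._entries:
+            # First request: have the (privileged) creator make the socket.
+            self._entries[key] = [create(key), 0]
+        entry = self._entries[key]
+        entry[1] += 1
+        return entry[0]
+
+    def release(self, key):
+        entry = self._entries[key]
+        entry[1] -= 1
+        if entry[1] == 0:
+            # The last user is gone; drop our own copy of the socket too.
+            entry[0].close()
+            del self._entries[key]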
+
+When an application gives the socket up willingly (by sending a message over
+the command channel), the reference count can be decreased without problems.
+But when the application terminates or crashes, we need to decrease it as
+well. This is a problem, since we don't know which command channel connection
+(eg. lname) belongs to which PID. Furthermore, the applications don't need to
+be started by Boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. a group message
+ to some name). This one is better if we want to migrate to dbus, since dbus
+ already has this capability as well as sending the sockets in-band (at least
+ it seems so on unix), and we could get rid of the unix-domain socket
+ completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+ was sent to which connection and when the connection closes (because the
+ application crashed), it can drop all the references on the sockets. This
+ seems easier to implement.
+
+The commands
+------------
+* Command to release a socket. This one would have a single parameter, the
+ token used to get the socket. After this, Boss would decrease its reference
+ count and, if it drops to zero, close its own copy of the socket. This should
+ be used when the module stops using the socket (and after it has closed it).
+ The library could remember the file-descriptor to token mapping (for common
+ applications that don't request the same socket multiple times in parallel).
+* Command to request a socket. It would have parameters to specify which
+ socket (IP address, address family, port) and how to allow sharing. Sharing
+ would be one of:
+ - None
+ - Same kind of application (however, it is not entirely clear what this
+   means; if it doesn't work out intuitively, we'll need to define it somehow)
+ - Any kind of application
+ A kind of application would also be provided, to decide whether the sharing
+ is possible (eg. if auth allows sharing only with the same kind and something
+ else allows sharing with anything, they cannot share, but two auths can; see
+ the sketch after this list).
+
+ It would return either an error (the socket can't be created or sharing is
+ not possible) or the token. The application would then have some time to
+ pick up the requested socket.
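+
+A rough sketch of that compatibility check (the constant and function names
+below are made up for illustration; they are not the names the real API or
+the example in the next section would use):
+
+SHARE_NO, SHARE_SAMEAPP, SHARE_ANY = 'NO', 'SAMEAPP', 'ANY'
+
+def sharing_allowed(existing, requested):
+    # Each side is a (sharing mode, application kind) pair.
+    (mode_a, kind_a), (mode_b, kind_b) = existing, requested
+    if SHARE_NO in (mode_a, mode_b):
+        return False
+    if SHARE_SAMEAPP in (mode_a, mode_b):
+        # At least one side insists on the same kind of application.
+        return kind_a == kind_b
+    return True    # both sides allow any kind
+
+# Two auths that allow same-kind sharing can share; an auth that allows
+# same-kind sharing and a resolver that allows anything cannot.
+assert sharing_allowed((SHARE_SAMEAPP, 'auth'), (SHARE_SAMEAPP, 'auth'))
+assert not sharing_allowed((SHARE_SAMEAPP, 'auth'), (SHARE_ANY, 'resolver'))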
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so a code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd, socket.AF_INET, socket.SOCK_DGRAM)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
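+
+For context, this is the kind of option that must be set before bind() and
+that the current protocol cannot express (plain Python, illustration only;
+the address and port are arbitrary):
+
+import socket
+s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)   # must happen here
+s.bind(('127.0.0.1', 5300))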
+
+The current socket creator doesn't support raw sockets, but if they are
+needed, it should be easy to add them.
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 89301bd..9e4abc0 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -20,17 +20,17 @@ export PYTHON_EXEC
BIND10_PATH=@abs_top_builddir@/src/bin/bind10
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
@@ -45,6 +45,5 @@ export B10_FROM_BUILD
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
-cd ${BIND10_PATH}
-exec ${PYTHON_EXEC} -O bind10 "$@"
+exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 34d809a..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -2,7 +2,14 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
PYTESTS = bind10_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
@@ -13,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 2ffe2b4..1bd6ab4 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -21,11 +21,13 @@ from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BAS
import unittest
import sys
import os
+import copy
import signal
import socket
from isc.net.addr import IPAddr
import time
import isc
+import isc.log
from isc.testutils.parse_args import TestOptParser, OptsError
@@ -111,6 +113,9 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cfg_start_auth, True)
self.assertEqual(bob.cfg_start_resolver, False)
+ self.assertEqual(bob.cfg_start_dhcp4, False)
+ self.assertEqual(bob.cfg_start_dhcp6, False)
+
def test_init_alternate_socket(self):
bob = BoB("alt_socket_file")
self.assertEqual(bob.verbose, False)
@@ -125,15 +130,35 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.nocache, False)
self.assertEqual(bob.cfg_start_auth, True)
self.assertEqual(bob.cfg_start_resolver, False)
+ self.assertEqual(bob.cfg_start_dhcp4, False)
+ self.assertEqual(bob.cfg_start_dhcp6, False)
def test_command_handler(self):
class DummySession():
def group_sendmsg(self, msg, group):
(self.msg, self.group) = (msg, group)
def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Boss",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
bob = BoB()
bob.verbose = True
bob.cc_session = DummySession()
+ bob.ccs = DummyModuleCCSession()
# a bad command
self.assertEqual(bob.command_handler(-1, None),
isc.config.ccsession.create_answer(1, "bad command"))
@@ -141,14 +166,22 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("shutdown", None),
isc.config.ccsession.create_answer(0))
self.assertFalse(bob.runnable)
+ # "getstats" command
+ self.assertEqual(bob.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
isc.config.ccsession.create_answer(0))
self.assertEqual(bob.cc_session.group, "Stats")
self.assertEqual(bob.cc_session.msg,
isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ "set", { "owner": "Boss",
+ "data": {
+ "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
}}))
# "ping" command
self.assertEqual(bob.command_handler("ping", None),
@@ -187,6 +220,13 @@ class MockBob(BoB):
self.cmdctl = False
self.c_channel_env = {}
self.processes = { }
+ self.creator = False
+
+ def start_creator(self):
+ self.creator = True
+
+ def stop_creator(self, kill=False):
+ self.creator = False
def read_bind10_config(self):
# Configuration options are set directly
@@ -247,6 +287,16 @@ class MockBob(BoB):
self.processes[12] = ProcessInfo('b10-cmdctl', ['/bin/false'])
self.processes[12].pid = 12
+ def start_dhcp6(self, c_channel_env):
+ self.dhcp6 = True
+ self.processes[13] = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ self.processes[13].pid = 13
+
+ def start_dhcp4(self, c_channel_env):
+ self.dhcp4 = True
+ self.processes[14] = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ self.processes[14].pid = 14
+
# We don't really use all of these stop_ methods. But it might turn out
# someone would add some stop_ method to BoB and we want that one overriden
# in case he forgets to update the tests.
@@ -311,6 +361,10 @@ class TestStartStopProcessesBob(unittest.TestCase):
of processes and that the right processes are started and stopped
according to changes in configuration.
"""
+ def check_environment_unchanged(self):
+ # Check whether the environment has not been changed
+ self.assertEqual(original_os_environ, os.environ)
+
def check_started(self, bob, core, auth, resolver):
"""
Check that the right sets of services are started. The ones that
@@ -321,6 +375,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.assertEqual(bob.msgq, core)
self.assertEqual(bob.cfgmgr, core)
self.assertEqual(bob.ccsession, core)
+ self.assertEqual(bob.creator, core)
self.assertEqual(bob.auth, auth)
self.assertEqual(bob.resolver, resolver)
self.assertEqual(bob.xfrout, auth)
@@ -329,6 +384,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.assertEqual(bob.stats, core)
self.assertEqual(bob.stats_httpd, core)
self.assertEqual(bob.cmdctl, core)
+ self.check_environment_unchanged()
def check_preconditions(self, bob):
self.check_started(bob, False, False, False)
@@ -339,6 +395,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
should be started. Some processes still need to be running.
"""
self.check_started(bob, True, False, False)
+ self.check_environment_unchanged()
def check_started_both(self, bob):
"""
@@ -346,18 +403,40 @@ class TestStartStopProcessesBob(unittest.TestCase):
(auth and resolver) are enabled.
"""
self.check_started(bob, True, True, True)
+ self.check_environment_unchanged()
def check_started_auth(self, bob):
"""
Check the set of processes needed to run auth only is started.
"""
self.check_started(bob, True, True, False)
+ self.check_environment_unchanged()
def check_started_resolver(self, bob):
"""
Check the set of processes needed to run resolver only is started.
"""
self.check_started(bob, True, False, True)
+ self.check_environment_unchanged()
+
+ def check_started_dhcp(self, bob, v4, v6):
+ """
+ Check if proper combinations of DHCPv4 and DHCPv6 can be started
+ """
+ v4found = 0
+ v6found = 0
+
+ for pid in bob.processes:
+ if (bob.processes[pid].name == "b10-dhcp4"):
+ v4found += 1
+ if (bob.processes[pid].name == "b10-dhcp6"):
+ v6found += 1
+
+ # there should be exactly one DHCPv4 daemon (if v4==True)
+ # there should be exactly one DHCPv6 daemon (if v6==True)
+ self.assertEqual(v4==True, v4found==1)
+ self.assertEqual(v6==True, v6found==1)
+ self.check_environment_unchanged()
# Checks the processes started when starting neither auth nor resolver
# is specified.
@@ -524,6 +603,40 @@ class TestStartStopProcessesBob(unittest.TestCase):
bob.config_handler({'start_auth': True, 'start_resolver': True})
+ # Checks that DHCP (v4 and v6) processes are started when expected
+ def test_start_dhcp(self):
+
+ # Create BoB and ensure correct initialization
+ bob = MockBob()
+ self.check_preconditions(bob)
+
+ # don't care about DNS stuff
+ bob.cfg_start_auth = False
+ bob.cfg_start_resolver = False
+
+ # v4 and v6 disabled
+ bob.cfg_start_dhcp6 = False
+ bob.cfg_start_dhcp4 = False
+ bob.start_all_processes()
+ self.check_started_dhcp(bob, False, False)
+
+ # v6 only enabled
+ bob.cfg_start_dhcp6 = True
+ bob.cfg_start_dhcp4 = False
+ bob.start_all_processes()
+ self.check_started_dhcp(bob, False, True)
+
+ # uncomment when dhcpv4 is implemented
+ # v4 only enabled
+ #bob.cfg_start_dhcp6 = False
+ #bob.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(bob, True, False)
+
+ # both v4 and v6 enabled
+ #bob.cfg_start_dhcp6 = True
+ #bob.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(bob, True, True)
+
class TestBossCmd(unittest.TestCase):
def test_ping(self):
"""
@@ -697,4 +810,7 @@ class TestBrittle(unittest.TestCase):
self.assertFalse(bob.runnable)
if __name__ == '__main__':
+ # store os.environ for test_unchanged_environment
+ original_os_environ = copy.deepcopy(os.environ)
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index 2f412ec..700f26e 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,6 +5,8 @@ man_MANS = bindctl.1
EXTRA_DIST = $(man_MANS) bindctl.xml
+noinst_SCRIPTS = run_bindctl.sh
+
python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
mycollections.py
pythondir = $(pyexecdir)/bindctl
@@ -25,3 +27,8 @@ bindctl: bindctl_main.py
-e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
-e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl_main.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 8973aa5..b67bc4b 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -46,6 +46,16 @@ except ImportError:
# if we have readline support, use that, otherwise use normal stdio
try:
import readline
+ # This is a fix for the problem described in
+ # http://bind10.isc.org/ticket/1345
+ # If '-' is seen as a word-boundary, the final completion-step
+ # (as handled by the cmd module, and hence outside our reach) can
+ # mistakenly add data twice, resulting in wrong completion results
+ # The solution is to remove it.
+ delims = readline.get_completer_delims()
+ delims = delims.replace('-', '')
+ readline.set_completer_delims(delims)
+
my_readline = readline.get_line_buffer
except ImportError:
my_readline = sys.stdin.readline
@@ -61,21 +71,21 @@ Type \"<module_name> <command_name> help\" for help on the specific command.
\nAvailable module names: """
class ValidatedHTTPSConnection(http.client.HTTPSConnection):
- '''Overrides HTTPSConnection to support certification
+ '''Overrides HTTPSConnection to support certification
validation. '''
def __init__(self, host, ca_certs):
http.client.HTTPSConnection.__init__(self, host)
self.ca_certs = ca_certs
def connect(self):
- ''' Overrides the connect() so that we do
+ ''' Overrides the connect() so that we do
certificate validation. '''
sock = socket.create_connection((self.host, self.port),
self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
-
+
req_cert = ssl.CERT_NONE
if self.ca_certs:
req_cert = ssl.CERT_REQUIRED
@@ -85,7 +95,7 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
ca_certs=self.ca_certs)
class BindCmdInterpreter(Cmd):
- """simple bindctl example."""
+ """simple bindctl example."""
def __init__(self, server_port='localhost:8080', pem_file=None,
csv_file_dir=None):
@@ -118,29 +128,33 @@ class BindCmdInterpreter(Cmd):
socket.gethostname())).encode())
digest = session_id.hexdigest()
return digest
-
+
def run(self):
'''Parse commands from user and send them to cmdctl. '''
try:
if not self.login_to_cmdctl():
- return
+ return 1
self.cmdloop()
print('\nExit from bindctl')
+ return 0
except FailToLogin as err:
# error already printed when this was raised, ignoring
- pass
+ return 1
except KeyboardInterrupt:
print('\nExit from bindctl')
+ return 0
except socket.error as err:
print('Failed to send request, the connection is closed')
+ return 1
except http.client.CannotSendRequest:
print('Can not send request, the connection is busy')
+ return 1
def _get_saved_user_info(self, dir, file_name):
- ''' Read all the available username and password pairs saved in
+ ''' Read all the available username and password pairs saved in
file(path is "dir + file_name"), Return value is one list of elements
- ['name', 'password'], If get information failed, empty list will be
+ ['name', 'password'], If get information failed, empty list will be
returned.'''
if (not dir) or (not os.path.exists(dir)):
return []
@@ -166,7 +180,7 @@ class BindCmdInterpreter(Cmd):
if not os.path.exists(dir):
os.mkdir(dir, 0o700)
- csvfilepath = dir + file_name
+ csvfilepath = dir + file_name
csvfile = open(csvfilepath, 'w')
os.chmod(csvfilepath, 0o600)
writer = csv.writer(csvfile)
@@ -180,7 +194,7 @@ class BindCmdInterpreter(Cmd):
return True
def login_to_cmdctl(self):
- '''Login to cmdctl with the username and password inputted
+ '''Login to cmdctl with the username and password inputted
from user. After the login is sucessful, the username and
password will be saved in 'default_user.csv', when run the next
time, username and password saved in 'default_user.csv' will be
@@ -246,14 +260,14 @@ class BindCmdInterpreter(Cmd):
if self.login_to_cmdctl():
# successful, so try send again
status, reply_msg = self._send_message(url, body)
-
+
if reply_msg:
return json.loads(reply_msg.decode())
else:
return {}
-
- def send_POST(self, url, post_param = None):
+
+ def send_POST(self, url, post_param = None):
'''Send POST request to cmdctl, session id is send with the name
'cookie' in header.
Format: /module_name/command_name
@@ -312,12 +326,12 @@ class BindCmdInterpreter(Cmd):
def _validate_cmd(self, cmd):
'''validate the parameters and merge some parameters together,
merge algorithm is based on the command line syntax, later, if
- a better command line syntax come out, this function should be
- updated first.
+ a better command line syntax come out, this function should be
+ updated first.
'''
if not cmd.module in self.modules:
raise CmdUnknownModuleSyntaxError(cmd.module)
-
+
module_info = self.modules[cmd.module]
if not module_info.has_command_with_name(cmd.command):
raise CmdUnknownCmdSyntaxError(cmd.module, cmd.command)
@@ -325,17 +339,17 @@ class BindCmdInterpreter(Cmd):
command_info = module_info.get_command_with_name(cmd.command)
manda_params = command_info.get_mandatory_param_names()
all_params = command_info.get_param_names()
-
+
# If help is entered, don't do further parameter validation.
for val in cmd.params.keys():
if val == "help":
return
-
- params = cmd.params.copy()
- if not params and manda_params:
- raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
+
+ params = cmd.params.copy()
+ if not params and manda_params:
+ raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
elif params and not all_params:
- raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
+ raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
list(params.keys())[0])
elif params:
param_name = None
@@ -366,7 +380,7 @@ class BindCmdInterpreter(Cmd):
param_name = command_info.get_param_name_by_position(name, param_count)
cmd.params[param_name] = cmd.params[name]
del cmd.params[name]
-
+
elif not name in all_params:
raise CmdUnknownParamSyntaxError(cmd.module, cmd.command, name)
@@ -375,7 +389,7 @@ class BindCmdInterpreter(Cmd):
if not name in params and not param_nr in params:
raise CmdMissParamSyntaxError(cmd.module, cmd.command, name)
param_nr += 1
-
+
# Convert parameter value according parameter spec file.
# Ignore check for commands belongs to module 'config'
if cmd.module != CONFIG_MODULE_NAME:
@@ -384,9 +398,9 @@ class BindCmdInterpreter(Cmd):
try:
cmd.params[param_name] = isc.config.config_data.convert_type(param_spec, cmd.params[param_name])
except isc.cc.data.DataTypeError as e:
- raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
+ raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
% (param_name, param_spec['item_type']) + str(e))
-
+
def _handle_cmd(self, cmd):
'''Handle a command entered by the user'''
if cmd.command == "help" or ("help" in cmd.params.keys()):
@@ -398,6 +412,8 @@ class BindCmdInterpreter(Cmd):
print("Error: " + str(dte))
except isc.cc.data.DataNotFoundError as dnfe:
print("Error: " + str(dnfe))
+ except isc.cc.data.DataAlreadyPresentError as dape:
+ print("Error: " + str(dape))
except KeyError as ke:
print("Error: missing " + str(ke))
else:
@@ -406,7 +422,7 @@ class BindCmdInterpreter(Cmd):
def add_module_info(self, module_info):
'''Add the information about one module'''
self.modules[module_info.name] = module_info
-
+
def get_module_names(self):
'''Return the names of all known modules'''
return list(self.modules.keys())
@@ -438,15 +454,15 @@ class BindCmdInterpreter(Cmd):
subsequent_indent=" " +
" " * CONST_BINDCTL_HELP_INDENT_WIDTH,
width=70))
-
+
def onecmd(self, line):
if line == 'EOF' or line.lower() == "quit":
self.conn.close()
return True
-
+
if line == 'h':
line = 'help'
-
+
Cmd.onecmd(self, line)
def remove_prefix(self, list, prefix):
@@ -474,7 +490,7 @@ class BindCmdInterpreter(Cmd):
cmd = BindCmdParse(cur_line)
if not cmd.params and text:
hints = self._get_command_startswith(cmd.module, text)
- else:
+ else:
hints = self._get_param_startswith(cmd.module, cmd.command,
text)
if cmd.module == CONFIG_MODULE_NAME:
@@ -490,8 +506,8 @@ class BindCmdInterpreter(Cmd):
except CmdMissCommandNameFormatError as e:
if not text.strip(): # command name is empty
- hints = self.modules[e.module].get_command_names()
- else:
+ hints = self.modules[e.module].get_command_names()
+ else:
hints = self._get_module_startswith(text)
except CmdCommandNameFormatError as e:
@@ -505,44 +521,43 @@ class BindCmdInterpreter(Cmd):
hints = []
self.hint = hints
- #self._append_space_to_hint()
if state < len(self.hint):
return self.hint[state]
else:
return None
-
- def _get_module_startswith(self, text):
+
+ def _get_module_startswith(self, text):
return [module
- for module in self.modules
+ for module in self.modules
if module.startswith(text)]
def _get_command_startswith(self, module, text):
- if module in self.modules:
+ if module in self.modules:
return [command
- for command in self.modules[module].get_command_names()
+ for command in self.modules[module].get_command_names()
if command.startswith(text)]
-
- return []
-
- def _get_param_startswith(self, module, command, text):
+ return []
+
+
+ def _get_param_startswith(self, module, command, text):
if module in self.modules:
- module_info = self.modules[module]
- if command in module_info.get_command_names():
+ module_info = self.modules[module]
+ if command in module_info.get_command_names():
cmd_info = module_info.get_command_with_name(command)
- params = cmd_info.get_param_names()
+ params = cmd_info.get_param_names()
hint = []
- if text:
+ if text:
hint = [val for val in params if val.startswith(text)]
else:
hint = list(params)
-
+
if len(hint) == 1 and hint[0] != "help":
- hint[0] = hint[0] + " ="
-
+ hint[0] = hint[0] + " ="
+
return hint
return []
@@ -559,24 +574,24 @@ class BindCmdInterpreter(Cmd):
self._print_correct_usage(err)
except isc.cc.data.DataTypeError as err:
print("Error! ", err)
-
- def _print_correct_usage(self, ept):
+
+ def _print_correct_usage(self, ept):
if isinstance(ept, CmdUnknownModuleSyntaxError):
self.do_help(None)
-
+
elif isinstance(ept, CmdUnknownCmdSyntaxError):
self.modules[ept.module].module_help()
-
+
elif isinstance(ept, CmdMissParamSyntaxError) or \
isinstance(ept, CmdUnknownParamSyntaxError):
self.modules[ept.module].command_help(ept.command)
-
-
+
+
def _append_space_to_hint(self):
"""Append one space at the end of complete hint."""
self.hint = [(val + " ") for val in self.hint]
-
-
+
+
def _handle_help(self, cmd):
if cmd.command == "help":
self.modules[cmd.module].module_help()
@@ -634,7 +649,15 @@ class BindCmdInterpreter(Cmd):
# we have more data to show
line += "/"
else:
- line += "\t" + json.dumps(value_map['value'])
+ # if type is named_set, don't print value if None
+ # (it is either {} meaning empty, or None, meaning
+ # there actually is data, but not to be shown with
+ # the current command)
+ if value_map['type'] == 'named_set' and\
+ value_map['value'] is None:
+ line += "/\t"
+ else:
+ line += "\t" + json.dumps(value_map['value'])
line += "\t" + value_map['type']
line += "\t"
if value_map['default']:
@@ -649,10 +672,9 @@ class BindCmdInterpreter(Cmd):
data, default = self.config_data.get_value(identifier)
print(json.dumps(data))
elif cmd.command == "add":
- if 'value' in cmd.params:
- self.config_data.add_value(identifier, cmd.params['value'])
- else:
- self.config_data.add_value(identifier)
+ self.config_data.add_value(identifier,
+ cmd.params.get('value_or_name'),
+ cmd.params.get('value_for_set'))
elif cmd.command == "remove":
if 'value' in cmd.params:
self.config_data.remove_value(identifier, cmd.params['value'])
@@ -674,9 +696,12 @@ class BindCmdInterpreter(Cmd):
elif cmd.command == "revert":
self.config_data.clear_local_changes()
elif cmd.command == "commit":
- self.config_data.commit()
+ try:
+ self.config_data.commit()
+ except isc.config.ModuleCCSessionError as mcse:
+ print(str(mcse))
elif cmd.command == "diff":
- print(self.config_data.get_local_changes());
+ print(self.config_data.get_local_changes())
elif cmd.command == "go":
self.go(identifier)
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 01307e9..58c03eb 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -50,17 +50,28 @@ def prepare_config_commands(tool):
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+ cmd = CommandInfo(name = "add", desc =
+ "Add an entry to configuration list or a named set. "
+ "When adding to a list, the command has one optional argument, "
+ "a value to add to the list. The value must be in correct JSON "
+ "and complete. When adding to a named set, it has one "
+ "mandatory parameter (the name to add), and an optional "
+ "parameter value, similar to when adding to a list. "
+ "In either case, when no value is given, an entry will be "
+ "constructed with default values.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value_or_name", type = "string", optional=True, desc = "Specifies a value to add to the list, or the name when adding to a named set. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+ param = ParamInfo(name = "value_for_set", type = "string", optional=True, desc = "Specifies an optional value to add to the named map. It must be in correct JSON format and complete.")
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+ cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list or named set.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "When identifier is a list, specifies a value to remove from the list. It must be in correct JSON format and complete. When it is a named set, specifies the name to remove.")
cmd.add_param(param)
module.add_command(cmd)
@@ -135,4 +146,5 @@ if __name__ == '__main__':
tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
csv_file_dir=options.csv_file_dir)
prepare_config_commands(tool)
- tool.run()
+ result = tool.run()
+ sys.exit(result)
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
old mode 100644
new mode 100755
index 730ce1e..f4cc40c
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -20,9 +20,17 @@ export PYTHON_EXEC
BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
B10_FROM_SOURCE=@abs_top_srcdir@
export B10_FROM_SOURCE
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index d2bb90f..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = bindctl_test.py cmdparse_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 0635b32..cef35dc 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -31,14 +31,14 @@ from bindctl_main import set_bindctl_options
from bindctl import cmdparse
from bindctl import bindcmd
from bindctl.moduleinfo import *
-from bindctl.exception import *
+from bindctl.exception import *
try:
from collections import OrderedDict
except ImportError:
from mycollections import OrderedDict
class TestCmdLex(unittest.TestCase):
-
+
def my_assert_raise(self, exception_type, cmd_line):
self.assertRaises(exception_type, cmdparse.BindCmdParse, cmd_line)
@@ -48,13 +48,13 @@ class TestCmdLex(unittest.TestCase):
assert cmd.module == "zone"
assert cmd.command == "add"
self.assertEqual(len(cmd.params), 0)
-
-
+
+
def testCommandWithParameters(self):
lines = {"zone add zone_name = cnnic.cn, file = cnnic.cn.file master=1.1.1.1",
"zone add zone_name = \"cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1 ",
"zone add zone_name = 'cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1, " }
-
+
for cmd_line in lines:
cmd = cmdparse.BindCmdParse(cmd_line)
assert cmd.module == "zone"
@@ -75,7 +75,7 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = 1\"\'34**&2 ,value= 44\"\'\"')
self.assertEqual(cmd.params['name'], '1\"\'34**&2')
self.assertEqual(cmd.params['value'], '44\"\'\"')
-
+
cmd = cmdparse.BindCmdParse('zone cmd name = 1\'34**&2value=44\"\'\" value = \"==============\'')
self.assertEqual(cmd.params['name'], '1\'34**&2value=44\"\'\"')
self.assertEqual(cmd.params['value'], '==============')
@@ -83,34 +83,34 @@ class TestCmdLex(unittest.TestCase):
cmd = cmdparse.BindCmdParse('zone cmd name = \"1234, 567890 \" value ==&*/')
self.assertEqual(cmd.params['name'], '1234, 567890 ')
self.assertEqual(cmd.params['value'], '=&*/')
-
+
def testCommandWithListParam(self):
cmd = cmdparse.BindCmdParse("zone set zone_name='cnnic.cn', master='1.1.1.1, 2.2.2.2'")
- assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
-
+ assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
+
def testCommandWithHelpParam(self):
cmd = cmdparse.BindCmdParse("zone add help")
assert cmd.params["help"] == "help"
-
+
cmd = cmdparse.BindCmdParse("zone add help *&)&)*&&$#$^%")
assert cmd.params["help"] == "help"
self.assertEqual(len(cmd.params), 1)
-
+
def testCmdModuleNameFormatError(self):
self.my_assert_raise(CmdModuleNameFormatError, "zone=good")
- self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
- self.my_assert_raise(CmdModuleNameFormatError, "")
+ self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
+ self.my_assert_raise(CmdModuleNameFormatError, "")
self.my_assert_raise(CmdModuleNameFormatError, "=zone")
- self.my_assert_raise(CmdModuleNameFormatError, "zone,")
-
-
+ self.my_assert_raise(CmdModuleNameFormatError, "zone,")
+
+
def testCmdMissCommandNameFormatError(self):
self.my_assert_raise(CmdMissCommandNameFormatError, "zone")
self.my_assert_raise(CmdMissCommandNameFormatError, "zone ")
self.my_assert_raise(CmdMissCommandNameFormatError, "help ")
-
-
+
+
def testCmdCommandNameFormatError(self):
self.my_assert_raise(CmdCommandNameFormatError, "zone =d")
self.my_assert_raise(CmdCommandNameFormatError, "zone z=d")
@@ -119,11 +119,11 @@ class TestCmdLex(unittest.TestCase):
self.my_assert_raise(CmdCommandNameFormatError, "zone zdd/ \"")
class TestCmdSyntax(unittest.TestCase):
-
+
def _create_bindcmd(self):
"""Create one bindcmd"""
-
- tool = bindcmd.BindCmdInterpreter()
+
+ tool = bindcmd.BindCmdInterpreter()
string_spec = { 'item_type' : 'string',
'item_optional' : False,
'item_default' : ''}
@@ -135,40 +135,40 @@ class TestCmdSyntax(unittest.TestCase):
load_cmd = CommandInfo(name = "load")
load_cmd.add_param(zone_file_param)
load_cmd.add_param(zone_name)
-
- param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
- param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
- param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
+
+ param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
+ param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
+ param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
set_cmd = CommandInfo(name = "set")
set_cmd.add_param(param_master)
set_cmd.add_param(param_allow_update)
set_cmd.add_param(zone_name)
-
- reload_all_cmd = CommandInfo(name = "reload_all")
-
- zone_module = ModuleInfo(name = "zone")
+
+ reload_all_cmd = CommandInfo(name = "reload_all")
+
+ zone_module = ModuleInfo(name = "zone")
zone_module.add_command(load_cmd)
zone_module.add_command(set_cmd)
zone_module.add_command(reload_all_cmd)
-
+
tool.add_module_info(zone_module)
return tool
-
-
+
+
def setUp(self):
self.bindcmd = self._create_bindcmd()
-
-
+
+
def no_assert_raise(self, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.bindcmd._validate_cmd(cmd)
-
-
+ self.bindcmd._validate_cmd(cmd)
+
+
def my_assert_raise(self, exception_type, cmd_line):
cmd = cmdparse.BindCmdParse(cmd_line)
- self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
-
-
+ self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
+
+
def testValidateSuccess(self):
self.no_assert_raise("zone load zone_file='cn' zone_name='cn'")
self.no_assert_raise("zone load zone_file='cn', zone_name='cn', ")
@@ -178,27 +178,27 @@ class TestCmdSyntax(unittest.TestCase):
self.no_assert_raise("zone set allow_update='1.1.1.1' zone_name='cn'")
self.no_assert_raise("zone set zone_name='cn'")
self.my_assert_raise(isc.cc.data.DataTypeError, "zone set zone_name ='cn', port='cn'")
- self.no_assert_raise("zone reload_all")
-
-
+ self.no_assert_raise("zone reload_all")
+
+
def testCmdUnknownModuleSyntaxError(self):
self.my_assert_raise(CmdUnknownModuleSyntaxError, "zoned d")
self.my_assert_raise(CmdUnknownModuleSyntaxError, "dd dd ")
-
-
+
+
def testCmdUnknownCmdSyntaxError(self):
self.my_assert_raise(CmdUnknownCmdSyntaxError, "zone dd")
-
+
def testCmdMissParamSyntaxError(self):
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_file='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_name='cn'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set allow_update='1.1.1.1'")
self.my_assert_raise(CmdMissParamSyntaxError, "zone set ")
-
+
def testCmdUnknownParamSyntaxError(self):
self.my_assert_raise(CmdUnknownParamSyntaxError, "zone load zone_d='cn'")
- self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
-
+ self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
+
class TestModuleInfo(unittest.TestCase):
def test_get_param_name_by_position(self):
@@ -212,36 +212,36 @@ class TestModuleInfo(unittest.TestCase):
self.assertEqual('sex', cmd.get_param_name_by_position(2, 3))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
-
+
self.assertRaises(KeyError, cmd.get_param_name_by_position, 4, 4)
-
+
class TestNameSequence(unittest.TestCase):
"""
Test if the module/command/parameters is saved in the order creation
"""
-
+
def _create_bindcmd(self):
- """Create one bindcmd"""
-
+ """Create one bindcmd"""
+
self._cmd = CommandInfo(name = "load")
self.module = ModuleInfo(name = "zone")
- self.tool = bindcmd.BindCmdInterpreter()
+ self.tool = bindcmd.BindCmdInterpreter()
for random_str in self.random_names:
self._cmd.add_param(ParamInfo(name = random_str))
self.module.add_command(CommandInfo(name = random_str))
- self.tool.add_module_info(ModuleInfo(name = random_str))
-
+ self.tool.add_module_info(ModuleInfo(name = random_str))
+
def setUp(self):
self.random_names = ['1erdfeDDWsd', '3fe', '2009erd', 'Fe231', 'tere142', 'rei8WD']
self._create_bindcmd()
-
- def testSequence(self):
+
+ def testSequence(self):
param_names = self._cmd.get_param_names()
cmd_names = self.module.get_command_names()
module_names = self.tool.get_module_names()
-
+
i = 0
while i < len(self.random_names):
assert self.random_names[i] == param_names[i+1]
@@ -342,7 +342,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for socket.err
socket_err_output = io.StringIO()
sys.stdout = socket_err_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Failed to send request, the connection is closed\n",
socket_err_output.getvalue())
socket_err_output.close()
@@ -350,7 +350,7 @@ class TestConfigCommands(unittest.TestCase):
# validate log message for http.client.CannotSendRequest
cannot_send_output = io.StringIO()
sys.stdout = cannot_send_output
- self.assertRaises(None, self.tool.run())
+ self.assertEqual(1, self.tool.run())
self.assertEqual("Can not send request, the connection is busy\n",
cannot_send_output.getvalue())
cannot_send_output.close()
@@ -472,4 +472,4 @@ class TestCommandLineOptions(unittest.TestCase):
if __name__== "__main__":
unittest.main()
-
+
diff --git a/src/bin/cfgmgr/Makefile.am b/src/bin/cfgmgr/Makefile.am
index fc0ed4a..aee78cf 100644
--- a/src/bin/cfgmgr/Makefile.am
+++ b/src/bin/cfgmgr/Makefile.am
@@ -28,3 +28,8 @@ install-data-local:
$(mkinstalldirs) $(DESTDIR)/@localstatedir@/@PACKAGE@
# TODO: permissions handled later
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index d91dfca..2ccc430 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -17,7 +17,6 @@
import sys; sys.path.append ('@@PYTHONPATH@@')
-from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError
import bind10_config
from isc.cc import SessionError
import isc.util.process
@@ -26,6 +25,10 @@ import os
from optparse import OptionParser
import glob
import os.path
+import isc.log
+isc.log.init("b10-cfgmgr")
+from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError, logger
+from isc.log_messages.cfgmgr_messages import *
isc.util.process.rename()
@@ -91,13 +94,12 @@ def main():
cm.notify_boss()
cm.run()
except SessionError as se:
- print("[b10-cfgmgr] Error creating config manager, "
- "is the command channel daemon running?")
+ logger.fatal(CFGMGR_CC_SESSION_ERROR, se)
return 1
except KeyboardInterrupt as kie:
- print("[b10-cfgmgr] Interrupted, exiting")
+ logger.info(CFGMGR_STOPPED_BY_KEYBOARD)
except ConfigManagerDataReadError as cmdre:
- print("[b10-cfgmgr] " + str(cmdre))
+ logger.fatal(CFGMGR_DATA_READ_ERROR, cmdre)
return 2
return 0
diff --git a/src/bin/cfgmgr/plugins/Makefile.am b/src/bin/cfgmgr/plugins/Makefile.am
index d83c2bb..5a4cfef 100644
--- a/src/bin/cfgmgr/plugins/Makefile.am
+++ b/src/bin/cfgmgr/plugins/Makefile.am
@@ -1,5 +1,15 @@
SUBDIRS = tests
-EXTRA_DIST = README tsig_keys.py tsig_keys.spec
+
+EXTRA_DIST = README logging.spec tsig_keys.spec
config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
-config_plugin_DATA = tsig_keys.py tsig_keys.spec
+config_plugin_DATA = logging.spec tsig_keys.spec
+
+python_PYTHON = b10logging.py tsig_keys.py
+pythondir = $(config_plugindir)
+
+CLEANFILES = b10logging.pyc tsig_keys.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cfgmgr/plugins/b10logging.py b/src/bin/cfgmgr/plugins/b10logging.py
new file mode 100644
index 0000000..e288c6d
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/b10logging.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This is the configuration plugin for logging options
+# The name is 'b10logging' because logging.py is an existing module
+#
+# For a technical background, see
+# http://bind10.isc.org/wiki/LoggingCppApiDesign
+#
+
+from isc.config.module_spec import module_spec_from_file
+from isc.util.file import path_search
+from bind10_config import PLUGIN_PATHS
+spec = module_spec_from_file(path_search('logging.spec', PLUGIN_PATHS))
+
+ALLOWED_SEVERITIES = [ 'default',
+ 'debug',
+ 'info',
+ 'warn',
+ 'error',
+ 'fatal',
+ 'none' ]
+ALLOWED_DESTINATIONS = [ 'console',
+ 'file',
+ 'syslog' ]
+ALLOWED_STREAMS = [ 'stdout',
+ 'stderr' ]
+
+def check(config):
+ # Check the data layout first
+ errors=[]
+ if not spec.validate_config(False, config, errors):
+ return ' '.join(errors)
+ # The 'layout' is ok, now check for specific values
+ if 'loggers' in config:
+ for logger in config['loggers']:
+ # name should always be present
+ name = logger['name']
+ # report an error if name starts with * but not *.,
+ # or if * is not the first character.
+ # TODO: we might want to also warn or error if the
+ # logger name is not an existing module, but we can't
+ # really tell that from here at this point
+ star_pos = name.find('*')
+ if star_pos > 0 or\
+ name == '*.' or\
+ (star_pos == 0 and len(name) > 1 and name[1] != '.'):
+ errors.append("Bad logger name: '" + name + "': * can "
+ "only be used instead of the full "
+ "first-level name, e.g. '*' or "
+ "'*.subsystem'")
+
+ if 'severity' in logger and\
+ logger['severity'].lower() not in ALLOWED_SEVERITIES:
+ errors.append("bad severity value for logger " + name +
+ ": " + logger['severity'])
+ if 'output_options' in logger:
+ for output_option in logger['output_options']:
+ if 'destination' in output_option:
+ destination = output_option['destination'].lower()
+ if destination not in ALLOWED_DESTINATIONS:
+ errors.append("bad destination for logger " +
+ name + ": " + output_option['destination'])
+ else:
+ # if left to default, output is stdout, and
+ # it will not show in the updated config,
+ # so 1. we only need to check it if present,
+ # and 2. if destination is changed, so should
+ # output. So first check checks 'in', and the
+ # others 'not in' for 'output'
+ if destination == "console" and\
+ 'output' in output_option and\
+ output_option['output'] not in ALLOWED_STREAMS:
+ errors.append("bad output for logger " + name +
+ ": " + output_option['output'] +
+ ", must be stdout or stderr")
+ elif destination == "file" and\
+ ('output' not in output_option or\
+ output_option['output'] == ""):
+ errors.append("destination set to file but "
+ "output not set to any "
+ "filename for logger "
+ + name)
+ elif destination == "syslog" and\
+ ('output' not in output_option or\
+ output_option['output'] == ""):
+ errors.append("destination set to syslog but "
+ "output not set to any facility"
+ " for logger " + name)
+
+ if errors:
+ return ', '.join(errors)
+ return None
+
+def load():
+ return (spec, check)
+
diff --git a/src/bin/cfgmgr/plugins/logging.spec b/src/bin/cfgmgr/plugins/logging.spec
new file mode 100644
index 0000000..e377b0e
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/logging.spec
@@ -0,0 +1,81 @@
+{
+ "module_spec": {
+ "module_name": "Logging",
+ "module_description": "Logging options",
+ "config_data": [
+ {
+ "item_name": "loggers",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "logger",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "name",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "severity",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "INFO"
+ },
+ { "item_name": "debuglevel",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ },
+ { "item_name": "additive",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ },
+ { "item_name": "output_options",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "output_option",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "destination",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "console"
+ },
+ { "item_name": "output",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "stdout"
+ },
+ { "item_name": "flush",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ },
+ { "item_name": "maxsize",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ },
+ { "item_name": "maxver",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ ],
+ "commands": []
+ }
+}
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 48a0393..ffea2d7 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -1,8 +1,15 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = tsig_keys_test.py
+PYTESTS = tsig_keys_test.py logging_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -12,8 +19,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/plugins/tests/logging_test.py b/src/bin/cfgmgr/plugins/tests/logging_test.py
new file mode 100644
index 0000000..818a596
--- /dev/null
+++ b/src/bin/cfgmgr/plugins/tests/logging_test.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Make sure we can load the module, put it into path
+import sys
+import os
+sys.path.extend(os.environ["B10_TEST_PLUGIN_DIR"].split(':'))
+
+import b10logging
+import unittest
+
+class LoggingConfCheckTest(unittest.TestCase):
+ def test_load(self):
+ """
+ Checks the entry point returns the correct values.
+ """
+ (spec, check) = b10logging.load()
+ # It returns the checking function
+ self.assertEqual(check, b10logging.check)
+ # The plugin stores its spec
+ self.assertEqual(spec, b10logging.spec)
+
+ def test_logger_conf(self):
+ self.assertEqual(None,
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'DEBUG',
+ 'debuglevel': 50,
+ 'output_options':
+ [{'destination': 'file',
+ 'output': '/some/file'
+ }]
+ },
+ {'name': 'b10-resolver',
+ 'severity': 'WARN',
+ 'additive': True,
+ 'output_options':
+ [{'destination': 'console',
+ 'output': 'stderr',
+ 'flush': True
+ }]
+ },
+ {'name': 'b10-resolver.resolver',
+ 'severity': 'ERROR',
+ 'output_options': []
+ },
+ {'name': '*.cache',
+ 'severity': 'INFO'
+ }
+ ]}))
+ def do_bad_name_test(self, name):
+ err_str = "Bad logger name: '" + name + "': * can only be "\
+ "used instead of the full first-level name, e.g. "\
+ "'*' or '*.subsystem'"
+ self.assertEqual(err_str,
+ b10logging.check({'loggers':
+ [{'name': name,
+ 'severity': 'DEBUG'},
+ ]}))
+
+ def test_logger_bad_name(self):
+ self.do_bad_name_test("*.")
+ self.do_bad_name_test("*foo")
+ self.do_bad_name_test("*foo.lib")
+ self.do_bad_name_test("foo*")
+ self.do_bad_name_test("foo*.lib")
+
+ def test_logger_bad_severity(self):
+ self.assertEqual('bad severity value for logger *: BADVAL',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'BADVAL'}]}))
+
+ def test_logger_bad_destination(self):
+ self.assertEqual('bad destination for logger *: baddest',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'baddest' }
+ ]}]}))
+
+ def test_logger_bad_console_output(self):
+ self.assertEqual('bad output for logger *: bad_output, must be stdout or stderr',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'console',
+ 'output': 'bad_output'
+ }
+ ]}]}))
+
+ def test_logger_bad_file_output(self):
+ self.assertEqual('destination set to file but output not set to any filename for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'file' }
+ ]}]}))
+
+ def test_logger_bad_syslog_output(self):
+ self.assertEqual('destination set to syslog but output not set to any facility for logger *',
+ b10logging.check({'loggers':
+ [{'name': '*',
+ 'severity': 'INFO',
+ 'output_options': [
+ { 'destination': 'syslog' }
+ ]}]}))
+
+ def test_logger_bad_type(self):
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 123,
+ 'severity': 'INFO'}]}))
+ self.assertEqual('123 should be a string',
+ b10logging.check({'loggers':
+ [{'name': 'bind10',
+ 'severity': 123}]}))
+
+if __name__ == '__main__':
+ unittest.main()
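
For reference, the plugin contract exercised by this test, load() returning (spec, check) and check(config) returning either None or an error string, can be sketched as follows. This is not the real b10logging plugin; only the logger-name rule is reproduced, and the spec value is a placeholder.

    # Minimal sketch of a cfgmgr config-check plugin (names and spec are
    # placeholders; only the logger-name rule from the test above is shown).
    spec = {"module_name": "Logging"}

    def _name_ok(name):
        # '*' may only stand in for the whole first-level name,
        # e.g. '*' or '*.subsystem'
        if '*' not in name:
            return True
        return name == '*' or (name.startswith('*.') and len(name) > 2
                               and '*' not in name[2:])

    def check(config):
        for logger_conf in config.get('loggers', []):
            name = logger_conf.get('name')
            if not isinstance(name, str):
                return str(name) + " should be a string"
            if not _name_ok(name):
                return ("Bad logger name: '" + name + "': * can only be used "
                        "instead of the full first-level name, e.g. "
                        "'*' or '*.subsystem'")
        return None

    def load():
        return (spec, check)
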
diff --git a/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py b/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
index be2921c..808f28a 100644
--- a/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
+++ b/src/bin/cfgmgr/plugins/tests/tsig_keys_test.py
@@ -86,7 +86,7 @@ class TSigKeysTest(unittest.TestCase):
self.assertEqual("TSIG: Invalid TSIG key string: invalid.key",
tsig_keys.check({'keys': ['invalid.key']}))
self.assertEqual(
- "TSIG: attempt to decode a value not in base64 char set",
+ "TSIG: Unexpected end of input in BASE decoder",
tsig_keys.check({'keys': ['invalid.key:123']}))
def test_bad_format(self):
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index 68666e6..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -1,7 +1,15 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-cfgmgr_test.py
-EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
+noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/plugins/testplugin.py
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
@@ -12,7 +20,14 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env TESTDATA_PATH=$(abs_srcdir)/testdata \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ TESTDATA_PATH=$(abs_srcdir)/testdata \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
+
+CLEANDIRS = testdata/plugins/__pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index 04cf5e2..e302fa6 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -4,6 +4,9 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-cmdctl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
b10_cmdctldir = $(pkgdatadir)
# NOTE: this will overwrite on install
@@ -18,10 +21,12 @@ b10_cmdctl_DATA += cmdctl.spec
EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
-CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
man_MANS = b10-cmdctl.8
-EXTRA_DIST += $(man_MANS) b10-cmdctl.xml
+EXTRA_DIST += $(man_MANS) b10-cmdctl.xml cmdctl_messages.mes
if ENABLE_MAN
@@ -33,20 +38,30 @@ endif
cmdctl.spec: cmdctl.spec.pre
$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
+$(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
if INSTALL_CONFIGURATIONS
-# TODO: permissions handled later
+# Below we intentionally use ${INSTALL} -m 640 instead of $(INSTALL_DATA)
+# because these files will contain sensitive information.
install-data-local:
$(mkinstalldirs) $(DESTDIR)/@sysconfdir@/@PACKAGE@
for f in $(CMDCTL_CONFIGURATIONS) ; do \
if test ! -f $(DESTDIR)$(sysconfdir)/@PACKAGE@/$$f; then \
- $(INSTALL_DATA) $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
+ ${INSTALL} -m 640 $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
fi ; \
done
endif
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index f1c1021..ff221db 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -17,12 +17,12 @@
''' cmdctl module is the configuration entry point for all commands from bindctl
or some other web tools client of bind10. cmdctl is pure https server which provi-
-des RESTful API. When command client connecting with cmdctl, it should first login
-with legal username and password.
- When cmdctl starting up, it will collect command specification and
+des RESTful API. When command client connecting with cmdctl, it should first login
+with legal username and password.
+ When cmdctl starting up, it will collect command specification and
configuration specification/data of other available modules from configmanager, then
wait for receiving request from client, parse the request and resend the request to
-the proper module. When getting the request result from the module, send back the
+the proper module. When getting the request result from the module, send back the
resut to client.
'''
@@ -47,6 +47,13 @@ import isc.net.parse
from optparse import OptionParser, OptionValueError
from hashlib import sha1
from isc.util import socketserver_mixin
+from isc.log_messages.cmdctl_messages import *
+
+isc.log.init("b10-cmdctl")
+logger = isc.log.Logger("cmdctl")
+
+# Debug level for communication with BIND10
+DBG_CMDCTL_MESSAGING = logger.DBGLVL_COMMAND
try:
import threading
@@ -74,16 +81,16 @@ SPECFILE_LOCATION = SPECFILE_PATH + os.sep + "cmdctl.spec"
class CmdctlException(Exception):
pass
-
+
class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
'''https connection request handler.
Currently only GET and POST are supported. '''
def do_GET(self):
- '''The client should send its session id in header with
+ '''The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
- rcode, reply = http.client.OK, []
+ rcode, reply = http.client.OK, []
if self._is_session_valid():
if self._is_user_logged_in():
rcode, reply = self._handle_get_request()
@@ -99,16 +106,16 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _handle_get_request(self):
'''Currently only support the following three url GET request '''
id, module = self._parse_request_path()
- return self.server.get_reply_data_for_GET(id, module)
+ return self.server.get_reply_data_for_GET(id, module)
def _is_session_valid(self):
- return self.session_id
+ return self.session_id
def _is_user_logged_in(self):
login_time = self.server.user_sessions.get(self.session_id)
if not login_time:
return False
-
+
idle_time = time.time() - login_time
if idle_time > self.server.idle_timeout:
return False
@@ -118,7 +125,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _parse_request_path(self):
'''Parse the url, the legal url should like /ldh or /ldh/ldh '''
- groups = URL_PATTERN.match(self.path)
+ groups = URL_PATTERN.match(self.path)
if not groups:
return (None, None)
else:
@@ -126,8 +133,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
'''Process POST request. '''
- '''Process user login and send command to proper module
- The client should send its session id in header with
+ '''Process user login and send command to proper module
+ The client should send its session id in header with
the name 'cookie'
'''
self.session_id = self.headers.get('cookie')
@@ -141,7 +148,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
rcode, reply = http.client.UNAUTHORIZED, ["please login"]
else:
rcode, reply = http.client.BAD_REQUEST, ["session isn't valid"]
-
+
self.send_response(rcode)
self.end_headers()
self.wfile.write(json.dumps(reply).encode())
@@ -162,18 +169,19 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
length = self.headers.get('Content-Length')
if not length:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
try:
user_info = json.loads((self.rfile.read(int(length))).decode())
except:
- return False, ["invalid username or password"]
+ return False, ["invalid username or password"]
user_name = user_info.get('username')
if not user_name:
return False, ["need user name"]
if not self.server.get_user_info(user_name):
- return False, ["user doesn't exist"]
+ logger.info(CMDCTL_NO_SUCH_USER, user_name)
+ return False, ["username or password error"]
user_pwd = user_info.get('password')
if not user_pwd:
@@ -181,10 +189,11 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
local_info = self.server.get_user_info(user_name)
pwd_hashval = sha1((user_pwd + local_info[1]).encode())
if pwd_hashval.hexdigest() != local_info[0]:
- return False, ["password doesn't match"]
+ logger.info(CMDCTL_BAD_PASSWORD, user_name)
+ return False, ["username or password error"]
return True, None
-
+
def _handle_post_request(self):
'''Handle all the post request from client. '''
@@ -206,7 +215,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
if rcode != 0:
ret = http.client.BAD_REQUEST
return ret, reply
-
+
def log_request(self, code='-', size='-'):
'''Rewrite the log request function, log nothing.'''
pass
@@ -230,11 +239,11 @@ class CommandControl():
def _setup_session(self):
'''Setup the session for receving the commands
- sent from other modules. There are two sessions
- for cmdctl, one(self.module_cc) is used for receiving
- commands sent from other modules, another one (self._cc)
- is used to send the command from Bindctl or other tools
- to proper modules.'''
+ sent from other modules. There are two sessions
+ for cmdctl, one(self.module_cc) is used for receiving
+ commands sent from other modules, another one (self._cc)
+ is used to send the command from Bindctl or other tools
+ to proper modules.'''
self._cc = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
self.config_handler,
@@ -242,7 +251,7 @@ class CommandControl():
self._module_name = self._module_cc.get_module_spec().get_module_name()
self._cmdctl_config_data = self._module_cc.get_full_config()
self._module_cc.start()
-
+
def _accounts_file_check(self, filepath):
''' Check whether the accounts file is valid, each row
should be a list with 3 items.'''
@@ -279,9 +288,9 @@ class CommandControl():
errstr = self._accounts_file_check(new_config[key])
else:
errstr = 'unknown config item: ' + key
-
+
if errstr != None:
- self.log_info('Fail to apply config data, ' + errstr)
+            logger.error(CMDCTL_BAD_CONFIG_DATA, errstr)
return ccsession.create_answer(1, errstr)
return ccsession.create_answer(0)
@@ -305,7 +314,7 @@ class CommandControl():
self.modules_spec[args[0]] = args[1]
elif command == ccsession.COMMAND_SHUTDOWN:
- #When cmdctl get 'shutdown' command from boss,
+ #When cmdctl get 'shutdown' command from boss,
#shutdown the outer httpserver.
self._httpserver.shutdown()
self._serving = False
@@ -375,36 +384,35 @@ class CommandControl():
specs = self.get_modules_spec()
if module_name not in specs.keys():
return 1, {'error' : 'unknown module'}
-
+
spec_obj = isc.config.module_spec.ModuleSpec(specs[module_name], False)
errors = []
if not spec_obj.validate_command(command_name, params, errors):
return 1, {'error': errors[0]}
-
+
return self.send_command(module_name, command_name, params)
def send_command(self, module_name, command_name, params = None):
'''Send the command from bindctl to proper module. '''
errstr = 'unknown error'
answer = None
- if self._verbose:
- self.log_info("Begin send command '%s' to module '%s'" %(command_name, module_name))
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_SEND_COMMAND,
+ command_name, module_name)
if module_name == self._module_name:
- # Process the command sent to cmdctl directly.
+ # Process the command sent to cmdctl directly.
answer = self.command_handler(command_name, params)
else:
msg = ccsession.create_command(command_name, params)
seq = self._cc.group_sendmsg(msg, module_name)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_COMMAND_SENT,
+ command_name, module_name)
#TODO, it may be blocked, msqg need to add a new interface waiting in timeout.
try:
answer, env = self._cc.group_recvmsg(False, seq)
except isc.cc.session.SessionTimeout:
errstr = "Module '%s' not responding" % module_name
- if self._verbose:
- self.log_info("Finish send command '%s' to module '%s'" % (command_name, module_name))
-
if answer:
try:
rcode, arg = ccsession.parse_answer(answer)
@@ -415,15 +423,12 @@ class CommandControl():
else:
return rcode, {}
else:
- # TODO: exception
errstr = str(answer['result'][1])
except ccsession.ModuleCCSessionError as mcse:
errstr = str("Error in ccsession answer:") + str(mcse)
- self.log_info(errstr)
+
+ logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
return 1, {'error': errstr}
-
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
def get_cmdctl_config_data(self):
''' If running in source code tree, use keyfile, certificate
@@ -448,13 +453,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
'''Make the server address can be reused.'''
allow_reuse_address = True
- def __init__(self, server_address, RequestHandlerClass,
+ def __init__(self, server_address, RequestHandlerClass,
CommandControlClass,
idle_timeout = 1200, verbose = False):
'''idle_timeout: the max idle time for login'''
socketserver_mixin.NoPollMixIn.__init__(self)
try:
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_STARTED,
+ server_address[0], server_address[1])
except socket.error as err:
raise CmdctlException("Error creating server, because: %s \n" % str(err))
@@ -467,9 +474,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self._accounts_file = None
def _create_user_info(self, accounts_file):
- '''Read all user's name and its' salt, hashed password
+ '''Read all user's name and its' salt, hashed password
from accounts file.'''
- if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
+ if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
return
with self._lock:
@@ -481,18 +488,19 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
for row in reader:
self._user_infos[row[0]] = [row[1], row[2]]
except (IOError, IndexError) as e:
- self.log_info("Fail to read user database, %s" % e)
+ logger.error(CMDCTL_USER_DATABASE_READ_ERROR,
+ accounts_file, e)
finally:
if csvfile:
csvfile.close()
self._accounts_file = accounts_file
if len(self._user_infos) == 0:
- self.log_info("Fail to get user information, will deny any user")
-
+ logger.error(CMDCTL_NO_USER_ENTRIES_READ)
+
def get_user_info(self, username):
'''Get user's salt and hashed string. If the user
- doesn't exist, return None, or else, the list
+ doesn't exist, return None, or else, the list
[salt, hashed password] will be returned.'''
with self._lock:
info = self._user_infos.get(username)
@@ -501,9 +509,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def save_user_session_id(self, session_id):
''' Record user's id and login time. '''
self.user_sessions[session_id] = time.time()
-
+
def _check_key_and_cert(self, key, cert):
- # TODO, check the content of key/certificate file
+ # TODO, check the content of key/certificate file
if not os.path.exists(key):
raise CmdctlException("key file '%s' doesn't exist " % key)
@@ -518,9 +526,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
certfile = cert,
keyfile = key,
ssl_version = ssl.PROTOCOL_SSLv23)
- return ssl_sock
+ return ssl_sock
except (ssl.SSLError, CmdctlException) as err :
- self.log_info("Deny client's connection because %s" % str(err))
+ logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
self.close_request(sock)
# raise socket error to finish the request
raise socket.error
@@ -535,20 +543,17 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def get_reply_data_for_GET(self, id, module):
'''Currently only support the following three url GET request '''
- rcode, reply = http.client.NO_CONTENT, []
+ rcode, reply = http.client.NO_CONTENT, []
if not module:
if id == CONFIG_DATA_URL:
rcode, reply = http.client.OK, self.cmdctl.get_config_data()
elif id == MODULE_SPEC_URL:
rcode, reply = http.client.OK, self.cmdctl.get_modules_spec()
-
- return rcode, reply
+
+ return rcode, reply
def send_command_to_module(self, module_name, command_name, params):
return self.cmdctl.send_command_with_check(module_name, command_name, params)
-
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
httpd = None
@@ -563,10 +568,9 @@ def set_signal_handler():
def run(addr = 'localhost', port = 8080, idle_timeout = 1200, verbose = False):
''' Start cmdctl as one https server. '''
- if verbose:
- sys.stdout.write("[b10-cmdctl] starting on %s port:%d\n" %(addr, port))
- httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
+ httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
CommandControl, idle_timeout, verbose)
+
httpd.serve_forever()
def check_port(option, opt_str, value, parser):
@@ -604,18 +608,18 @@ if __name__ == '__main__':
(options, args) = parser.parse_args()
result = 1 # in case of failure
try:
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
run(options.addr, options.port, options.idle_timeout, options.verbose)
result = 0
except isc.cc.SessionError as err:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the command channel daemon running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_ERROR, err)
except isc.cc.SessionTimeout:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the configuration manager running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_TIMEOUT)
except KeyboardInterrupt:
- sys.stderr.write("[b10-cmdctl] exit from Cmdctl\n")
+ logger.info(CMDCTL_STOPPED_BY_KEYBOARD)
except CmdctlException as err:
- sys.stderr.write("[b10-cmdctl] " + str(err) + "\n")
+        logger.fatal(CMDCTL_UNCAUGHT_EXCEPTION, err)
if httpd:
httpd.shutdown()
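
The change above replaces the ad-hoc log_info() helpers with structured logging through message IDs. The pattern, isolated below, uses only calls that appear in the diff; the command and module names in the example calls are made up.

    import isc.log
    from isc.log_messages.cmdctl_messages import *

    isc.log.init("b10-cmdctl")
    logger = isc.log.Logger("cmdctl")

    # Debug level used for messages about communication with other modules.
    DBG_CMDCTL_MESSAGING = logger.DBGLVL_COMMAND

    # Debug messages take a debug level, a message ID and the ID's arguments;
    # the values below are made up.
    logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_SEND_COMMAND,
                 "some_command", "SomeModule")
    # Error (and fatal) messages take just the message ID and its arguments.
    logger.error(CMDCTL_COMMAND_ERROR, "some_command", "SomeModule", "timeout")
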
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
new file mode 100644
index 0000000..a3371b9
--- /dev/null
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -0,0 +1,84 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the cmdctl_messages python module.
+
+% CMDCTL_BAD_CONFIG_DATA error in config data: %1
+There was an error reading the updated configuration data. The specific
+error is printed.
+
+% CMDCTL_BAD_PASSWORD bad password for user: %1
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+
+% CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+
+% CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+
+% CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent
+This debug message indicates that the given command has been sent to
+the given module.
+
+% CMDCTL_NO_SUCH_USER username not found in user database: %1
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_SEND_COMMAND sending command %1 to module %2
+This debug message indicates that the given command is being sent to
+the given module.
+
+% CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+
+% CMDCTL_STARTED cmdctl is listening for connections on %1:%2
+The cmdctl daemon has started and is now listening for connections.
+
+% CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+
+% CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+
+% CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6a4d7d4..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = cmdctl_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,7 +18,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/cmdctl/tests/cmdctl_test.py b/src/bin/cmdctl/tests/cmdctl_test.py
index 5463c36..3103f47 100644
--- a/src/bin/cmdctl/tests/cmdctl_test.py
+++ b/src/bin/cmdctl/tests/cmdctl_test.py
@@ -19,6 +19,7 @@ import socket
import tempfile
import sys
from cmdctl import *
+import isc.log
SPEC_FILE_PATH = '..' + os.sep
if 'CMDCTL_SPEC_PATH' in os.environ:
@@ -173,7 +174,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
self.handler.server._user_infos['root'] = ['aa', 'aaa']
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['password doesn\'t match'])
+ self.assertEqual(msg, ['username or password error'])
def test_check_user_name_and_pwd_2(self):
user_info = {'username':'root', 'password':'abc123'}
@@ -214,7 +215,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['user doesn\'t exist'])
+ self.assertEqual(msg, ['username or password error'])
def test_do_POST(self):
self.handler.headers = {}
@@ -447,6 +448,7 @@ class TestFuncNotInClass(unittest.TestCase):
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
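
The password checks exercised above boil down to a salted SHA-1 comparison, mirroring _check_user_name_and_pwd in cmdctl.py; the values below are made up.

    from hashlib import sha1

    # The stored record per user is [hashed_password, salt]; a login matches
    # when sha1(password + salt) equals the stored hash.
    def password_matches(password, stored_hash, salt):
        return sha1((password + salt).encode()).hexdigest() == stored_hash

    salt = "some-salt"                                        # made-up values
    stored_hash = sha1(("secret" + salt).encode()).hexdigest()
    print(password_matches("secret", stored_hash, salt))      # True
    print(password_matches("wrong", stored_hash, salt))       # False
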
diff --git a/src/bin/dhcp6/.gitignore b/src/bin/dhcp6/.gitignore
new file mode 100644
index 0000000..6a6060b
--- /dev/null
+++ b/src/bin/dhcp6/.gitignore
@@ -0,0 +1,9 @@
+*~
+Makefile
+Makefile.in
+*.o
+.deps
+.libs
+b10-dhcp6
+spec_config.h
+spec_config.h.pre
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
new file mode 100644
index 0000000..b0f8cd9
--- /dev/null
+++ b/src/bin/dhcp6/Makefile.am
@@ -0,0 +1,46 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += -I$(top_srcdir)/src/bin -I$(top_builddir)/src/bin
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/cc -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+
+CLEANFILES = *.gcno *.gcda spec_config.h
+
+man_MANS = b10-dhcp6.8
+EXTRA_DIST = $(man_MANS) dhcp6.spec interfaces.txt
+
+if ENABLE_MAN
+
+b10-dhcp6.8: b10-dhcp6.xml
+ xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-dhcp6.xml
+
+endif
+
+spec_config.h: spec_config.h.pre
+ $(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
+
+BUILT_SOURCES = spec_config.h
+pkglibexec_PROGRAMS = b10-dhcp6
+
+b10_dhcp6_SOURCES = main.cc iface_mgr.cc dhcp6_srv.cc
+b10_dhcp6_SOURCES += iface_mgr.h dhcp6_srv.h
+
+b10_dhcp6_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/liblog.la
+
+# TODO: config.h.in is wrong because it doesn't honor pkgdatadir,
+# and we can't use @datadir@ because it doesn't expand the default ${prefix}
+b10_dhcp6dir = $(pkgdatadir)
+b10_dhcp6_DATA = dhcp6.spec interfaces.txt
diff --git a/src/bin/dhcp6/b10-dhcp6.8 b/src/bin/dhcp6/b10-dhcp6.8
new file mode 100644
index 0000000..1f34a9a
--- /dev/null
+++ b/src/bin/dhcp6/b10-dhcp6.8
@@ -0,0 +1,51 @@
+'\" t
+.\" Title: b10-dhcp6
+.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: October 27, 2011
+.\" Manual: BIND10
+.\" Source: BIND10
+.\" Language: English
+.\"
+.TH "B10\-DHCP6" "8" "October 27, 2011" "BIND10" "BIND10"
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+b10-dhcp6 \- DHCPv6 server in BIND 10 architecture
+.SH "SYNOPSIS"
+.HP \w'\fBb10\-dhcp6\fR\ 'u
+\fBb10\-dhcp6\fR [\fB\-v\fR]
+.SH "DESCRIPTION"
+.PP
+The
+\fBb10\-dhcp6\fR
+daemon will provide the DHCPv6 server implementation when it becomes functional\&.
+.SH "ARGUMENTS"
+.PP
+The arguments are as follows:
+.PP
+\fB\-v\fR
+.RS 4
+Enable verbose mode\&.
+.RE
+.SH "SEE ALSO"
+.PP
+
+\fBbind10\fR(8)\&.
+.SH "HISTORY"
+.PP
+The
+\fBb10\-dhcp6\fR
+daemon was first coded in June 2011 by Tomek Mrugalski\&.
+.SH "COPYRIGHT"
+.br
+Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
+.br
diff --git a/src/bin/dhcp6/b10-dhcp6.xml b/src/bin/dhcp6/b10-dhcp6.xml
new file mode 100644
index 0000000..53227db
--- /dev/null
+++ b/src/bin/dhcp6/b10-dhcp6.xml
@@ -0,0 +1,98 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+ [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+ <refentryinfo>
+ <date>October 27, 2011</date>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>b10-dhcp6</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo>BIND10</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>b10-dhcp6</refname>
+ <refpurpose>DHCPv6 server in BIND 10 architecture</refpurpose>
+ </refnamediv>
+
+ <docinfo>
+ <copyright>
+ <year>2011</year>
+ <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+ </copyright>
+ </docinfo>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>b10-dhcp6</command>
+ <arg><option>-v</option></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>DESCRIPTION</title>
+ <para>
+ The <command>b10-dhcp6</command> daemon will provide the
+ DHCPv6 server implementation when it becomes functional.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>ARGUMENTS</title>
+
+ <para>The arguments are as follows:</para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><option>-v</option></term>
+ <listitem><para>
+ Enable verbose mode.
+<!-- TODO: what does this do? -->
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>SEE ALSO</title>
+ <para>
+ <citerefentry>
+ <refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>HISTORY</title>
+ <para>
+ The <command>b10-dhcp6</command> daemon was first coded in
+ June 2011 by Tomek Mrugalski.
+ </para>
+ </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->
diff --git a/src/bin/dhcp6/dhcp6.spec b/src/bin/dhcp6/dhcp6.spec
new file mode 100644
index 0000000..0e7e852
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6.spec
@@ -0,0 +1,14 @@
+{
+ "module_spec": {
+ "module_name": "dhcp6",
+ "module_description": "DHCPv6 daemon",
+ "config_data": [
+ { "item_name": "interface",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "eth0"
+ }
+ ],
+ "commands": []
+ }
+}
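
A quick way to sanity-check the spec above with nothing but the standard library; the file path is an assumption, adjust it to wherever dhcp6.spec lives in your tree.

    import json

    # Read the spec shown above and pull out the default interface.
    with open("src/bin/dhcp6/dhcp6.spec") as f:
        spec = json.load(f)["module_spec"]

    iface = next(item for item in spec["config_data"]
                 if item["item_name"] == "interface")
    print(spec["module_name"], "defaults to", iface["item_default"])  # dhcp6 defaults to eth0
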
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
new file mode 100644
index 0000000..ba5afec
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -0,0 +1,231 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6_srv.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/option6_iaaddr.h"
+#include "asiolink/io_address.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+Dhcpv6Srv::Dhcpv6Srv() {
+ cout << "Initialization" << endl;
+
+ // first call to instance() will create IfaceMgr (it's a singleton)
+ // it may throw something if things go wrong
+ IfaceMgr::instance();
+
+    /// @todo: instantiate LeaseMgr here once it is implemented.
+
+ setServerID();
+
+ shutdown = false;
+}
+
+Dhcpv6Srv::~Dhcpv6Srv() {
+ cout << "DHCPv6 Srv shutdown." << endl;
+}
+
+bool
+Dhcpv6Srv::run() {
+ while (!shutdown) {
+ boost::shared_ptr<Pkt6> query; // client's message
+ boost::shared_ptr<Pkt6> rsp; // server's response
+
+ query = IfaceMgr::instance().receive();
+
+ if (query) {
+ if (!query->unpack()) {
+ cout << "Failed to parse incoming packet" << endl;
+ continue;
+ }
+ switch (query->getType()) {
+ case DHCPV6_SOLICIT:
+ rsp = processSolicit(query);
+ break;
+ case DHCPV6_REQUEST:
+ rsp = processRequest(query);
+ break;
+ case DHCPV6_RENEW:
+ rsp = processRenew(query);
+ break;
+ case DHCPV6_REBIND:
+ rsp = processRebind(query);
+ break;
+ case DHCPV6_CONFIRM:
+ rsp = processConfirm(query);
+ break;
+ case DHCPV6_RELEASE:
+ rsp = processRelease(query);
+ break;
+ case DHCPV6_DECLINE:
+ rsp = processDecline(query);
+ break;
+ case DHCPV6_INFORMATION_REQUEST:
+ rsp = processInfRequest(query);
+ break;
+ default:
+ cout << "Unknown pkt type received:"
+ << query->getType() << endl;
+ }
+
+ cout << "Received " << query->data_len_ << " bytes packet type="
+ << query->getType() << endl;
+ cout << query->toText();
+ if (rsp) {
+ rsp->remote_addr_ = query->remote_addr_;
+ rsp->local_addr_ = query->local_addr_;
+ rsp->remote_port_ = DHCP6_CLIENT_PORT;
+ rsp->local_port_ = DHCP6_SERVER_PORT;
+ rsp->ifindex_ = query->ifindex_;
+ rsp->iface_ = query->iface_;
+ cout << "Replying with:" << rsp->getType() << endl;
+ cout << rsp->toText();
+ cout << "----" << endl;
+ if (rsp->pack()) {
+ cout << "#### pack successful." << endl;
+ }
+ IfaceMgr::instance().send(rsp);
+ }
+ }
+
+ // TODO add support for config session (see src/bin/auth/main.cc)
+ // so this daemon can be controlled from bob
+ }
+
+ return (true);
+}
+
+void
+Dhcpv6Srv::setServerID() {
+ /// TODO implement this for real once interface detection is done.
+ /// Use hardcoded server-id for now
+
+ boost::shared_array<uint8_t> srvid(new uint8_t[14]);
+ srvid[0] = 0;
+ srvid[1] = 1; // DUID type 1 = DUID-LLT (see section 9.2 of RFC3315)
+ srvid[2] = 0;
+    srvid[3] = 6; // HW type (intended to be Ethernet); this hard-coded DUID
+                  // is only a placeholder until interface detection is done
+ for (int i=4; i<14; i++) {
+ srvid[i]=i-4;
+ }
+ serverid_ = boost::shared_ptr<Option>(new Option(Option::V6,
+ D6O_SERVERID,
+ srvid,
+ 0, 14));
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processSolicit(boost::shared_ptr<Pkt6> solicit) {
+
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_ADVERTISE,
+ solicit->getTransid(),
+ Pkt6::UDP));
+
+ /// TODO Rewrite this once LeaseManager is implemented.
+
+ // answer client's IA (this is mostly a dummy,
+ // so let's answer only first IA and hope there is only one)
+ boost::shared_ptr<Option> ia_opt = solicit->getOption(D6O_IA_NA);
+ if (ia_opt) {
+ // found IA
+ Option* tmp = ia_opt.get();
+ Option6IA* ia_req = dynamic_cast<Option6IA*>(tmp);
+ if (ia_req) {
+ boost::shared_ptr<Option6IA>
+ ia_rsp(new Option6IA(D6O_IA_NA, ia_req->getIAID()));
+ ia_rsp->setT1(1500);
+ ia_rsp->setT2(2600);
+ boost::shared_ptr<Option6IAAddr>
+ addr(new Option6IAAddr(D6O_IAADDR,
+ IOAddress("2001:db8:1234:5678::abcd"),
+ 5000, 7000));
+ ia_rsp->addOption(addr);
+ reply->addOption(ia_rsp);
+ }
+ }
+
+ // add client-id
+ boost::shared_ptr<Option> clientid = solicit->getOption(D6O_CLIENTID);
+ if (clientid) {
+ reply->addOption(clientid);
+ }
+
+ // add server-id
+ reply->addOption(getServerID());
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRequest(boost::shared_ptr<Pkt6> request) {
+ /// TODO: Implement processRequest() for real
+ boost::shared_ptr<Pkt6> reply = processSolicit(request);
+ reply->setType(DHCPV6_REPLY);
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRenew(boost::shared_ptr<Pkt6> renew) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ renew->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRebind(boost::shared_ptr<Pkt6> rebind) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ rebind->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processConfirm(boost::shared_ptr<Pkt6> confirm) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ confirm->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRelease(boost::shared_ptr<Pkt6> release) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ release->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processDecline(boost::shared_ptr<Pkt6> decline) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ decline->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processInfRequest(boost::shared_ptr<Pkt6> infRequest) {
+ boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+ infRequest->getTransid(),
+ Pkt6::UDP));
+ return reply;
+}
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
new file mode 100644
index 0000000..4daef3a
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -0,0 +1,156 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCPV6_SRV_H
+#define DHCPV6_SRV_H
+
+#include <boost/shared_ptr.hpp>
+#include <boost/noncopyable.hpp>
+#include "dhcp/pkt6.h"
+#include "dhcp/option.h"
+#include <iostream>
+
+namespace isc {
+
+namespace dhcp {
+/// @brief DHCPv6 server service.
+///
+/// This singleton class represents DHCPv6 server. It contains all
+/// top-level methods and routines necessary for server operation.
+/// In particular, it instantiates IfaceMgr, loads or generates DUID
+/// that is going to be used as server-identifier, receives incoming
+/// packets, processes them, manages leases assignment and generates
+/// appropriate responses.
+class Dhcpv6Srv : public boost::noncopyable {
+
+public:
+ /// @brief Default constructor.
+ ///
+ /// Instantiates necessary services, required to run DHCPv6 server.
+ /// In particular, creates IfaceMgr that will be responsible for
+    /// network interaction. It will also instantiate the lease manager and
+    /// load an existing DUID or create a new one.
+ Dhcpv6Srv();
+
+ /// @brief Destructor. Used during DHCPv6 service shutdown.
+ ~Dhcpv6Srv();
+
+    /// @brief Returns the server-identifier option
+ ///
+ /// @return server-id option
+ boost::shared_ptr<isc::dhcp::Option>
+ getServerID() { return serverid_; }
+
+ /// @brief Main server processing loop.
+ ///
+ /// Main server processing loop. Receives incoming packets, verifies
+ /// their correctness, generates appropriate answer (if needed) and
+    /// transmits responses.
+ ///
+    /// @return true if the server shut down gracefully, false if it
+    /// terminated due to a critical error.
+ bool run();
+
+protected:
+ /// @brief Processes incoming SOLICIT and returns response.
+ ///
+ /// Processes received SOLICIT message and verifies that its sender
+ /// should be served. In particular IA, TA and PD options are populated
+    /// with to-be-assigned addresses, temporary addresses and delegated
+ /// prefixes, respectively. In the usual 4 message exchange, server is
+ /// expected to respond with ADVERTISE message. However, if client
+ /// requests rapid-commit and server supports it, REPLY will be sent
+ /// instead of ADVERTISE and requested leases will be assigned
+ /// immediately.
+ ///
+ /// @param solicit SOLICIT message received from client
+ ///
+ /// @return ADVERTISE, REPLY message or NULL
+ boost::shared_ptr<Pkt6>
+ processSolicit(boost::shared_ptr<Pkt6> solicit);
+
+ /// @brief Processes incoming REQUEST and returns REPLY response.
+ ///
+ /// Processes incoming REQUEST message and verifies that its sender
+ /// should be served. In particular IA, TA and PD options are populated
+    /// with assigned addresses, temporary addresses and delegated
+ /// prefixes, respectively. Uses LeaseMgr to allocate or update existing
+ /// leases.
+ ///
+ /// @param request a message received from client
+ ///
+ /// @return REPLY message or NULL
+ boost::shared_ptr<Pkt6>
+ processRequest(boost::shared_ptr<Pkt6> request);
+
+ /// @brief Stub function that will handle incoming RENEW messages.
+ ///
+ /// @param renew message received from client
+ boost::shared_ptr<Pkt6>
+ processRenew(boost::shared_ptr<Pkt6> renew);
+
+ /// @brief Stub function that will handle incoming REBIND messages.
+ ///
+ /// @param rebind message received from client
+ boost::shared_ptr<Pkt6>
+ processRebind(boost::shared_ptr<Pkt6> rebind);
+
+ /// @brief Stub function that will handle incoming CONFIRM messages.
+ ///
+ /// @param confirm message received from client
+ boost::shared_ptr<Pkt6>
+ processConfirm(boost::shared_ptr<Pkt6> confirm);
+
+ /// @brief Stub function that will handle incoming RELEASE messages.
+ ///
+ /// @param release message received from client
+ boost::shared_ptr<Pkt6>
+ processRelease(boost::shared_ptr<Pkt6> release);
+
+ /// @brief Stub function that will handle incoming DECLINE messages.
+ ///
+ /// @param decline message received from client
+ boost::shared_ptr<Pkt6>
+ processDecline(boost::shared_ptr<Pkt6> decline);
+
+ /// @brief Stub function that will handle incoming INF-REQUEST messages.
+ ///
+ /// @param infRequest message received from client
+ boost::shared_ptr<Pkt6>
+ processInfRequest(boost::shared_ptr<Pkt6> infRequest);
+
+ /// @brief Sets server-identifier.
+ ///
+ /// This method attempts to set server-identifier DUID. It loads it
+ /// from a file. If file load fails, it generates new DUID using
+ /// interface link-layer addresses (EUI-64) + timestamp (DUID type
+ /// duid-llt, see RFC3315, section 9.2). If there are no suitable
+    /// interfaces present, an exception is thrown.
+ ///
+ /// @throws isc::Unexpected Failed to read DUID file and no suitable
+ /// interfaces for new DUID generation are detected.
+ void setServerID();
+
+ /// server DUID (to be sent in server-identifier option)
+ boost::shared_ptr<isc::dhcp::Option> serverid_;
+
+ /// indicates if shutdown is in progress. Setting it to true will
+    /// initiate the server shutdown procedure.
+ volatile bool shutdown;
+};
+
+}; // namespace isc::dhcp
+}; // namespace isc
+
+#endif // DHCP6_SRV_H
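
The DUID-LLT mentioned in setServerID()'s documentation has a simple layout (RFC 3315, section 9.2): a 2-byte DUID type of 1, a 2-byte hardware type (1 is the IANA value for Ethernet), a 4-byte timestamp counted in seconds since 2000-01-01 00:00 UTC modulo 2^32, and then the link-layer address. A sketch of that layout follows, with a made-up MAC address; note that the committed setServerID() still fills in a hard-coded 14-byte placeholder instead.

    import struct
    import time

    def make_duid_llt(lladdr, hw_type=1):
        # 2-byte type (1 = DUID-LLT), 2-byte hardware type, 4-byte time
        # since 2000-01-01 00:00 UTC (mod 2^32), then the link-layer address.
        duid_time = (int(time.time()) - 946684800) & 0xffffffff
        return struct.pack("!HHI", 1, hw_type, duid_time) + lladdr

    duid = make_duid_llt(bytes.fromhex("001122334455"))
    print(len(duid))   # 14 bytes, the same length as the placeholder above
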
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
new file mode 100644
index 0000000..a96db07
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -0,0 +1,542 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sstream>
+#include <fstream>
+#include <string.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp6/iface_mgr.h"
+#include "exceptions/exceptions.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace isc {
+
+/// IfaceMgr is a singleton implementation
+IfaceMgr* IfaceMgr::instance_ = 0;
+
+void
+IfaceMgr::instanceCreate() {
+ if (instance_) {
+ // no need to do anything. Instance is already created.
+ // Who called it again anyway? Uh oh. Had to be us, as
+ // this is private method.
+ return;
+ }
+ instance_ = new IfaceMgr();
+}
+
+IfaceMgr&
+IfaceMgr::instance() {
+ if (instance_ == 0) {
+ instanceCreate();
+ }
+ return (*instance_);
+}
+
+IfaceMgr::Iface::Iface(const std::string& name, int ifindex)
+ :name_(name), ifindex_(ifindex), mac_len_(0) {
+
+ memset(mac_, 0, sizeof(mac_));
+}
+
+std::string
+IfaceMgr::Iface::getFullName() const {
+ ostringstream tmp;
+ tmp << name_ << "/" << ifindex_;
+ return (tmp.str());
+}
+
+std::string
+IfaceMgr::Iface::getPlainMac() const {
+ ostringstream tmp;
+ tmp.fill('0');
+ tmp << hex;
+ for (int i = 0; i < mac_len_; i++) {
+ tmp.width(2);
+ tmp << mac_[i];
+ if (i < mac_len_-1) {
+ tmp << ":";
+ }
+ }
+ return (tmp.str());
+}
+
+IfaceMgr::IfaceMgr()
+ :control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
+ control_buf_(new char[control_buf_len_])
+{
+
+ cout << "IfaceMgr initialization." << endl;
+
+ try {
+ // required for sending/receiving packets
+ // let's keep it in front, just in case someone
+ // wants to send anything during initialization
+
+ // control_buf_ = boost::scoped_array<char>();
+
+ detectIfaces();
+
+ if (!openSockets()) {
+ isc_throw(Unexpected, "Failed to open/bind sockets.");
+ }
+ } catch (const std::exception& ex) {
+ cout << "IfaceMgr creation failed:" << ex.what() << endl;
+
+ // TODO Uncomment this (or call LOG_FATAL) once
+ // interface detection is implemented. Otherwise
+ // it is not possible to run tests in a portable
+ // way (see detectIfaces() method).
+ // throw ex;
+ }
+}
+
+IfaceMgr::~IfaceMgr() {
+ // control_buf_ is deleted automatically (scoped_ptr)
+ control_buf_len_ = 0;
+}
+
+void
+IfaceMgr::detectIfaces() {
+ string ifaceName, linkLocal;
+
+ // TODO do the actual detection. Currently interface detection is faked
+ // by reading a text file.
+
+ cout << "Interface detection is not implemented yet. "
+ << "Reading interfaces.txt file instead." << endl;
+ cout << "Please use format: interface-name link-local-address" << endl;
+
+ try {
+ ifstream interfaces("interfaces.txt");
+
+ if (!interfaces.good()) {
+ cout << "Failed to read interfaces.txt file." << endl;
+ isc_throw(Unexpected, "Failed to read interfaces.txt");
+ }
+ interfaces >> ifaceName;
+ interfaces >> linkLocal;
+
+ cout << "Detected interface " << ifaceName << "/" << linkLocal << endl;
+
+ Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
+ IOAddress addr(linkLocal);
+ iface.addrs_.push_back(addr);
+ ifaces_.push_back(iface);
+ interfaces.close();
+ } catch (const std::exception& ex) {
+ // TODO: deallocate whatever memory we used
+ // not that important, since this function is going to be
+ // thrown away as soon as we get proper interface detection
+ // implemented
+
+ // TODO Do LOG_FATAL here
+ std::cerr << "Interface detection failed." << std::endl;
+ throw ex;
+ }
+}
+
+bool
+IfaceMgr::openSockets() {
+ int sock;
+
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+
+ for (Addr6Lst::iterator addr=iface->addrs_.begin();
+ addr!=iface->addrs_.end();
+ ++addr) {
+
+ sock = openSocket(iface->name_, *addr,
+ DHCP6_SERVER_PORT);
+ if (sock<0) {
+ cout << "Failed to open unicast socket." << endl;
+ return (false);
+ }
+ sendsock_ = sock;
+
+ sock = openSocket(iface->name_,
+ IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+ DHCP6_SERVER_PORT);
+ if (sock<0) {
+ cout << "Failed to open multicast socket." << endl;
+ close(sendsock_);
+ return (false);
+ }
+ recvsock_ = sock;
+ }
+ }
+
+ return (true);
+}
+
+void
+IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
+ for (IfaceLst::const_iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ out << "Detected interface " << iface->getFullName() << endl;
+ out << " " << iface->addrs_.size() << " addr(s):" << endl;
+ for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
+ addr != iface->addrs_.end();
+ ++addr) {
+ out << " " << addr->toText() << endl;
+ }
+ out << " mac: " << iface->getPlainMac() << endl;
+ }
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(int ifindex) {
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->ifindex_ == ifindex)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(const std::string& ifname) {
+ for (IfaceLst::iterator iface=ifaces_.begin();
+ iface!=ifaces_.end();
+ ++iface) {
+ if (iface->name_ == ifname)
+ return (&(*iface));
+ }
+
+ return (NULL); // not found
+}
+
+int
+IfaceMgr::openSocket(const std::string& ifname,
+ const IOAddress& addr,
+ int port) {
+ struct sockaddr_in6 addr6;
+
+ cout << "Creating socket on " << ifname << "/" << addr.toText()
+ << "/port=" << port << endl;
+
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_port = htons(port);
+ addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+
+ memcpy(&addr6.sin6_addr,
+ addr.getAddress().to_v6().to_bytes().data(),
+ sizeof(addr6.sin6_addr));
+#ifdef HAVE_SA_LEN
+ addr6->sin6_len = sizeof(addr6);
+#endif
+
+ // TODO: use sockcreator once it becomes available
+
+ // make a socket
+ int sock = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ cout << "Failed to create UDP6 socket." << endl;
+ return (-1);
+ }
+
+ /* Set the REUSEADDR option so that we don't fail to start if
+ we're being restarted. */
+ int flag = 1;
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&flag, sizeof(flag)) < 0) {
+ cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
+ close(sock);
+ return (-1);
+ }
+
+ if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
+ cout << "Failed to bind socket " << sock << " to " << addr.toText()
+ << "/port=" << port << endl;
+ close(sock);
+ return (-1);
+ }
+#ifdef IPV6_RECVPKTINFO
+ /* RFC3542 - a new way */
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
+ &flag, sizeof(flag)) != 0) {
+ cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
+ close(sock);
+ return (-1);
+ }
+#else
+ /* RFC2292 - an old way */
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
+ &flag, sizeof(flag)) != 0) {
+ cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
+ close(sock);
+ return (-1);
+ }
+#endif
+
+ // multicast stuff
+
+ if (addr.getAddress().to_v6().is_multicast()) {
+ // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
+ // are link and site-scoped, so there is no sense to join those groups
+ // with global addresses.
+
+ if ( !joinMcast( sock, ifname,
+ string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+ close(sock);
+ return (-1);
+ }
+ }
+
+ cout << "Created socket " << sock << " on " << ifname << "/" <<
+ addr.toText() << "/port=" << port << endl;
+
+ return (sock);
+}
+
+bool
+IfaceMgr::joinMcast(int sock, const std::string& ifname,
+const std::string & mcast) {
+
+ struct ipv6_mreq mreq;
+
+ if (inet_pton(AF_INET6, mcast.c_str(),
+ &mreq.ipv6mr_multiaddr) <= 0) {
+ cout << "Failed to convert " << ifname
+ << " to IPv6 multicast address." << endl;
+ return (false);
+ }
+
+ mreq.ipv6mr_interface = if_nametoindex(ifname.c_str());
+ if (setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
+ &mreq, sizeof(mreq)) < 0) {
+ cout << "Failed to join " << mcast << " multicast group." << endl;
+ return (false);
+ }
+
+ cout << "Joined multicast " << mcast << " group." << endl;
+
+ return (true);
+}
+
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
+ struct msghdr m;
+ struct iovec v;
+ int result;
+ struct in6_pktinfo *pktinfo;
+ struct cmsghdr *cmsg;
+ memset(&control_buf_[0], 0, control_buf_len_);
+
+ /*
+ * Initialize our message header structure.
+ */
+ memset(&m, 0, sizeof(m));
+
+ /*
+ * Set the target address we're sending to.
+ */
+ sockaddr_in6 to;
+ memset(&to, 0, sizeof(to));
+ to.sin6_family = AF_INET6;
+ to.sin6_port = htons(pkt->remote_port_);
+ memcpy(&to.sin6_addr,
+ pkt->remote_addr_.getAddress().to_v6().to_bytes().data(),
+ 16);
+ to.sin6_scope_id = pkt->ifindex_;
+
+ m.msg_name = &to;
+ m.msg_namelen = sizeof(to);
+
+ /*
+ * Set the data buffer we're sending. (Using this wacky
+ * "scatter-gather" stuff... we only have a single chunk
+ * of data to send, so we declare a single vector entry.)
+ */
+ v.iov_base = (char *) &pkt->data_[0];
+ v.iov_len = pkt->data_len_;
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ /*
+ * Setting the interface is a bit more involved.
+ *
+ * We have to create a "control message", and set that to
+ * define the IPv6 packet information. We could set the
+ * source address if we wanted, but we can safely let the
+ * kernel decide what that should be.
+ */
+ m.msg_control = &control_buf_[0];
+ m.msg_controllen = control_buf_len_;
+ cmsg = CMSG_FIRSTHDR(&m);
+ cmsg->cmsg_level = IPPROTO_IPV6;
+ cmsg->cmsg_type = IPV6_PKTINFO;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(*pktinfo));
+ pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ memset(pktinfo, 0, sizeof(*pktinfo));
+ pktinfo->ipi6_ifindex = pkt->ifindex_;
+ m.msg_controllen = cmsg->cmsg_len;
+
+ result = sendmsg(sendsock_, &m, 0);
+ if (result < 0) {
+ cout << "Send packet failed." << endl;
+ return (false);
+ }
+ cout << "Sent " << result << " bytes." << endl;
+
+ cout << "Sent " << pkt->data_len_ << " bytes over "
+ << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+ << " dst=" << pkt->remote_addr_.toText()
+ << ", src=" << pkt->local_addr_.toText()
+ << endl;
+
+ return (true);
+}
+
+boost::shared_ptr<Pkt6>
+IfaceMgr::receive() {
+ struct msghdr m;
+ struct iovec v;
+ int result;
+ struct cmsghdr* cmsg;
+ struct in6_pktinfo* pktinfo;
+ struct sockaddr_in6 from;
+ struct in6_addr to_addr;
+ boost::shared_ptr<Pkt6> pkt;
+ char addr_str[INET6_ADDRSTRLEN];
+
+ try {
+ // RFC3315 states that server responses may be
+ // fragmented if they are over MTU. It does not say
+ // whether clients' packets may be larger than 1500
+ // bytes. Nevertheless, to be on the safe side, we use
+ // a larger buffer. This buffer limit is checked
+ // during reception (see iov_len below), so we are
+ // safe.
+ pkt = boost::shared_ptr<Pkt6>(new Pkt6(65536));
+ } catch (const std::exception& ex) {
+ cout << "Failed to create new packet." << endl;
+ return (boost::shared_ptr<Pkt6>()); // NULL
+ }
+
+ memset(&control_buf_[0], 0, control_buf_len_);
+
+ memset(&from, 0, sizeof(from));
+ memset(&to_addr, 0, sizeof(to_addr));
+
+ /*
+ * Initialize our message header structure.
+ */
+ memset(&m, 0, sizeof(m));
+
+ /*
+ * Point so we can get the from address.
+ */
+ m.msg_name = &from;
+ m.msg_namelen = sizeof(from);
+
+ /*
+ * Set the data buffer we're receiving. (Using this wacky
+ * "scatter-gather" stuff... it doesn't really make sense
+ * for us, so we use a single vector entry.)
+ */
+ v.iov_base = (void*)&pkt->data_[0];
+ v.iov_len = pkt->data_len_;
+ m.msg_iov = &v;
+ m.msg_iovlen = 1;
+
+ /*
+ * Getting the interface is a bit more involved.
+ *
+ * We set up some space for a "control message". We have
+ * previously asked the kernel to give us packet
+ * information (when we initialized the interface), so we
+ * should get the destination address from that.
+ */
+ m.msg_control = &control_buf_[0];
+ m.msg_controllen = control_buf_len_;
+
+ result = recvmsg(recvsock_, &m, 0);
+
+ if (result >= 0) {
+ /*
+ * If we did read successfully, then we need to loop
+ * through the control messages we received and
+ * find the one with our destination address.
+ *
+ * We also keep a flag to see if we found it. If we
+ * didn't, then we consider this to be an error.
+ */
+ int found_pktinfo = 0;
+ cmsg = CMSG_FIRSTHDR(&m);
+ while (cmsg != NULL) {
+ if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+ (cmsg->cmsg_type == IPV6_PKTINFO)) {
+ pktinfo = (struct in6_pktinfo*)CMSG_DATA(cmsg);
+ to_addr = pktinfo->ipi6_addr;
+ pkt->ifindex_ = pktinfo->ipi6_ifindex;
+ found_pktinfo = 1;
+ }
+ cmsg = CMSG_NXTHDR(&m, cmsg);
+ }
+ if (!found_pktinfo) {
+ cout << "Unable to find pktinfo" << endl;
+ return (boost::shared_ptr<Pkt6>()); // NULL
+ }
+ } else {
+ cout << "Failed to receive data." << endl;
+ return (boost::shared_ptr<Pkt6>()); // NULL
+ }
+
+ // That's ugly.
+ // TODO add IOAddress constructor that will take struct in6_addr*
+ // TODO: there's from_bytes() method added in IOAddress. Use it!
+ inet_ntop(AF_INET6, &to_addr, addr_str, INET6_ADDRSTRLEN);
+ pkt->local_addr_ = IOAddress(string(addr_str));
+
+ // TODO: there's from_bytes() method added in IOAddress. Use it!
+ inet_ntop(AF_INET6, &from.sin6_addr, addr_str, INET6_ADDRSTRLEN);
+ pkt->remote_addr_ = IOAddress(string(addr_str));
+
+ pkt->remote_port_ = ntohs(from.sin6_port);
+
+ Iface* received = getIface(pkt->ifindex_);
+ if (received) {
+ pkt->iface_ = received->name_;
+ } else {
+ cout << "Received packet over unknown interface (ifindex="
+ << pkt->ifindex_ << ")." << endl;
+ return (boost::shared_ptr<Pkt6>()); // NULL
+ }
+
+ pkt->data_len_ = result;
+
+ // TODO Move this to LOG_DEBUG
+ cout << "Received " << pkt->data_len_ << " bytes over "
+ << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+ << " src=" << pkt->remote_addr_.toText()
+ << ", dst=" << pkt->local_addr_.toText()
+ << endl;
+
+ return (pkt);
+}
+
+}
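
The receive() routine above blocks in recvmsg() until a packet arrives. The header's TODO suggests moving to select() with a timeout so the server can do periodic work (e.g. expire leases) between packets. A minimal sketch of that approach, using only standard POSIX calls; the names waitForPacket, recvsock and timeout_sec are illustrative and not part of this branch:

    #include <sys/select.h>
    #include <sys/time.h>

    // Wait up to timeout_sec seconds for data on a receiving socket before
    // the caller invokes recvmsg(). Returns true if the descriptor is ready.
    bool waitForPacket(int recvsock, unsigned int timeout_sec) {
        fd_set read_fds;
        FD_ZERO(&read_fds);
        FD_SET(recvsock, &read_fds);

        struct timeval tv;
        tv.tv_sec = timeout_sec;
        tv.tv_usec = 0;

        // select() returns the number of ready descriptors, 0 on timeout,
        // or -1 on error (e.g. EINTR).
        int ready = select(recvsock + 1, &read_fds, NULL, NULL, &tv);
        return (ready > 0 && FD_ISSET(recvsock, &read_fds));
    }
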
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
new file mode 100644
index 0000000..249c7ef
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -0,0 +1,229 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef IFACE_MGR_H
+#define IFACE_MGR_H
+
+#include <list>
+#include <boost/shared_ptr.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/noncopyable.hpp>
+#include "asiolink/io_address.h"
+#include "dhcp/pkt6.h"
+
+namespace isc {
+
+namespace dhcp {
+/// @brief handles network interfaces, transmission and reception
+///
+/// IfaceMgr is an interface manager class that detects available network
+/// interfaces, configured addresses, link-local addresses, and provides
+/// API for using sockets.
+///
+class IfaceMgr : public boost::noncopyable {
+public:
+ /// type that defines list of addresses
+ typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+
+ /// maximum MAC address length (Infiniband uses 20 bytes)
+ static const unsigned int MAX_MAC_LEN = 20;
+
+ /// @brief represents a single network interface
+ ///
+ /// Iface structure represents network interface with all useful
+ /// information, like name, interface index, MAC address and
+ /// list of assigned addresses
+ struct Iface {
+ /// constructor
+ Iface(const std::string& name, int ifindex);
+
+ /// returns full interface name in format ifname/ifindex
+ std::string getFullName() const;
+
+ /// returns the link-layer address as plain text
+ std::string getPlainMac() const;
+
+ /// network interface name
+ std::string name_;
+
+ /// interface index (a value that uniquely identifies an interface)
+ int ifindex_;
+
+ /// list of assigned addresses
+ Addr6Lst addrs_;
+
+ /// link-layer address
+ uint8_t mac_[MAX_MAC_LEN];
+
+ /// length of link-layer address (usually 6)
+ int mac_len_;
+
+ /// socket used for sending data
+ int sendsock_;
+
+ /// socket used for receiving data
+ int recvsock_;
+ };
+
+ // TODO performance improvement: we may change this into
+ // 2 maps (ifindex-indexed and name-indexed) and
+ // also hide it (it is left public for now to make tests easier)
+
+ /// type that holds a list of interfaces
+ typedef std::list<Iface> IfaceLst;
+
+ /// IfaceMgr is a singleton class. This method returns reference
+ /// to its sole instance.
+ ///
+ /// @return the only existing instance of interface manager
+ static IfaceMgr& instance();
+
+ /// @brief Returns interface with specified interface index
+ ///
+ /// @param ifindex index of searched interface
+ ///
+ /// @return interface with requested index (or NULL if no such
+ /// interface is present)
+ ///
+ Iface*
+ getIface(int ifindex);
+
+ /// @brief Returns interface with specified interface name
+ ///
+ /// @param ifname name of searched interface
+ ///
+ /// @return interface with requested name (or NULL if no such
+ /// interface is present)
+ ///
+ Iface*
+ getIface(const std::string& ifname);
+
+ /// debugging method that prints out all available interfaces
+ ///
+ /// @param out specifies stream to print list of interfaces to
+ void
+ printIfaces(std::ostream& out = std::cout);
+
+ /// @brief Sends a packet.
+ ///
+ /// Sends a packet. All parameters for actual transmission are specified in
+ /// Pkt6 structure itself. That includes destination address, src/dst port
+ /// and interface over which data will be sent.
+ ///
+ /// @param pkt packet to be sent
+ ///
+ /// @return true if sending was successful
+ bool
+ send(boost::shared_ptr<Pkt6>& pkt);
+
+ /// @brief Tries to receive packet over open sockets.
+ ///
+ /// Attempts to receive a single packet over any of the open sockets.
+ /// If reception is successful and all information about its sender
+ /// is obtained, a Pkt6 object is created and returned.
+ ///
+ /// TODO Start using select() and add timeout to be able
+ /// to not wait infinitely, but rather do something useful
+ /// (e.g. remove expired leases)
+ ///
+ /// @return Pkt6 object representing received packet (or NULL)
+ boost::shared_ptr<Pkt6> receive();
+
+ // don't use private, we need derived classes in tests
+protected:
+
+ /// @brief Protected constructor.
+ ///
+ /// Protected constructor. This is a singleton class. We don't want
+ /// anyone to create instances of IfaceMgr. Use the instance() method.
+ IfaceMgr();
+
+ ~IfaceMgr();
+
+ /// @brief Detects network interfaces.
+ ///
+ /// This method will eventually detect available interfaces. For now
+ /// it offers a stub implementation: the first interface name and its
+ /// link-local IPv6 address are read from the interfaces.txt file.
+ void
+ detectIfaces();
+
+ ///
+ /// Opens UDP/IPv6 socket and binds it to address, interface and port.
+ ///
+ /// @param ifname name of the interface
+ /// @param addr address to be bound.
+ /// @param port UDP port.
+ ///
+ /// @return socket descriptor, if socket creation, binding and multicast
+ /// group join were all successful. -1 otherwise.
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr,
+ int port);
+
+ // TODO: having 2 maps (ifindex->iface and ifname->iface) would
+ // probably be better for performance reasons
+
+ /// List of available interfaces
+ IfaceLst ifaces_;
+
+ /// a pointer to a sole instance of this class (a singleton)
+ static IfaceMgr * instance_;
+
+ // TODO: Also keep this interface on Iface once interface detection
+ // is implemented. We may need it e.g. to close all sockets on
+ // specific interface
+ int recvsock_; // TODO: should be fd_set eventually, but we have only
+ int sendsock_; // 2 sockets for now. Will do until the next release.
+ // We can't use the same socket, as the receiving socket
+ // is bound to a multicast address. And we all know what happens
+ // to people who try to use multicast as a source address.
+
+ /// length of the control_buf_ array
+ int control_buf_len_;
+
+ /// control-buffer, used in transmission and reception
+ boost::scoped_array<char> control_buf_;
+
+private:
+ /// Opens sockets on detected interfaces.
+ bool
+ openSockets();
+
+ /// creates a single instance of this class (a singleton implementation)
+ static void
+ instanceCreate();
+
+ /// @brief Joins IPv6 multicast group on a socket.
+ ///
+ /// Socket must be created and bound to an address. Note that this
+ /// address is different than the multicast address. For example DHCPv6
+ /// server should bind its socket to link-local address (fe80::1234...)
+ /// and later join ff02::1:2 multicast group.
+ ///
+ /// @param sock socket fd (socket must be bound)
+ /// @param ifname interface name (for link-scoped multicast groups)
+ /// @param mcast multicast address to join (e.g. "ff02::1:2")
+ ///
+ /// @return true if multicast join was successful
+ ///
+ bool
+ joinMcast(int sock, const std::string& ifname,
+ const std::string& mcast);
+};
+
+}; // namespace isc::dhcp
+}; // namespace isc
+
+#endif
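
The header above defines the public IfaceMgr API: instance() for the singleton, getIface() lookups, and send()/receive() for packet I/O. A hypothetical usage sketch (not code from this branch) showing how a caller could echo one packet back to its sender; the Pkt6 fields used here (remote_addr_, remote_port_, ifindex_, iface_, data_len_) are the ones the implementation above relies on:

    #include <boost/shared_ptr.hpp>
    #include <iostream>
    #include "dhcp6/iface_mgr.h"
    #include "dhcp/pkt6.h"

    using isc::dhcp::IfaceMgr;
    using isc::dhcp::Pkt6;

    // Receive a single DHCPv6 packet and send an (empty) reply back to its
    // sender over the interface it arrived on. Illustrative only.
    void echoOnePacket() {
        IfaceMgr& mgr = IfaceMgr::instance();   // singleton accessor

        boost::shared_ptr<Pkt6> query = mgr.receive();
        if (!query) {
            std::cout << "receive() failed" << std::endl;
            return;
        }

        boost::shared_ptr<Pkt6> reply(new Pkt6(query->data_len_));
        reply->remote_addr_ = query->remote_addr_;
        reply->remote_port_ = query->remote_port_;
        reply->ifindex_ = query->ifindex_;
        reply->iface_ = query->iface_;

        if (!mgr.send(reply)) {
            std::cout << "send() failed" << std::endl;
        }
    }
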
diff --git a/src/bin/dhcp6/interfaces.txt b/src/bin/dhcp6/interfaces.txt
new file mode 100644
index 0000000..6a64309
--- /dev/null
+++ b/src/bin/dhcp6/interfaces.txt
@@ -0,0 +1,10 @@
+eth0 fe80::21e:8cff:fe9b:7349
+
+#
+# Only the first line is read.
+# Please use the following format:
+# interface-name link-local-ipv6-address
+#
+# This file will become obsolete once proper interface detection
+# is implemented.
+#
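
interfaces.txt is the temporary stand-in for real interface detection: the detectIfaces() stub reads an interface name and its link-local address from the first line. A sketch of how such a line could be parsed (the actual stub may differ; this is not the branch's code):

    #include <fstream>
    #include <string>

    // Read the first "interface-name link-local-address" pair from the file.
    // Returns false if the file cannot be opened or the line is malformed.
    bool readFirstInterface(const std::string& filename,
                            std::string& ifname, std::string& addr) {
        std::ifstream in(filename.c_str());
        if (!in) {
            return (false);
        }
        // Only the first whitespace-separated pair is used; everything else
        // in the file (blank lines, '#' comments) is ignored.
        if (!(in >> ifname >> addr)) {
            return (false);
        }
        return (true);
    }

    // Example: for the file above this yields ifname == "eth0" and
    // addr == "fe80::21e:8cff:fe9b:7349".
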
diff --git a/src/bin/dhcp6/main.cc b/src/bin/dhcp6/main.cc
new file mode 100644
index 0000000..5323811
--- /dev/null
+++ b/src/bin/dhcp6/main.cc
@@ -0,0 +1,112 @@
+// Copyright (C) 2009-2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include <cassert>
+#include <iostream>
+
+#include <exceptions/exceptions.h>
+#if 0
+// TODO cc is not used yet. It should be eventually
+#include <cc/session.h>
+#include <config/ccsession.h>
+#endif
+
+#include <util/buffer.h>
+#include <log/dummylog.h>
+
+#include <dhcp6/spec_config.h>
+#include "dhcp6/dhcp6_srv.h"
+
+using namespace std;
+using namespace isc::util;
+
+using namespace isc;
+using namespace isc::dhcp;
+
+namespace {
+
+bool verbose_mode = false;
+
+void
+usage() {
+ cerr << "Usage: b10-dhcp6 [-v]"
+ << endl;
+ cerr << "\t-v: verbose output" << endl;
+ exit(1);
+}
+} // end of anonymous namespace
+
+int
+main(int argc, char* argv[]) {
+ int ch;
+
+ while ((ch = getopt(argc, argv, ":v")) != -1) {
+ switch (ch) {
+ case 'v':
+ verbose_mode = true;
+ isc::log::denabled = true;
+ break;
+ case ':':
+ default:
+ usage();
+ }
+ }
+
+ cout << "My pid=" << getpid() << endl;
+
+ if (argc - optind > 0) {
+ usage();
+ }
+
+ int ret = 0;
+
+ // TODO: the code below is a remainder of the auth-to-dhcp6 copy. We need
+ // to enable this in dhcp6 eventually
+#if 0
+ Session* cc_session = NULL;
+ Session* statistics_session = NULL;
+ ModuleCCSession* config_session = NULL;
+#endif
+ try {
+ string specfile;
+ if (getenv("B10_FROM_BUILD")) {
+ specfile = string(getenv("B10_FROM_BUILD")) +
+ "/src/bin/auth/dhcp6.spec";
+ } else {
+ specfile = string(DHCP6_SPECFILE_LOCATION);
+ }
+
+ cout << "[b10-dhcp6] Initiating DHCPv6 operation." << endl;
+
+ Dhcpv6Srv* srv = new Dhcpv6Srv();
+
+ srv->run();
+
+ } catch (const std::exception& ex) {
+ cerr << "[b10-dhcp6] Server failed: " << ex.what() << endl;
+ ret = 1;
+ }
+
+ return (ret);
+}
diff --git a/src/bin/dhcp6/spec_config.h.pre.in b/src/bin/dhcp6/spec_config.h.pre.in
new file mode 100644
index 0000000..42775b2
--- /dev/null
+++ b/src/bin/dhcp6/spec_config.h.pre.in
@@ -0,0 +1,15 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define DHCP6_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/dhcp6.spec"
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
new file mode 100644
index 0000000..985368e
--- /dev/null
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -0,0 +1,64 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+#PYTESTS = args_test.py bind10_test.py
+# NOTE: this has a generated test found in the builddir
+PYTESTS = dhcp6_test.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
+AM_CPPFLAGS += -I$(top_srcdir)/src/bin
+AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
+
+CLEANFILES = $(builddir)/interfaces.txt
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+TESTS =
+if HAVE_GTEST
+
+TESTS += dhcp6_unittests
+
+dhcp6_unittests_SOURCES = ../iface_mgr.h ../iface_mgr.cc
+dhcp6_unittests_SOURCES += ../dhcp6_srv.h ../dhcp6_srv.cc
+dhcp6_unittests_SOURCES += dhcp6_unittests.cc
+dhcp6_unittests_SOURCES += iface_mgr_unittest.cc
+dhcp6_unittests_SOURCES += dhcp6_srv_unittest.cc
+
+dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+dhcp6_unittests_LDADD = $(GTEST_LDADD)
+dhcp6_unittests_LDADD += $(SQLITE_LIBS)
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
new file mode 100644
index 0000000..72e48e4
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -0,0 +1,148 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp6/dhcp6_srv.h"
+#include "dhcp/option6_ia.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+
+// namespace has to be named, because friends are defined in Dhcpv6Srv class
+// Maybe it should be isc::test?
+namespace test {
+
+class NakedDhcpv6Srv: public Dhcpv6Srv {
+ // "naked" Interface Manager, exposes internal fields
+public:
+ NakedDhcpv6Srv() { }
+
+ boost::shared_ptr<Pkt6>
+ processSolicit(boost::shared_ptr<Pkt6>& request) {
+ return Dhcpv6Srv::processSolicit(request);
+ }
+ boost::shared_ptr<Pkt6>
+ processRequest(boost::shared_ptr<Pkt6>& request) {
+ return Dhcpv6Srv::processRequest(request);
+ }
+};
+
+class Dhcpv6SrvTest : public ::testing::Test {
+public:
+ Dhcpv6SrvTest() {
+ }
+};
+
+TEST_F(Dhcpv6SrvTest, basic) {
+ // there's almost no code now. What's there provides echo capability
+ // that is just a proof of concept and will be removed soon.
+ // No need to thoroughly test it.
+
+ // srv has stubbed interface detection. It will read
+ // interfaces.txt instead. It will pretend to have detected
+ // fe80::1234 link-local address on eth0 interface. Obviously
+ // an attempt to bind this socket will fail.
+ EXPECT_NO_THROW( {
+ Dhcpv6Srv * srv = new Dhcpv6Srv();
+
+ delete srv;
+ });
+
+}
+
+TEST_F(Dhcpv6SrvTest, Solicit_basic) {
+ NakedDhcpv6Srv * srv = 0;
+ EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+
+ // a dummy content for client-id
+ boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
+ for (int i=0; i<32; i++)
+ clntDuid[i] = 100+i;
+
+ boost::shared_ptr<Pkt6> sol =
+ boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
+ 1234, Pkt6::UDP));
+
+ boost::shared_ptr<Option6IA> ia =
+ boost::shared_ptr<Option6IA>(new Option6IA(D6O_IA_NA, 234));
+ ia->setT1(1501);
+ ia->setT2(2601);
+ sol->addOption(ia);
+
+ // Let's not send address in solicit yet
+ // boost::shared_ptr<Option6IAAddr> addr(new Option6IAAddr(D6O_IAADDR,
+ // IOAddress("2001:db8:1234:ffff::ffff"), 5001, 7001));
+ // ia->addOption(addr);
+ // sol->addOption(ia);
+
+ // We have constructed a very simple SOLICIT message with:
+ // - client-id option (mandatory)
+ // - IA option (a request for address, without any addresses)
+
+ // expected returned ADVERTISE message:
+ // - copy of client-id
+ // - server-id
+ // - IA that includes IAADDR
+
+ boost::shared_ptr<Option> clientid =
+ boost::shared_ptr<Option>(new Option(Option::V6, D6O_CLIENTID,
+ clntDuid, 0, 16));
+ sol->addOption(clientid);
+
+ boost::shared_ptr<Pkt6> reply = srv->processSolicit(sol);
+
+ // check if we get response at all
+ ASSERT_TRUE( reply != boost::shared_ptr<Pkt6>() );
+
+ EXPECT_EQ( DHCPV6_ADVERTISE, reply->getType() );
+ EXPECT_EQ( 1234, reply->getTransid() );
+
+ boost::shared_ptr<Option> tmp = reply->getOption(D6O_IA_NA);
+ ASSERT_TRUE( tmp );
+
+ Option6IA * reply_ia = dynamic_cast<Option6IA*> ( tmp.get() );
+ EXPECT_EQ( 234, reply_ia->getIAID() );
+
+ // check that there's an address included
+ EXPECT_TRUE( reply_ia->getOption(D6O_IAADDR));
+
+ // check that server included our own client-id
+ tmp = reply->getOption(D6O_CLIENTID);
+ ASSERT_TRUE( tmp );
+ EXPECT_EQ(clientid->getType(), tmp->getType() );
+ ASSERT_EQ(clientid->len(), tmp->len() );
+
+ EXPECT_TRUE( clientid->getData() == tmp->getData() );
+
+ // check that server included its server-id
+ tmp = reply->getOption(D6O_SERVERID);
+ EXPECT_EQ(tmp->getType(), srv->getServerID()->getType() );
+ ASSERT_EQ(tmp->len(), srv->getServerID()->len() );
+
+ EXPECT_TRUE(tmp->getData() == srv->getServerID()->getData());
+
+ // more checks to be implemented
+ delete srv;
+
+}
+
+}
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
new file mode 100644
index 0000000..5ae1f5e
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+
+import unittest
+import sys
+import os
+import signal
+import socket
+from isc.net.addr import IPAddr
+import time
+import isc
+
+class TestDhcpv6Daemon(unittest.TestCase):
+ def setUp(self):
+ # redirect stdout to a pipe so we can check that our
+ # process spawning is doing the right thing with stdout
+ self.old_stdout = os.dup(sys.stdout.fileno())
+ self.pipes = os.pipe()
+ os.dup2(self.pipes[1], sys.stdout.fileno())
+ os.close(self.pipes[1])
+ # note that we use dup2() to restore the original stdout
+ # to the main program ASAP in each test... this prevents
+ # hangs reading from the child process (as the pipe is only
+ # open in the child), and also ensures nice pretty output
+
+ def tearDown(self):
+ # clean up our stdout munging
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ os.close(self.pipes[0])
+
+ def test_alive(self):
+ """
+ Simple test. Checks that b10-dhcp6 can be started and prints out info
+ about starting DHCPv6 operation.
+ """
+ pi = ProcessInfo('Test Process', [ '../b10-dhcp6' , '-v' ])
+ pi.spawn()
+ time.sleep(1)
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+ output = os.read(self.pipes[0], 4096)
+ self.assertEqual( str(output).count("[b10-dhcp6] Initiating DHCPv6 operation."), 1)
+
+ # kill this process
+ # XXX: b10-dhcp6 is too dumb to understand 'shutdown' command for now,
+ # so let's just kill the bastard
+ os.kill(pi.pid, signal.SIGTERM)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/bin/dhcp6/tests/dhcp6_unittests.cc b/src/bin/dhcp6/tests/dhcp6_unittests.cc
new file mode 100644
index 0000000..360fb71
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_unittests.cc
@@ -0,0 +1,28 @@
+// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdio.h>
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[]) {
+
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+
+ int result = RUN_ALL_TESTS();
+
+ return result;
+}
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
new file mode 100644
index 0000000..f126e6a
--- /dev/null
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -0,0 +1,367 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "io_address.h"
+#include "dhcp/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+// name of the loopback interface (set by loopback detection below)
+char LOOPBACK[32] = "lo";
+
+namespace {
+const char* const INTERFACE_FILE = TEST_DATA_BUILDDIR "/interfaces.txt";
+
+class NakedIfaceMgr: public IfaceMgr {
+ // "naked" Interface Manager, exposes internal fields
+public:
+ NakedIfaceMgr() { }
+ IfaceLst & getIfacesLst() { return ifaces_; }
+ void setSendSock(int sock) { sendsock_ = sock; }
+ void setRecvSock(int sock) { recvsock_ = sock; }
+
+ int openSocket(const std::string& ifname,
+ const isc::asiolink::IOAddress& addr,
+ int port) {
+ return IfaceMgr::openSocket(ifname, addr, port);
+ }
+
+};
+
+// dummy class for now, but this will be expanded when needed
+class IfaceMgrTest : public ::testing::Test {
+public:
+ IfaceMgrTest() {
+ }
+};
+
+// We need some known interface to work reliably. Loopback interface
+// is named lo on Linux and lo0 on BSD boxes. We need to find out
+// which is available. This is not a real test, but rather a workaround
+// that will go away when interface detection is implemented.
+
+// NOTE: At this stage of development, write access to the current directory
+// is required while running the tests.
+TEST_F(IfaceMgrTest, loDetect) {
+
+ // poor man's interface detection
+ // it will go away as soon as proper interface detection
+ // is implemented
+ if (if_nametoindex("lo")>0) {
+ cout << "This is Linux, using lo as loopback." << endl;
+ sprintf(LOOPBACK, "lo");
+ } else if (if_nametoindex("lo0")>0) {
+ cout << "This is BSD, using lo0 as loopback." << endl;
+ sprintf(LOOPBACK, "lo0");
+ } else {
+ cout << "Failed to detect loopback interface. Neither "
+ << "lo or lo0 worked. I give up." << endl;
+ ASSERT_TRUE(false);
+ }
+}
+
+// uncomment this test to create packet writer. It will
+// write incoming DHCPv6 packets as C arrays. That is useful
+// for generating test sequences based on actual traffic
+//
+// TODO: this potentially should be moved to a separate tool
+//
+
+#if 0
+TEST_F(IfaceMgrTest, dhcp6Sniffer) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ unlink("interfaces.txt");
+
+ ofstream interfaces("interfaces.txt", ios::ate);
+ interfaces << "eth0 fe80::21e:8cff:fe9b:7349";
+ interfaces.close();
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ Pkt6 * pkt = 0;
+ int cnt = 0;
+ cout << "---8X-----------------------------------------" << endl;
+ while (true) {
+ pkt = ifacemgr->receive();
+
+ cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
+ cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
+ cout << " Pkt6* pkt;" << endl;
+ cout << " pkt = new Pkt6(" << pkt->data_len_ << ");" << endl;
+ cout << " pkt->remote_port_ = " << pkt-> remote_port_ << ";" << endl;
+ cout << " pkt->remote_addr_ = IOAddress(\""
+ << pkt->remote_addr_.toText() << "\");" << endl;
+ cout << " pkt->local_port_ = " << pkt-> local_port_ << ";" << endl;
+ cout << " pkt->local_addr_ = IOAddress(\""
+ << pkt->local_addr_.toText() << "\");" << endl;
+ cout << " pkt->ifindex_ = " << pkt->ifindex_ << ";" << endl;
+ cout << " pkt->iface_ = \"" << pkt->iface_ << "\";" << endl;
+
+ // TODO it is better to declare an array and then memcpy it to
+ // packet.
+ for (int i=0; i< pkt->data_len_; i++) {
+ cout << " pkt->data_[" << i << "]="
+ << (int)(unsigned char)pkt->data_[i] << "; ";
+ if (!(i%4))
+ cout << endl;
+ }
+ cout << endl;
+ cout << " return (pkt);" << endl;
+ cout << "}" << endl << endl;
+
+ delete pkt;
+ }
+ cout << "---8X-----------------------------------------" << endl;
+
+ // never happens. Infinite loop is infinite
+ delete pkt;
+ delete ifacemgr;
+}
+#endif
+
+TEST_F(IfaceMgrTest, basic) {
+ // checks that IfaceManager can be instantiated
+
+ IfaceMgr & ifacemgr = IfaceMgr::instance();
+ ASSERT_TRUE(&ifacemgr != 0);
+}
+
+TEST_F(IfaceMgrTest, ifaceClass) {
+ // basic tests for Iface inner class
+
+ IfaceMgr::Iface * iface = new IfaceMgr::Iface("eth5", 7);
+
+ EXPECT_STREQ("eth5/7", iface->getFullName().c_str());
+
+ delete iface;
+
+}
+
+// TODO: Implement getPlainMac() test as soon as interface detection
+// is implemented.
+TEST_F(IfaceMgrTest, getIface) {
+
+ cout << "Interface checks. Please ignore socket binding errors." << endl;
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ // interface name, ifindex
+ IfaceMgr::Iface iface1("lo1", 1);
+ IfaceMgr::Iface iface2("eth5", 2);
+ IfaceMgr::Iface iface3("en3", 5);
+ IfaceMgr::Iface iface4("e1000g0", 3);
+
+ // note: real interfaces may be detected as well
+ ifacemgr->getIfacesLst().push_back(iface1);
+ ifacemgr->getIfacesLst().push_back(iface2);
+ ifacemgr->getIfacesLst().push_back(iface3);
+ ifacemgr->getIfacesLst().push_back(iface4);
+
+ cout << "There are " << ifacemgr->getIfacesLst().size()
+ << " interfaces." << endl;
+ for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+ iface != ifacemgr->getIfacesLst().end();
+ ++iface) {
+ cout << " " << iface->name_ << "/" << iface->ifindex_ << endl;
+ }
+
+
+ // check that interface can be retrieved by ifindex
+ IfaceMgr::Iface * tmp = ifacemgr->getIface(5);
+ // ASSERT_NE(NULL, tmp); is not supported. hmmmm.
+ ASSERT_TRUE( tmp != NULL );
+
+ EXPECT_STREQ( "en3", tmp->name_.c_str() );
+ EXPECT_EQ(5, tmp->ifindex_);
+
+ // check that interface can be retrieved by name
+ tmp = ifacemgr->getIface("lo1");
+ ASSERT_TRUE( tmp != NULL );
+
+ EXPECT_STREQ( "lo1", tmp->name_.c_str() );
+ EXPECT_EQ(1, tmp->ifindex_);
+
+ // check that non-existing interfaces are not returned
+ EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
+
+ delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, detectIfaces) {
+
+ // this test checks that interfaces can be detected.
+ // There is no real detection code yet, so interfaces are
+ // read from a file instead.
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << "eth0 fe80::1234";
+ fakeifaces.close();
+
+ // this is not usable on systems that don't have an eth0
+ // interface. Nevertheless, this fake interface should be
+ // on the list, but if_nametoindex() will fail.
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ ASSERT_TRUE( ifacemgr->getIface("eth0") != NULL );
+
+ IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
+
+ // there should be one address
+ EXPECT_EQ(1, eth0->addrs_.size());
+
+ IOAddress * addr = &(*eth0->addrs_.begin());
+ ASSERT_TRUE( addr != NULL );
+
+ EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+
+ delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sockets) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ // bind unicast socket to port 10548
+ int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
+ EXPECT_GT(socket2, 0);
+
+ // expect success. This address/port is already bound, but
+ // we are using SO_REUSEADDR, so we can bind it twice
+ int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+
+ // rebinding succeeds on Linux, fails on BSD
+ // TODO: add OS-specific defines here (or modify code to
+ // behave the same way on all OSes, but that may not be
+ // possible)
+ // EXPECT_GT(socket3, 0); // socket > 0
+
+ // we now have 3 sockets open at the same time. Looks good.
+
+ close(socket1);
+ close(socket2);
+ close(socket3);
+
+ delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ IOAddress loAddr("::1");
+ IOAddress mcastAddr("ff02::1:2");
+
+ // bind multicast socket to port 10547
+ int socket1 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ EXPECT_GT(socket1, 0); // socket > 0
+
+ // expect success. This address/port is already bound, but
+ // we are using SO_REUSEADDR, so we can bind it twice
+ int socket2 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+ EXPECT_GT(socket2, 0);
+
+ // there's no good way to test the negative case here.
+ // We would need a non-multicast interface. Eventually we will be able
+ // to iterate through available interfaces and check for ones
+ // without the multicast-capable flag.
+
+ close(socket1);
+ close(socket2);
+
+ delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+ // testing socket operation in a portable way is tricky
+ // without interface detection implemented
+
+ fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+ fakeifaces << LOOPBACK << " ::1";
+ fakeifaces.close();
+
+ NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+ // let's assume that every supported OS has a lo interface
+ IOAddress loAddr("::1");
+ int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+ int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+
+ ifacemgr->setSendSock(socket2);
+ ifacemgr->setRecvSock(socket1);
+
+ boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
+
+ // prepare dummy payload
+ for (int i=0;i<128; i++) {
+ sendPkt->data_[i] = i;
+ }
+
+ sendPkt->remote_port_ = 10547;
+ sendPkt->remote_addr_ = IOAddress("::1");
+ sendPkt->ifindex_ = 1;
+ sendPkt->iface_ = LOOPBACK;
+
+ boost::shared_ptr<Pkt6> rcvPkt;
+
+ EXPECT_EQ(true, ifacemgr->send(sendPkt));
+
+ rcvPkt = ifacemgr->receive();
+
+ ASSERT_TRUE( rcvPkt ); // received our own packet
+
+ // let's check that we received what was sent
+ EXPECT_EQ(sendPkt->data_len_, rcvPkt->data_len_);
+ EXPECT_EQ(0, memcmp(&sendPkt->data_[0], &rcvPkt->data_[0],
+ rcvPkt->data_len_) );
+
+ EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
+ EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+ delete ifacemgr;
+}
+
+}
diff --git a/src/bin/host/Makefile.am b/src/bin/host/Makefile.am
index ec34ce7..a8f96c2 100644
--- a/src/bin/host/Makefile.am
+++ b/src/bin/host/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = *.gcno *.gcda
bin_PROGRAMS = b10-host
b10_host_SOURCES = host.cc
b10_host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+b10_host_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
man_MANS = b10-host.1
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
index ed0068b..050f6a3 100644
--- a/src/bin/host/b10-host.1
+++ b/src/bin/host/b10-host.1
@@ -103,10 +103,6 @@ It doesn\'t use
at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
.PP
-\fBb10\-host\fR
-does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
-.PP
-
\fB\-p\fR
is not a standard feature\&.
.SH "HISTORY"
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
index 7da07dd..a17ef67 100644
--- a/src/bin/host/b10-host.xml
+++ b/src/bin/host/b10-host.xml
@@ -176,11 +176,6 @@
</para>
<para>
- <command>b10-host</command> does not do reverse lookups by
- default yet (by detecting if name is a IPv4 or IPv6 address).
- </para>
-
- <para>
<option>-p</option> is not a standard feature.
</para>
</refsect1>
diff --git a/src/bin/loadzone/Makefile.am b/src/bin/loadzone/Makefile.am
index 74d4dd4..a235d68 100644
--- a/src/bin/loadzone/Makefile.am
+++ b/src/bin/loadzone/Makefile.am
@@ -1,5 +1,6 @@
SUBDIRS = . tests/correct tests/error
bin_SCRIPTS = b10-loadzone
+noinst_SCRIPTS = run_loadzone.sh
CLEANFILES = b10-loadzone
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
old mode 100644
new mode 100755
index b7ac19f..43b7920
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -18,9 +18,17 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index a90cab2..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,8 +13,17 @@ EXTRA_DIST += ttl2.db
EXTRA_DIST += ttlext.db
EXTRA_DIST += example.db
+noinst_SCRIPTS = correct_test.sh
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# TODO: maybe use TESTS?
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
echo Running test: correct_test.sh
- $(SHELL) $(abs_builddir)/correct_test.sh
+ $(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/correct_test.sh
diff --git a/src/bin/loadzone/tests/correct/correct_test.sh.in b/src/bin/loadzone/tests/correct/correct_test.sh.in
old mode 100644
new mode 100755
index 509d8e5..d944451
--- a/src/bin/loadzone/tests/correct/correct_test.sh.in
+++ b/src/bin/loadzone/tests/correct/correct_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index bbeec07..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,8 +12,17 @@ EXTRA_DIST += keyerror3.db
EXTRA_DIST += originerr1.db
EXTRA_DIST += originerr2.db
+noinst_SCRIPTS = error_test.sh
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# TODO: use TESTS ?
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
echo Running test: error_test.sh
- $(SHELL) $(abs_builddir)/error_test.sh
+ $(LIBRARY_PATH_PLACEHOLDER) $(SHELL) $(abs_builddir)/error_test.sh
diff --git a/src/bin/loadzone/tests/error/error_test.sh.in b/src/bin/loadzone/tests/error/error_test.sh.in
old mode 100644
new mode 100755
index d1d6bd1..94c5edb
--- a/src/bin/loadzone/tests/error/error_test.sh.in
+++ b/src/bin/loadzone/tests/error/error_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/msgq/Makefile.am b/src/bin/msgq/Makefile.am
index 61d4f23..908cab5 100644
--- a/src/bin/msgq/Makefile.am
+++ b/src/bin/msgq/Makefile.am
@@ -1,7 +1,7 @@
SUBDIRS = . tests
pkglibexecdir = $(libexecdir)/@PACKAGE@
-
+
pkglibexec_SCRIPTS = b10-msgq
CLEANFILES = b10-msgq msgq.pyc
@@ -20,3 +20,8 @@ endif
b10-msgq: msgq.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" msgq.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/msgq/msgq.py.in b/src/bin/msgq/msgq.py.in
index 06fe840..333ae89 100755
--- a/src/bin/msgq/msgq.py.in
+++ b/src/bin/msgq/msgq.py.in
@@ -28,7 +28,6 @@ import struct
import errno
import time
import select
-import pprint
import random
from optparse import OptionParser, OptionValueError
import isc.util.process
@@ -96,10 +95,10 @@ class MsgQ:
"@PACKAGE_NAME@",
"msgq_socket").replace("${prefix}",
"@prefix@")
-
+
def __init__(self, socket_file=None, verbose=False):
"""Initialize the MsgQ master.
-
+
The socket_file specifies the path to the UNIX domain socket
that the msgq process listens on. If it is None, the
environment variable BIND10_MSGQ_SOCKET_FILE is used. If that
@@ -135,7 +134,7 @@ class MsgQ:
self.poller = select.poll()
except AttributeError:
self.kqueue = select.kqueue()
-
+
def add_kqueue_socket(self, socket, write_filter=False):
"""Add a kquque filter for a socket. By default the read
filter is used; if write_filter is set to True, the write
@@ -167,7 +166,7 @@ class MsgQ:
self.socket_file)
self.listen_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-
+
if os.path.exists(self.socket_file):
os.remove(self.socket_file)
try:
@@ -196,7 +195,7 @@ class MsgQ:
if self.verbose:
sys.stdout.write("[b10-msgq] Listening\n")
-
+
self.runnable = True
def process_accept(self):
@@ -293,9 +292,6 @@ class MsgQ:
sys.stderr.write("[b10-msgq] Routing decode error: %s\n" % err)
return
-# sys.stdout.write("\t" + pprint.pformat(routingmsg) + "\n")
-# sys.stdout.write("\t" + pprint.pformat(data) + "\n")
-
self.process_command(fd, sock, routingmsg, data)
def process_command(self, fd, sock, routing, data):
@@ -357,7 +353,18 @@ class MsgQ:
if fileno in self.sendbuffs:
amount_sent = 0
else:
- amount_sent = self.__send_data(sock, msg)
+ try:
+ amount_sent = self.__send_data(sock, msg)
+ except socket.error as sockerr:
+ # in the case the other side seems gone, kill the socket
+ # and drop the send action
+ if sockerr.errno == errno.EPIPE:
+ print("[b10-msgq] SIGPIPE on send, dropping message " +
+ "and closing connection")
+ self.kill_socket(fileno, sock)
+ return
+ else:
+ raise
# Still something to send
if amount_sent < len(msg):
@@ -448,12 +455,12 @@ class MsgQ:
def run(self):
"""Process messages. Forever. Mostly."""
-
+
if self.poller:
self.run_poller()
else:
self.run_kqueue()
-
+
def run_poller(self):
while True:
try:
@@ -511,7 +518,7 @@ def signal_handler(signal, frame):
if __name__ == "__main__":
def check_port(option, opt_str, value, parser):
- """Function to insure that the port we are passed is actually
+ """Function to insure that the port we are passed is actually
a valid port number. Used by OptionParser() on startup."""
intval = int(value)
if (intval < 0) or (intval > 65535):
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 0bbb964..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = msgq_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,7 +18,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/msgq \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/msgq/tests/msgq_test.py b/src/bin/msgq/tests/msgq_test.py
index 26878f7..fe4f7d4 100644
--- a/src/bin/msgq/tests/msgq_test.py
+++ b/src/bin/msgq/tests/msgq_test.py
@@ -202,7 +202,7 @@ class SendNonblock(unittest.TestCase):
try:
def killall(signum, frame):
os.kill(queue_pid, signal.SIGTERM)
- sys.exit(1)
+ os._exit(1)
signal.signal(signal.SIGALRM, killall)
msg = msgq.preparemsg({"type" : "ping"}, data)
now = time.clock()
diff --git a/src/bin/resolver/Makefile.am b/src/bin/resolver/Makefile.am
index 094e3ad..3f5f049 100644
--- a/src/bin/resolver/Makefile.am
+++ b/src/bin/resolver/Makefile.am
@@ -18,10 +18,12 @@ endif
pkglibexecdir = $(libexecdir)/@PACKAGE@
-CLEANFILES = *.gcno *.gcda resolver.spec spec_config.h
+CLEANFILES = *.gcno *.gcda
+CLEANFILES += resolver.spec spec_config.h
+CLEANFILES += resolver_messages.cc resolver_messages.h
man_MANS = b10-resolver.8
-EXTRA_DIST = $(man_MANS) b10-resolver.xml
+EXTRA_DIST = $(man_MANS) b10-resolver.xml resolver_messages.mes
if ENABLE_MAN
@@ -36,16 +38,29 @@ resolver.spec: resolver.spec.pre
spec_config.h: spec_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
-BUILT_SOURCES = spec_config.h
+# Define rule to build logging source files from message file
+resolver_messages.h resolver_messages.cc: resolver_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/bin/resolver/resolver_messages.mes
+
+
+BUILT_SOURCES = spec_config.h resolver_messages.cc resolver_messages.h
+
pkglibexec_PROGRAMS = b10-resolver
b10_resolver_SOURCES = resolver.cc resolver.h
+b10_resolver_SOURCES += resolver_log.cc resolver_log.h
b10_resolver_SOURCES += response_scrubber.cc response_scrubber.h
b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/change_user.h
b10_resolver_SOURCES += $(top_builddir)/src/bin/auth/common.h
b10_resolver_SOURCES += main.cc
+
+nodist_b10_resolver_SOURCES = resolver_messages.cc resolver_messages.h
+
+
b10_resolver_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
b10_resolver_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_resolver_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/util/libutil.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
b10_resolver_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
.\" Title: b10-resolver
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: February 17, 2011
+.\" Date: August 17, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
.PP
\fB\-v\fR
.RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
.RE
.SH "CONFIGURATION AND COMMANDS"
.PP
@@ -77,6 +77,25 @@ string and
number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
\fIretries\fR
is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
.PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
.PP
\fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
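Illustration only, not part of the manual page: the new unit tests on this
branch feed query_acl to the resolver as JSON through updateConfig(). A
minimal sketch in the same idiom, assuming 'server' is a configured Resolver
instance, for an ACL that accepts one network and drops everything else via
a trailing action-only rule:

    // Sketch only; includes and setup elided.
    isc::data::ConstElementPtr config(isc::data::Element::fromJSON(
        "{ \"query_acl\": ["
        "   {\"action\": \"ACCEPT\", \"from\": \"192.0.2.0/24\"},"
        "   {\"action\": \"DROP\"} ] }"));
    server.updateConfig(config);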
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 17, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line

+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overridden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
root servers to start resolving.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- If empty, a hardcoded address for F-root (192.5.5.241) is used.
+ By default, a hardcoded address for l.root-servers.net
+ (199.7.83.42 or 2001:500:3::42) is used.
</para>
+<!-- TODO: this is broken, see ticket #1184 -->
<para>
<varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index 5103bf9..79146da 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -52,13 +52,14 @@
#include <cache/resolver_cache.h>
#include <nsas/nameserver_address_store.h>
-#include <log/dummylog.h>
+#include <log/logger_support.h>
+#include <log/logger_level.h>
+#include "resolver_log.h"
using namespace std;
using namespace isc::cc;
using namespace isc::config;
using namespace isc::data;
-using isc::log::dlog;
using namespace isc::asiodns;
using namespace isc::asiolink;
@@ -79,7 +80,7 @@ my_command_handler(const string& command, ConstElementPtr args) {
ConstElementPtr answer = createAnswer();
if (command == "print_message") {
- cout << args << endl;
+ LOG_INFO(resolver_logger, RESOLVER_PRINT_COMMAND).arg(args);
/* let's add that message to our answer as well */
answer = createAnswer(0, args);
} else if (command == "shutdown") {
@@ -100,7 +101,7 @@ usage() {
int
main(int argc, char* argv[]) {
- isc::log::dprefix = "b10-resolver";
+ bool verbose = false;
int ch;
const char* uid = NULL;
@@ -110,7 +111,7 @@ main(int argc, char* argv[]) {
uid = optarg;
break;
case 'v':
- isc::log::denabled = true;
+ verbose = true;
break;
case '?':
default:
@@ -122,13 +123,18 @@ main(int argc, char* argv[]) {
usage();
}
- if (isc::log::denabled) { // Show the command line
- string cmdline("Command line:");
- for (int i = 0; i < argc; ++ i) {
- cmdline = cmdline + " " + argv[i];
- }
- dlog(cmdline);
+ // Until proper logging comes along, initialize the logging with the
+ // temporary initLogger() code. If verbose, we'll use maximum verbosity.
+ isc::log::initLogger("b10-resolver",
+ (verbose ? isc::log::DEBUG : isc::log::INFO),
+ isc::log::MAX_DEBUG_LEVEL, NULL);
+
+ // Print the starting message
+ string cmdline = argv[0];
+ for (int i = 1; i < argc; ++ i) {
+ cmdline = cmdline + " " + argv[i];
}
+ LOG_INFO(resolver_logger, RESOLVER_STARTING).arg(cmdline);
int ret = 0;
@@ -144,7 +150,7 @@ main(int argc, char* argv[]) {
}
resolver = boost::shared_ptr<Resolver>(new Resolver());
- dlog("Server created.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CREATED);
SimpleCallback* checkin = resolver->getCheckinProvider();
DNSLookup* lookup = resolver->getDNSLookupProvider();
@@ -197,15 +203,13 @@ main(int argc, char* argv[]) {
DNSService dns_service(io_service, checkin, lookup, answer);
resolver->setDNSService(dns_service);
- dlog("IOService created.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_SERVICE_CREATED);
cc_session = new Session(io_service.get_io_service());
- dlog("Configuration session channel created.");
-
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
my_command_handler);
- dlog("Configuration channel established.");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_CHANNEL);
// FIXME: This does not belong here, but inside Boss
if (uid != NULL) {
@@ -213,17 +217,22 @@ main(int argc, char* argv[]) {
}
resolver->setConfigSession(config_session);
- dlog("Config loaded");
+ // Install all initial configurations. If loading configuration
+ // fails, it will be logged, but we start the server anyway, giving
+ // the user a second chance to correct the configuration.
+ resolver->updateConfig(config_session->getFullConfig());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_LOADED);
- dlog("Server started.");
+ LOG_INFO(resolver_logger, RESOLVER_STARTED);
io_service.run();
} catch (const std::exception& ex) {
- dlog(string("Server failed: ") + ex.what(),true);
+ LOG_FATAL(resolver_logger, RESOLVER_FAILED).arg(ex.what());
ret = 1;
}
delete config_session;
delete cc_session;
+ LOG_INFO(resolver_logger, RESOLVER_SHUTDOWN);
return (ret);
}
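As a side note on the logging change in main.cc above, the whole effect of
-v is the initLogger() call shown in the hunk; a minimal sketch reusing
exactly that call:

    #include <log/logger_support.h>
    #include <log/logger_level.h>

    // -v selects DEBUG severity at the maximum debug level, otherwise INFO.
    void init_resolver_logging(bool verbose) {
        isc::log::initLogger("b10-resolver",
                             (verbose ? isc::log::DEBUG : isc::log::INFO),
                             isc::log::MAX_DEBUG_LEVEL, NULL);
    }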
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index e43b48e..bb1eb3b 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -14,18 +14,24 @@
#include <config.h>
+#include <stdint.h>
#include <netinet/in.h>
#include <algorithm>
#include <vector>
#include <cassert>
+#include <boost/shared_ptr.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <acl/dns.h>
+#include <acl/loader.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
-#include <boost/foreach.hpp>
-#include <boost/lexical_cast.hpp>
-
#include <config/ccsession.h>
#include <exceptions/exceptions.h>
@@ -41,24 +47,28 @@
#include <dns/rrttl.h>
#include <dns/message.h>
#include <dns/messagerenderer.h>
+
+#include <server_common/client.h>
#include <server_common/portconfig.h>
#include <resolve/recursive_query.h>
-#include <log/dummylog.h>
-
-#include <resolver/resolver.h>
+#include "resolver.h"
+#include "resolver_log.h"
using namespace std;
+using boost::shared_ptr;
using namespace isc;
using namespace isc::util;
+using namespace isc::acl;
+using isc::acl::dns::RequestACL;
using namespace isc::dns;
using namespace isc::data;
using namespace isc::config;
-using isc::log::dlog;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using namespace isc::server_common::portconfig;
class ResolverImpl {
@@ -73,6 +83,9 @@ public:
client_timeout_(4000),
lookup_timeout_(30000),
retries_(3),
+ // we apply "reject all" (implicit default of the loader) ACL by
+ // default:
+ query_acl_(acl::dns::getRequestLoader().load(Element::fromJSON("[]"))),
rec_query_(NULL)
{}
@@ -85,7 +98,7 @@ public:
isc::cache::ResolverCache& cache)
{
assert(!rec_query_); // queryShutdown must be called first
- dlog("Query setup");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_QUERY_SETUP);
rec_query_ = new RecursiveQuery(dnss,
nsas, cache,
upstream_,
@@ -101,7 +114,8 @@ public:
// (this is not a safety check, just to prevent logging of
// actions that are not performed
if (rec_query_) {
- dlog("Query shutdown");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT,
+ RESOLVER_QUERY_SHUTDOWN);
delete rec_query_;
rec_query_ = NULL;
}
@@ -113,13 +127,12 @@ public:
upstream_ = upstream;
if (dnss) {
if (!upstream_.empty()) {
- dlog("Setting forward addresses:");
BOOST_FOREACH(const AddressPair& address, upstream) {
- dlog(" " + address.first + ":" +
- boost::lexical_cast<string>(address.second));
+ LOG_INFO(resolver_logger, RESOLVER_FORWARD_ADDRESS)
+ .arg(address.first).arg(address.second);
}
} else {
- dlog("No forward addresses, running in recursive mode");
+ LOG_INFO(resolver_logger, RESOLVER_RECURSIVE);
}
}
}
@@ -130,13 +143,12 @@ public:
upstream_root_ = upstream_root;
if (dnss) {
if (!upstream_root_.empty()) {
- dlog("Setting root addresses:");
BOOST_FOREACH(const AddressPair& address, upstream_root) {
- dlog(" " + address.first + ":" +
- boost::lexical_cast<string>(address.second));
+ LOG_INFO(resolver_logger, RESOLVER_SET_ROOT_ADDRESS)
+ .arg(address.first).arg(address.second);
}
} else {
- dlog("No root addresses");
+ LOG_WARN(resolver_logger, RESOLVER_NO_ROOT_ADDRESS);
}
}
}
@@ -144,10 +156,20 @@ public:
void resolve(const isc::dns::QuestionPtr& question,
const isc::resolve::ResolverInterface::CallbackPtr& callback);
- void processNormalQuery(ConstMessagePtr query_message,
- MessagePtr answer_message,
- OutputBufferPtr buffer,
- DNSServer* server);
+ enum NormalQueryResult { RECURSION, DROPPED, ERROR };
+ NormalQueryResult processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
+ MessagePtr answer_message,
+ OutputBufferPtr buffer,
+ DNSServer* server);
+
+ const RequestACL& getQueryACL() const {
+ return (*query_acl_);
+ }
+
+ void setQueryACL(shared_ptr<const RequestACL> new_acl) {
+ query_acl_ = new_acl;
+ }
/// Currently non-configurable, but will be.
static const uint16_t DEFAULT_LOCAL_UDPSIZE = 4096;
@@ -172,6 +194,8 @@ public:
unsigned retries_;
private:
+ /// ACL on incoming queries
+ shared_ptr<const RequestACL> query_acl_;
/// Object to handle upstream queries
RecursiveQuery* rec_query_;
@@ -186,8 +210,6 @@ class QuestionInserter {
public:
QuestionInserter(MessagePtr message) : message_(message) {}
void operator()(const QuestionPtr question) {
- dlog(string("Adding question ") + question->getName().toText() +
- " to message");
message_->addQuestion(question);
}
MessagePtr message_;
@@ -234,10 +256,6 @@ makeErrorMessage(MessagePtr message, MessagePtr answer_message,
message->setRcode(rcode);
MessageRenderer renderer(*buffer);
message->toWire(renderer);
-
- dlog(string("Sending an error response (") +
- boost::lexical_cast<string>(renderer.getLength()) + " bytes):\n" +
- message->toText());
}
// This is a derived class of \c DNSLookup, to serve as a
@@ -312,9 +330,9 @@ public:
answer_message->toWire(renderer);
- dlog(string("sending a response (") +
- boost::lexical_cast<string>(renderer.getLength()) + "bytes): \n" +
- answer_message->toText());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_SENT)
+ .arg(renderer.getLength()).arg(*answer_message);
}
};
@@ -335,9 +353,12 @@ private:
Resolver::Resolver() :
impl_(new ResolverImpl()),
+ dnss_(NULL),
checkin_(new ConfigCheck(this)),
dns_lookup_(new MessageLookup(this)),
dns_answer_(new MessageAnswer),
+ nsas_(NULL),
+ cache_(NULL),
configured_(false)
{}
@@ -391,21 +412,25 @@ Resolver::processMessage(const IOMessage& io_message,
OutputBufferPtr buffer,
DNSServer* server)
{
- dlog("Got a DNS message");
InputBuffer request_buffer(io_message.getData(), io_message.getDataSize());
// First, check the header part. If we fail even for the base header,
// just drop the message.
+
+ // In the following code, the debug output is such that there should only be
+ // one debug message if packet processing failed. There could be two if
+ // it succeeded.
try {
query_message->parseHeader(request_buffer);
// Ignore all responses.
if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- dlog("Received unexpected response, ignoring");
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- dlog(string("DNS packet exception: ") + ex.what(),true);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
+ .arg(ex.what());
server->resume(false);
return;
}
@@ -414,68 +439,63 @@ Resolver::processMessage(const IOMessage& io_message,
try {
query_message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- dlog(string("returning ") + error.getRcode().toText() + ": " +
- error.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
+ .arg(error.what()).arg(error.getRcode());
makeErrorMessage(query_message, answer_message,
buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- dlog(string("returning SERVFAIL: ") + ex.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
+ .arg(ex.what()).arg(Rcode::SERVFAIL());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::SERVFAIL());
server->resume(true);
return;
- } // other exceptions will be handled at a higher layer.
+ } // Other exceptions will be handled at a higher layer.
- dlog("received a message:\n" + query_message->toText());
+ // Note: there appears to be no LOG_DEBUG for a successfully-received
+ // message. This is not an oversight - it is handled below. In the
+ // meantime, output the full message for debug purposes (if requested).
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_DETAIL,
+ RESOLVER_DNS_MESSAGE_RECEIVED).arg(*query_message);
// Perform further protocol-level validation.
- bool sendAnswer = true;
+ bool send_answer = true;
if (query_message->getOpcode() == Opcode::NOTIFY()) {
+
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTAUTH());
- dlog("Notify arrived, but we are not authoritative");
+ // Notify arrived, but we are not authoritative.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOTIFY_RECEIVED);
} else if (query_message->getOpcode() != Opcode::QUERY()) {
- dlog("Unsupported opcode (got: " + query_message->getOpcode().toText() +
- ", expected: " + Opcode::QUERY().toText());
+ // Unsupported opcode.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_UNSUPPORTED_OPCODE).arg(query_message->getOpcode());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::NOTIMP());
} else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
- dlog("The query contained " +
- boost::lexical_cast<string>(query_message->getRRCount(
- Message::SECTION_QUESTION) + " questions, exactly one expected"));
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
+ // Not one question
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS,
+ RESOLVER_NOT_ONE_QUESTION)
+ .arg(query_message->getRRCount(Message::SECTION_QUESTION));
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
} else {
- ConstQuestionPtr question = *query_message->beginQuestion();
- const RRType &qtype = question->getType();
- if (qtype == RRType::AXFR()) {
- if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::FORMERR());
- } else {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
- }
- } else if (qtype == RRType::IXFR()) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::NOTIMP());
- } else if (question->getClass() != RRClass::IN()) {
- makeErrorMessage(query_message, answer_message,
- buffer, Rcode::REFUSED());
- } else {
+ const ResolverImpl::NormalQueryResult result =
+ impl_->processNormalQuery(io_message, query_message,
+ answer_message, buffer, server);
+ if (result == ResolverImpl::RECURSION) {
// The RecursiveQuery object will post the "resume" event to the
// DNSServer when an answer arrives, so we don't have to do it now.
- sendAnswer = false;
- impl_->processNormalQuery(query_message, answer_message,
- buffer, server);
+ return;
+ } else if (result == ResolverImpl::DROPPED) {
+ send_answer = false;
}
}
- if (sendAnswer) {
- server->resume(true);
- }
+ server->resume(send_answer);
}
void
@@ -485,25 +505,85 @@ ResolverImpl::resolve(const QuestionPtr& question,
rec_query_->resolve(question, callback);
}
-void
-ResolverImpl::processNormalQuery(ConstMessagePtr query_message,
+ResolverImpl::NormalQueryResult
+ResolverImpl::processNormalQuery(const IOMessage& io_message,
+ MessagePtr query_message,
MessagePtr answer_message,
OutputBufferPtr buffer,
DNSServer* server)
{
+ const ConstQuestionPtr question = *query_message->beginQuestion();
+ const RRType qtype = question->getType();
+ const RRClass qclass = question->getClass();
+
+ // Apply query ACL
+ const Client client(io_message);
+ const BasicAction query_action(
+ getQueryACL().execute(acl::dns::RequestContext(
+ client.getRequestSourceIPAddress(),
+ query_message->getTSIGRecord())));
+ if (query_action == isc::acl::REJECT) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ } else if (query_action == isc::acl::DROP) {
+ LOG_INFO(resolver_logger, RESOLVER_QUERY_DROPPED)
+ .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
+ return (DROPPED);
+ }
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_QUERY_ACCEPTED)
+ .arg(question->getName()).arg(qtype).arg(question->getClass())
+ .arg(client);
+
+ // ACL passed. Reject inappropriate queries for the resolver.
+ if (qtype == RRType::AXFR()) {
+ if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
+ // Can't process AXFR request received over UDP
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_UDP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::FORMERR());
+ } else {
+ // ... or over TCP for that matter
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_TCP);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ }
+ return (ERROR);
+ } else if (qtype == RRType::IXFR()) {
+ // Can't process IXFR request
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_IXFR);
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::NOTIMP());
+ return (ERROR);
+ } else if (qclass != RRClass::IN()) {
+ // Non-IN message received, refuse it.
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_NON_IN_PACKET)
+ .arg(question->getClass());
+ makeErrorMessage(query_message, answer_message, buffer,
+ Rcode::REFUSED());
+ return (ERROR);
+ }
+
+ // Everything is okay. Start resolver.
if (upstream_.empty()) {
- dlog("Processing normal query");
- ConstQuestionPtr question = *query_message->beginQuestion();
+ // Processing normal query
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMAL_QUERY);
rec_query_->resolve(*question, answer_message, buffer, server);
} else {
- dlog("Processing forward query");
+ // Processing forward query
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_FORWARD_QUERY);
rec_query_->forward(query_message, answer_message, buffer, server);
}
+
+ return (RECURSION);
}
ConstElementPtr
Resolver::updateConfig(ConstElementPtr config) {
- dlog("New config comes: " + config->toWire());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIG_UPDATED)
+ .arg(*config);
try {
// Parse forward_addresses
@@ -516,6 +596,10 @@ Resolver::updateConfig(ConstElementPtr config) {
ConstElementPtr listenAddressesE(config->get("listen_on"));
AddressList listenAddresses(parseAddresses(listenAddressesE,
"listen_on"));
+ const ConstElementPtr query_acl_cfg(config->get("query_acl"));
+ const shared_ptr<const RequestACL> query_acl =
+ query_acl_cfg ? acl::dns::getRequestLoader().load(query_acl_cfg) :
+ shared_ptr<RequestACL>();
bool set_timeouts(false);
int qtimeout = impl_->query_timeout_;
int ctimeout = impl_->client_timeout_;
@@ -530,6 +614,8 @@ Resolver::updateConfig(ConstElementPtr config) {
// check for us
qtimeout = qtimeoutE->intValue();
if (qtimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_QUERY_TIME_SMALL)
+ .arg(qtimeout);
isc_throw(BadValue, "Query timeout too small");
}
set_timeouts = true;
@@ -537,6 +623,8 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ctimeoutE) {
ctimeout = ctimeoutE->intValue();
if (ctimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_CLIENT_TIME_SMALL)
+ .arg(ctimeout);
isc_throw(BadValue, "Client timeout too small");
}
set_timeouts = true;
@@ -544,12 +632,19 @@ Resolver::updateConfig(ConstElementPtr config) {
if (ltimeoutE) {
ltimeout = ltimeoutE->intValue();
if (ltimeout < -1) {
+ LOG_ERROR(resolver_logger, RESOLVER_LOOKUP_TIME_SMALL)
+ .arg(ltimeout);
isc_throw(BadValue, "Lookup timeout too small");
}
set_timeouts = true;
}
if (retriesE) {
+ // Do the assignment from "retriesE->intValue()" to "retries"
+ // _after_ the comparison (as opposed to before it for the timeouts)
+ // because "retries" is unsigned.
if (retriesE->intValue() < 0) {
+ LOG_ERROR(resolver_logger, RESOLVER_NEGATIVE_RETRIES)
+ .arg(retriesE->intValue());
isc_throw(BadValue, "Negative number of retries");
}
retries = retriesE->intValue();
@@ -562,15 +657,6 @@ Resolver::updateConfig(ConstElementPtr config) {
if (listenAddressesE) {
setListenAddresses(listenAddresses);
need_query_restart = true;
- } else {
- if (!configured_) {
- // TODO: ModuleSpec needs getDefault()
- AddressList initial_addresses;
- initial_addresses.push_back(AddressPair("127.0.0.1", 53));
- initial_addresses.push_back(AddressPair("::1", 53));
- setListenAddresses(initial_addresses);
- need_query_restart = true;
- }
}
if (forwardAddressesE) {
setForwardAddresses(forwardAddresses);
@@ -584,6 +670,9 @@ Resolver::updateConfig(ConstElementPtr config) {
setTimeouts(qtimeout, ctimeout, ltimeout, retries);
need_query_restart = true;
}
+ if (query_acl) {
+ setQueryACL(query_acl);
+ }
if (need_query_restart) {
impl_->queryShutdown();
@@ -591,8 +680,11 @@ Resolver::updateConfig(ConstElementPtr config) {
}
setConfigured();
return (isc::config::createAnswer());
+
} catch (const isc::Exception& error) {
- dlog(string("error in config: ") + error.what(),true);
+
+ // Configuration error
+ LOG_ERROR(resolver_logger, RESOLVER_CONFIG_ERROR).arg(error.what());
return (isc::config::createAnswer(1, error.what()));
}
}
@@ -632,10 +724,10 @@ Resolver::setListenAddresses(const AddressList& addresses) {
void
Resolver::setTimeouts(int query_timeout, int client_timeout,
int lookup_timeout, unsigned retries) {
- dlog("Setting query timeout to " + boost::lexical_cast<string>(query_timeout) +
- ", client timeout to " + boost::lexical_cast<string>(client_timeout) +
- ", lookup timeout to " + boost::lexical_cast<string>(lookup_timeout) +
- " and retry count to " + boost::lexical_cast<string>(retries));
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_SET_PARAMS)
+ .arg(query_timeout).arg(client_timeout).arg(lookup_timeout)
+ .arg(retries);
+
impl_->query_timeout_ = query_timeout;
impl_->client_timeout_ = client_timeout;
impl_->lookup_timeout_ = lookup_timeout;
@@ -666,3 +758,18 @@ AddressList
Resolver::getListenAddresses() const {
return (impl_->listen_);
}
+
+const RequestACL&
+Resolver::getQueryACL() const {
+ return (impl_->getQueryACL());
+}
+
+void
+Resolver::setQueryACL(shared_ptr<const RequestACL> new_acl) {
+ if (!new_acl) {
+ isc_throw(InvalidParameter, "NULL pointer is passed to setQueryACL");
+ }
+
+ LOG_INFO(resolver_logger, RESOLVER_SET_QUERY_ACL);
+ impl_->setQueryACL(new_acl);
+}
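Condensed sketch of the ACL decision flow that processNormalQuery() now
performs (names taken from the hunk above, surrounding plumbing elided):

    const Client client(io_message);
    const BasicAction action =
        getQueryACL().execute(acl::dns::RequestContext(
            client.getRequestSourceIPAddress(),
            query_message->getTSIGRecord()));
    if (action == isc::acl::REJECT) {
        // log RESOLVER_QUERY_REJECTED and answer with RCODE REFUSED
    } else if (action == isc::acl::DROP) {
        // log RESOLVER_QUERY_DROPPED and send no answer at all
    }
    // otherwise ACCEPT: continue with the AXFR/IXFR/class checks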
diff --git a/src/bin/resolver/resolver.h b/src/bin/resolver/resolver.h
index 2890dd3..4b9c773 100644
--- a/src/bin/resolver/resolver.h
+++ b/src/bin/resolver/resolver.h
@@ -19,8 +19,11 @@
#include <vector>
#include <utility>
+#include <boost/shared_ptr.hpp>
+
#include <cc/data.h>
#include <config/ccsession.h>
+#include <acl/dns.h>
#include <dns/message.h>
#include <util/buffer.h>
@@ -236,6 +239,25 @@ public:
*/
int getRetries() const;
+ /// Get the query ACL.
+ ///
+ /// \exception None
+ const isc::acl::dns::RequestACL& getQueryACL() const;
+
+ /// Set the new query ACL.
+ ///
+ /// This method replaces the existing query ACL completely.
+ /// Normally this method will be called via the configuration handler,
+ /// but is publicly available for convenience of tests (and other
+ /// experimental purposes).
+ /// \c new_acl must not be a NULL pointer.
+ ///
+ /// \exception InvalidParameter The given pointer is NULL
+ ///
+ /// \param new_acl The new ACL to replace the existing one.
+ void setQueryACL(boost::shared_ptr<const isc::acl::dns::RequestACL>
+ new_acl);
+
private:
ResolverImpl* impl_;
isc::asiodns::DNSService* dnss_;
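Usage sketch for the new accessor pair, assuming the same loader call that
updateConfig() uses (the helper name here is illustrative, not part of the
patch):

    #include <boost/shared_ptr.hpp>
    #include <cc/data.h>
    #include <acl/dns.h>
    #include <resolver/resolver.h>

    // Hypothetical helper: build an ACL from JSON and install it.
    void installLocalhostOnlyACL(Resolver& server) {
        using isc::data::Element;
        const boost::shared_ptr<const isc::acl::dns::RequestACL> acl =
            isc::acl::dns::getRequestLoader().load(Element::fromJSON(
                "[{\"action\": \"ACCEPT\", \"from\": \"127.0.0.1\"},"
                " {\"action\": \"ACCEPT\", \"from\": \"::1\"}]"));
        server.setQueryACL(acl);  // throws InvalidParameter for a NULL pointer
    }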
diff --git a/src/bin/resolver/resolver.spec.pre.in b/src/bin/resolver/resolver.spec.pre.in
index 9df1e75..076ef85 100644
--- a/src/bin/resolver/resolver.spec.pre.in
+++ b/src/bin/resolver/resolver.spec.pre.in
@@ -113,6 +113,41 @@
}
]
}
+ },
+ {
+ "item_name": "query_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "action": "ACCEPT",
+ "from": "127.0.0.1"
+ },
+ {
+ "action": "ACCEPT",
+ "from": "::1"
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "rule",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "action",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "from",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/resolver/resolver_log.cc b/src/bin/resolver/resolver_log.cc
new file mode 100644
index 0000000..4af0159
--- /dev/null
+++ b/src/bin/resolver/resolver_log.cc
@@ -0,0 +1,19 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the resolver
+
+#include "resolver_log.h"
+
+isc::log::Logger resolver_logger("resolver");
diff --git a/src/bin/resolver/resolver_log.h b/src/bin/resolver/resolver_log.h
new file mode 100644
index 0000000..e0e3fda
--- /dev/null
+++ b/src/bin/resolver/resolver_log.h
@@ -0,0 +1,49 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __RESOLVER_LOG__H
+#define __RESOLVER_LOG__H
+
+#include <log/macros.h>
+#include "resolver_messages.h"
+
+/// \brief Resolver Logging
+///
+/// Defines the levels used to output debug messages in the resolver. Note that
+/// higher numbers equate to more verbose (and detailed) output.
+
+// Initialization and shutdown of the resolver.
+const int RESOLVER_DBG_INIT = DBGLVL_START_SHUT;
+
+// Configuration messages
+const int RESOLVER_DBG_CONFIG = DBGLVL_COMMAND;
+
+// Trace sending and receiving of messages
+const int RESOLVER_DBG_IO = DBGLVL_TRACE_BASIC;
+
+// Trace processing of messages
+const int RESOLVER_DBG_PROCESS = DBGLVL_TRACE_DETAIL;
+
+// Detailed message information
+const int RESOLVER_DBG_DETAIL = DBGLVL_TRACE_DETAIL_DATA;
+
+
+/// \brief Resolver Logger
+///
+/// Define the logger used to log messages. We could define it in multiple
+/// modules, but defining in a single module and linking to it saves time and
+/// space.
+extern isc::log::Logger resolver_logger;
+
+#endif // __RESOLVER_LOG__H
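To make the level scheme concrete: the second argument of LOG_DEBUG selects
one of the constants above, so the message is emitted only when the
configured debug level reaches that threshold. For example (the same pattern
the rest of this patch uses):

    #include "resolver_log.h"

    void logging_examples() {
        // Emitted only when debugging is at least DBGLVL_TRACE_BASIC
        // (the level RESOLVER_DBG_IO maps to).
        LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_NORMAL_QUERY);

        // Informational messages are not gated by a debug level.
        LOG_INFO(resolver_logger, RESOLVER_STARTED);
    }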
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
new file mode 100644
index 0000000..7930c52
--- /dev/null
+++ b/src/bin/resolver/resolver_messages.mes
@@ -0,0 +1,248 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# along with the resolver methods.
+
+% RESOLVER_AXFR_TCP AXFR request received over TCP
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_AXFR_UDP AXFR request received over UDP
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to FORMERR.
+
+% RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+
+% RESOLVER_CONFIG_CHANNEL configuration channel created
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+
+% RESOLVER_CONFIG_ERROR error in configuration: %1
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
+
+% RESOLVER_CONFIG_LOADED configuration loaded
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+
+% RESOLVER_CONFIG_UPDATED configuration updated: %1
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
+
+% RESOLVER_CREATED main resolver object created
+This is a debug message indicating that the main resolver object has
+been created.
+
+% RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+
+% RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
+
+% RESOLVER_FAILED resolver failed, reason: %1
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+
+% RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+
+% RESOLVER_FORWARD_QUERY processing forward query
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
+servers.
+
+% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
+
+% RESOLVER_IXFR IXFR request received
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+
+% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+
+% RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+
+% RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message
+This debug message is issued when the resolver has received a DNS packet
+that was not IN (Internet) class. The resolver cannot handle such packets,
+so it returns a REFUSED response to the sender.
+
+% RESOLVER_NORMAL_QUERY processing normal query
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+
+% RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+
+% RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+
+% RESOLVER_NO_ROOT_ADDRESS no root addresses available
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+
+% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+
+% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
+This informational message is logged when a "print_message" command is received
+by the resolver over the command channel.
+
+% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+
+% RESOLVER_QUERY_SETUP query setup
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+
+% RESOLVER_QUERY_SHUTDOWN query shutdown
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+
+% RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+
+% RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
+
+% RESOLVER_RECURSIVE running in recursive mode
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+
+% RESOLVER_SERVICE_CREATED service object created
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
+
+% RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
+This debug message lists the parameters being set for the resolver. These are:
+query timeout: the timeout (in ms) used for queries originated by the resolver
+to upstream servers. Client timeout: the time allowed for resolving a client's
+query; after this time, the resolver sends back a SERVFAIL to the client
+whilst continuing to resolve the query. Lookup timeout: the time at which the
+resolver gives up trying to resolve a query. Retry count: the number of times
+the resolver will retry a query to an upstream server if it gets a timeout.
+
+The client and lookup timeouts require a bit more explanation. The
+resolution of the client query might require a large number of queries to
+upstream nameservers. Even if none of these queries timeout, the total time
+taken to perform all the queries may exceed the client timeout. When this
+happens, a SERVFAIL is returned to the client, but the resolver continues
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
+At this point it will wait for pending upstream queries to complete or
+timeout and drop the query.
+
+% RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+
+% RESOLVER_SHUTDOWN resolver shutdown complete
+This informational message is output when the resolver has shut down.
+
+% RESOLVER_STARTED resolver started
+This informational message is output by the resolver when all initialization
+has been completed and it is entering its main loop.
+
+% RESOLVER_STARTING starting resolver with command line '%1'
+An informational message, this is output when the resolver starts up.
+
+% RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+
+% RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_SET_QUERY_ACL query ACL is configured
+This informational message is generated when a new query ACL is configured for
+the resolver.
+
+% RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
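The four parameters described by RESOLVER_SET_PARAMS map directly onto the
configuration keys exercised by the timeoutsConfig unit test; a sketch with
illustrative values (not the shipped defaults), again assuming a configured
Resolver instance 'server':

    isc::data::ConstElementPtr config = isc::data::Element::fromJSON(
        "{"
        " \"timeout_query\": 2000,"    // per upstream query, in ms
        " \"timeout_client\": 4000,"   // SERVFAIL the client after this
        " \"timeout_lookup\": 30000,"  // give up on the lookup entirely
        " \"retries\": 3"              // retransmissions per upstream query
        "}");
    server.updateConfig(config);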
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 444358b..12ddab3 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -1,6 +1,7 @@
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/bin
AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_builddir)/src/bin/resolver
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(top_srcdir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += $(BOOST_INCLUDES)
@@ -16,24 +17,29 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
+
run_unittests_SOURCES = $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += ../resolver.h ../resolver.cc
+run_unittests_SOURCES += ../resolver_log.h ../resolver_log.cc
run_unittests_SOURCES += ../response_scrubber.h ../response_scrubber.cc
run_unittests_SOURCES += resolver_unittest.cc
run_unittests_SOURCES += resolver_config_unittest.cc
run_unittests_SOURCES += response_scrubber_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+
+nodist_run_unittests_SOURCES = ../resolver_messages.h ../resolver_messages.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
-run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
@@ -42,6 +48,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
# Note the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS
@@ -51,6 +60,4 @@ run_unittests_CXXFLAGS += -Wno-unused-parameter
endif
endif
-
-
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 70e856d..c089041 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -16,12 +16,23 @@
#include <string>
+#include <boost/scoped_ptr.hpp>
+
#include <gtest/gtest.h>
#include <cc/data.h>
+#include <config/ccsession.h>
+
#include <asiodns/asiodns.h>
#include <asiolink/asiolink.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_socket.h>
+#include <asiolink/io_message.h>
+
+#include <acl/acl.h>
+
+#include <server_common/client.h>
#include <resolver/resolver.h>
@@ -30,25 +41,42 @@
#include <testutils/portconfig.h>
using namespace std;
+using boost::scoped_ptr;
+using namespace isc::acl;
+using isc::acl::dns::RequestContext;
using namespace isc::data;
using namespace isc::testutils;
using namespace isc::asiodns;
using namespace isc::asiolink;
+using namespace isc::server_common;
using isc::UnitTestUtil;
namespace {
class ResolverConfig : public ::testing::Test {
- public:
- IOService ios;
- DNSService dnss;
- Resolver server;
- ResolverConfig() :
- dnss(ios, NULL, NULL, NULL)
- {
- server.setDNSService(dnss);
- server.setConfigured();
- }
- void invalidTest(const string &JSON, const string& name);
+protected:
+ IOService ios;
+ DNSService dnss;
+ Resolver server;
+ scoped_ptr<const IOEndpoint> endpoint;
+ scoped_ptr<const IOMessage> query_message;
+ scoped_ptr<const Client> client;
+ scoped_ptr<const RequestContext> request;
+ ResolverConfig() : dnss(ios, NULL, NULL, NULL) {
+ server.setDNSService(dnss);
+ server.setConfigured();
+ }
+ const RequestContext& createRequest(const string& source_addr) {
+ endpoint.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress(source_addr),
+ 53210));
+ query_message.reset(new IOMessage(NULL, 0,
+ IOSocket::getDummyUDPSocket(),
+ *endpoint));
+ client.reset(new Client(*query_message));
+ request.reset(new RequestContext(client->getRequestSourceIPAddress(),
+ NULL));
+ return (*request);
+ }
+ void invalidTest(const string &JSON, const string& name);
};
TEST_F(ResolverConfig, forwardAddresses) {
@@ -77,14 +105,14 @@ TEST_F(ResolverConfig, forwardAddresses) {
TEST_F(ResolverConfig, forwardAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"forward_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"forward_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_TRUE(server.isForwarding());
@@ -104,14 +132,14 @@ TEST_F(ResolverConfig, forwardAddressConfig) {
TEST_F(ResolverConfig, rootAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"root_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"root_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
ASSERT_EQ(1, server.getRootAddresses().size());
@@ -187,12 +215,12 @@ TEST_F(ResolverConfig, timeouts) {
}
TEST_F(ResolverConfig, timeoutsConfig) {
- ElementPtr config = Element::fromJSON("{"
- "\"timeout_query\": 1000,"
- "\"timeout_client\": 2000,"
- "\"timeout_lookup\": 3000,"
- "\"retries\": 4"
- "}");
+ ConstElementPtr config = Element::fromJSON("{"
+ "\"timeout_query\": 1000,"
+ "\"timeout_client\": 2000,"
+ "\"timeout_lookup\": 3000,"
+ "\"retries\": 4"
+ "}");
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_EQ(1000, server.getQueryTimeout());
@@ -228,4 +256,140 @@ TEST_F(ResolverConfig, invalidTimeoutsConfig) {
"}", "Negative number of retries");
}
+TEST_F(ResolverConfig, defaultQueryACL) {
+ // If no configuration is loaded, the default ACL should reject everything.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+
+ // The following would be allowed if the server had loaded the default
+ // configuration from the spec file. In this context it should not have
+ // happened, and they should be rejected just like the above cases.
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("127.0.0.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("::1")));
+}
+
+TEST_F(ResolverConfig, emptyQueryACL) {
+ // Explicitly configured empty ACL should have the same effect.
+ ConstElementPtr config(Element::fromJSON("{ \"query_acl\": [] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv4) {
+ // A simple "accept" query for a specific IPv4 address
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, queryACLIPv6) {
+ // same for IPv6
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"2001:db8::1\"} ] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+}
+
+TEST_F(ResolverConfig, multiEntryACL) {
+ // A bit more complicated one: mixture of IPv4 and IPv6 with 3 rules
+ // in total. We shouldn't have to check so many variations of rules
+ // as it should have been tested in the underlying ACL module. All we
+ // have to do to check is a reasonably complicated ACL configuration is
+ // loaded as expected.
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"REJECT\","
+ " \"from\": \"192.0.2.0/24\"},"
+ " {\"action\": \"DROP\","
+ " \"from\": \"2001:db8::1\"},"
+ "] }"));
+ ConstElementPtr result(server.updateConfig(config));
+ EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.2")));
+ EXPECT_EQ(DROP, server.getQueryACL().execute(
+ createRequest("2001:db8::1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(
+ createRequest("2001:db8::2"))); // match the default rule
+}
+
+
+int
+getResultCode(ConstElementPtr result) {
+ int rcode;
+ isc::config::parseAnswer(rcode, result);
+ return (rcode);
+}
+
+TEST_F(ResolverConfig, queryACLActionOnly) {
+ // "action only" rule will be accepted by the loader, which can
+ // effectively change the default action.
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"DROP\"} ] }"));
+ EXPECT_EQ(0, getResultCode(server.updateConfig(config)));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+
+ // We reject non matching queries by default, but the last resort
+ // rule should have changed the action in that case to "DROP".
+ EXPECT_EQ(DROP, server.getQueryACL().execute(createRequest("192.0.2.2")));
+}
+
+TEST_F(ResolverConfig, badQueryACL) {
+ // Most of these cases shouldn't happen in practice because the syntax
+ // check should be performed before updateConfig(). But we check at
+ // least the server code won't crash even if an unexpected input is given.
+
+ // ACL must be a list
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\": 1 }"))));
+ // Each rule must have "action" and "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"from\": \"192.0.2.1\"} ] }"))));
+ // invalid "action"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": 1,"
+ " \"from\": \"192.0.2.1\"}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"BADACTION\","
+ " \"from\": \"192.0.2.1\"}]}"))));
+ // invalid "from"
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": 53}]}"))));
+ EXPECT_EQ(1, getResultCode(
+ server.updateConfig(
+ Element::fromJSON("{ \"query_acl\":"
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"1922.0.2.1\"}]}"))));
+}
+
}
diff --git a/src/bin/resolver/tests/resolver_unittest.cc b/src/bin/resolver/tests/resolver_unittest.cc
index 97edf12..71474dd 100644
--- a/src/bin/resolver/tests/resolver_unittest.cc
+++ b/src/bin/resolver/tests/resolver_unittest.cc
@@ -12,14 +12,22 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <string>
+
+#include <exceptions/exceptions.h>
+
#include <dns/name.h>
+#include <cc/data.h>
#include <resolver/resolver.h>
#include <dns/tests/unittest_util.h>
#include <testutils/dnsmessage_test.h>
#include <testutils/srv_test.h>
+using namespace std;
using namespace isc::dns;
+using namespace isc::data;
+using isc::acl::dns::RequestACL;
using namespace isc::testutils;
using isc::UnitTestUtil;
@@ -28,7 +36,17 @@ const char* const TEST_PORT = "53535";
class ResolverTest : public SrvTestBase{
protected:
- ResolverTest() : server(){}
+ ResolverTest() : server() {
+ // By default queries from the "default remote address" will be
+ // rejected, so we'll need to add an explicit ACL entry to allow that.
+ server.setConfigured();
+ server.updateConfig(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ }
virtual void processMessage() {
server.processMessage(*io_message,
parse_message,
@@ -136,4 +154,45 @@ TEST_F(ResolverTest, notifyFail) {
Opcode::NOTIFY().getCode(), QR_FLAG, 0, 0, 0, 0);
}
+TEST_F(ResolverTest, setQueryACL) {
+ // valid cases are tested through other tests. We only explicitly check
+ // an invalid case: passing a NULL shared pointer.
+ EXPECT_THROW(server.setQueryACL(boost::shared_ptr<const RequestACL>()),
+ isc::InvalidParameter);
+}
+
+TEST_F(ResolverTest, queryACL) {
+ // The "ACCEPT" cases are covered in other tests. Here we explicitly
+ // test "REJECT" and "DROP" cases.
+
+ // Clear the existing ACL, reverting to the "default reject" rule.
+
+ // AXFR over UDP. This would otherwise result in FORMERR.
+ server.updateConfig(Element::fromJSON("{ \"query_acl\": [] }"));
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::AXFR());
+ createRequestPacket(request_message, IPPROTO_UDP);
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ headerCheck(*parse_message, default_qid, Rcode::REFUSED(),
+ Opcode::QUERY().getCode(), QR_FLAG, 1, 0, 0, 0);
+
+ // Same query, but with an explicit "DROP" ACL entry. There should be
+ // no response.
+ server.updateConfig(Element::fromJSON("{ \"query_acl\": "
+ " [ {\"action\": \"DROP\","
+ " \"from\": \"" +
+ string(DEFAULT_REMOTE_ADDRESS) +
+ "\"} ] }"));
+ parse_message->clear(Message::PARSE);
+ response_message->clear(Message::RENDER);
+ response_obuffer->clear();
+ server.processMessage(*io_message, parse_message, response_message,
+ response_obuffer, &dnsserv);
+ EXPECT_FALSE(dnsserv.hasAnswer());
+}
+
+
}
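
The queryACL test relies on two behaviours: with an empty "query_acl" the default applies and the query is answered with REFUSED, while an explicit "DROP" entry for the client address produces no answer at all. A small first-match sketch of that decision logic (the action names and the reject-by-default behaviour come from the tests; this is not the resolver's RequestACL implementation):

    # First-match evaluation of a query_acl-style list, as exercised above.
    from ipaddress import ip_address, ip_network

    def evaluate_query_acl(acl, client):
        """Return the action that applies to 'client'."""
        addr = ip_address(client)
        for rule in acl:
            if addr in ip_network(rule["from"], strict=False):
                return rule["action"]
        return "REJECT"   # no match: fall back to the default action

    # Empty ACL: default REJECT, which the server turns into REFUSED.
    assert evaluate_query_acl([], "192.0.2.1") == "REJECT"
    # Explicit DROP for the client: the server sends no response at all.
    assert evaluate_query_acl([{"action": "DROP", "from": "192.0.2.1"}],
                              "192.0.2.1") == "DROP"
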
diff --git a/src/bin/resolver/tests/response_scrubber_unittest.cc b/src/bin/resolver/tests/response_scrubber_unittest.cc
index eff5598..1570def 100644
--- a/src/bin/resolver/tests/response_scrubber_unittest.cc
+++ b/src/bin/resolver/tests/response_scrubber_unittest.cc
@@ -68,6 +68,12 @@ public:
return address_.getFamily();
}
+ // This is a pure dummy and is never used. It is defined only to satisfy the build.
+ virtual const struct sockaddr& getSockAddr() const {
+ static struct sockaddr sa;
+ return (sa);
+ }
+
private:
IOAddress address_; // Address of endpoint
uint16_t port_; // Port number of endpoint
diff --git a/src/bin/resolver/tests/run_unittests.cc b/src/bin/resolver/tests/run_unittests.cc
index 6ae848d..d3bbab7 100644
--- a/src/bin/resolver/tests/run_unittests.cc
+++ b/src/bin/resolver/tests/run_unittests.cc
@@ -13,6 +13,8 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
@@ -21,6 +23,7 @@ main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
+ isc::log::initLogger();
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/bin/sockcreator/README b/src/bin/sockcreator/README
index 4dbbee7..e142d19 100644
--- a/src/bin/sockcreator/README
+++ b/src/bin/sockcreator/README
@@ -3,7 +3,7 @@ The socket creator
The only thing we need higher rights than standard user is binding sockets to
ports lower than 1024. So we will have a separate process that keeps the
-rights, while the rests drop them for security reasons.
+rights, while the rest drops them for security reasons.
This process is the socket creator. Its goal is to be as simple as possible
and to contain as little code as possible to minimise the amount of code
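
To make the privilege split concrete: binding to a port below 1024 requires root, so a single process would have to start as root, bind, and then drop its privileges. The socket creator instead keeps one tiny privileged helper and hands sockets to the unprivileged processes. A minimal illustration of the underlying bind-then-drop step in Python (the "nobody" user is just an example, and this is not the socket creator's actual protocol):

    # Bind a low port while privileged, then drop to an unprivileged user.
    import os
    import pwd
    import socket

    def bind_low_port_then_drop(port=53, user="nobody"):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(("127.0.0.1", port))   # needs root for ports below 1024
        if os.getuid() == 0:
            entry = pwd.getpwnam(user)
            os.setgid(entry.pw_gid)      # drop the group first, then the user
            os.setuid(entry.pw_uid)
        return sock
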
diff --git a/src/bin/sockcreator/tests/Makefile.am b/src/bin/sockcreator/tests/Makefile.am
index 2e1307a..223e761 100644
--- a/src/bin/sockcreator/tests/Makefile.am
+++ b/src/bin/sockcreator/tests/Makefile.am
@@ -16,10 +16,9 @@ run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la
-run_unittests_LDADD += \
- $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/sockcreator/tests/run_unittests.cc b/src/bin/sockcreator/tests/run_unittests.cc
index e787ab1..1287164 100644
--- a/src/bin/sockcreator/tests/run_unittests.cc
+++ b/src/bin/sockcreator/tests/run_unittests.cc
@@ -13,10 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char *argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
+ return isc::util::unittests::run_all();
}
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index e4a4f92..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,16 +5,25 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
CLEANFILES = b10-stats stats.pyc
CLEANFILES += b10-stats-httpd stats_httpd.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
if ENABLE_MAN
@@ -26,11 +35,24 @@ b10-stats-httpd.8: b10-stats-httpd.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py : stats_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py : stats_httpd_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_httpd_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-stats: stats.py
+b10-stats: stats.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats.py >$@
chmod a+x $@
-b10-stats-httpd: stats_httpd.py
+b10-stats-httpd: stats_httpd.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
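
The new rules above compile stats_messages.mes and stats_httpd_messages.mes into Python modules under isc/log_messages before b10-stats and b10-stats-httpd are generated. What those generated modules are for is visible further down in this patch: they define the message identifiers used with the logger. A condensed sketch of that usage, mirroring the new stats.py.in below:

    # The generated isc.log_messages.stats_messages module defines the
    # STATS_* message identifiers used by the stats daemon's logger.
    import isc.log
    from isc.log_messages.stats_messages import *   # e.g. STATS_STARTING

    isc.log.init("b10-stats")
    logger = isc.log.Logger("stats")
    logger.info(STATS_STARTING)
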
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
.PP
\fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended as the HTTP/XML interface for the statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to serve requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
\fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
\fBb10\-stats\fR
for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
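
As the page describes, the documents are served over plain HTTP and a request to the root is redirected to the XML document. A small example of fetching the statistics XML with the default listen address and the XML path given above (this assumes b10-stats-httpd is running with its default listen_on setting):

    # Fetch the statistics XML document from b10-stats-httpd.
    from urllib.request import urlopen

    def fetch_statistics_xml(base="http://127.0.0.1:8000"):
        # Requesting "/" would redirect here anyway, so ask for the
        # XML document directly.
        with urlopen(base + "/bind10/statistics/xml") as response:
            return response.read().decode("utf-8")
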
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
by the BIND 10 boss process (<command>bind10</command>) and eventually
exited by it. The server is intended to be server requests by HTTP
clients like web browsers and third-party modules. When the server is
- asked, it requests BIND 10 statistics data from
+ asked, it requests BIND 10 statistics data or its schema from
<command>b10-stats</command>, and it sends the data back in Python
dictionary format and the server converts it into XML format. The server
sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
<refsect1>
<title>CONFIGURATION AND COMMANDS</title>
<para>
- The configurable setting in
+ The configurable setting in
<filename>stats-httpd.spec</filename> is:
</para>
<variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index f69e4d3..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
'\" t
.\" Title: b10-stats
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -45,9 +36,9 @@ with other modules like
\fBb10\-auth\fR
and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&.
\fBb10\-stats\fR
-invokes "sendstats" command for
+invokes an internal command for
\fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
\fBbind10\fR\&.
.SH "OPTIONS"
.PP
@@ -59,6 +50,84 @@ This
\fBb10\-stats\fR
switches to verbose mode\&. It sends verbose messages to STDOUT\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shut down the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
+The current date and time represented in seconds since the UNIX epoch (1970\-01\-01T00:00:00Z), with precision (delimited with a period) up to one hundred\-thousandth of a second\&.
+.RE
+.PP
+See other manual pages for explanations of the statistics that are tracked by
+\fBb10\-stats\fR\&.
.SH "FILES"
.PP
/usr/local/share/bind10\-devel/stats\&.spec
@@ -66,10 +135,6 @@ switches to verbose mode\&. It sends verbose messages to STDOUT\&.
\fBb10\-stats\fR\&. It contains commands for
\fBb10\-stats\fR\&. They can be invoked via
bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
.SH "SEE ALSO"
.PP
@@ -82,7 +147,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
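
The statistics listed above use two time representations: ISO 8601 strings (report_time, stats.boot_time, stats.start_time, stats.last_update_time) and a fractional seconds-since-epoch value (stats.timestamp). They correspond to the get_datetime() and get_timestamp() helpers added to stats.py.in later in this patch; in short:

    # The two time formats used by the statistics above, matching the
    # get_timestamp()/get_datetime() helpers in the new stats.py.in.
    from time import time, strftime, gmtime

    timestamp = time()                                      # stats.timestamp, e.g. 1320867000.12345
    report_time = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())  # report_time, e.g. 2011-11-09T19:29:15Z
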
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index f0c472d..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -64,9 +64,10 @@
send stats data to stats module independently from
implementation of stats module, so the frequency of sending data
may not be constant. Stats module collects data and aggregates
- it. <command>b10-stats</command> invokes "sendstats" command
+ it. <command>b10-stats</command> invokes an internal command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,6 +88,123 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+ </para>
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+ <command>shutdown</command> will shut down the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+ <listitem><simpara>The current date and time represented in
+ seconds since the UNIX epoch (1970-01-01T00:00:00Z) with
+ precision (delimited with a period) up to
+ one hundred-thousandth of a second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+ See other manual pages for explanations of the statistics
+ that are tracked by <command>b10-stats</command>.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -95,12 +213,6 @@
invoked
via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
</refsect1>
<refsect1>
@@ -126,7 +238,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
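
After this rewrite the Stats daemon keeps its data keyed by the owning module, and the show command returns the whole structure, one owner, or one item. A rough sketch of that shape, using only item names that appear in this patch (the "Boss" entry and its value are illustrative assumptions, not actual output):

    # Approximate shape of Stats.statistics_data after the rewrite below.
    statistics_data = {
        "Stats": {
            "report_time": "1970-01-01T00:00:00Z",
            "boot_time": "1970-01-01T00:00:00Z",
            "last_update_time": "1970-01-01T00:00:00Z",
            "timestamp": 0.0,
            "lname": "",
        },
        "Boss": {                      # hypothetical owner entry
            "boot_time": "2011-11-09T19:29:15Z",
        },
    }

    # 'show' with owner="Stats" and name="boot_time" returns just that value:
    value = statistics_data["Stats"]["boot_time"]
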
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
new file mode 100644
index 0000000..d5846ad
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<stats:stats_data version="1.0"
+ xmlns:stats="$xsd_namespace"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="$xsd_namespace $xsd_url_path">
+ $xml_string
+</stats:stats_data>
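
The template above, and the XSD and XSL templates that follow, use $-style placeholders such as $xsd_namespace and $xml_string. The code that renders them (stats_httpd.py) is not part of this hunk, but the placeholder syntax matches Python's string.Template; a sketch under that assumption, with purely hypothetical values:

    # Rendering sketch for stats-httpd-xml.tpl. The use of string.Template
    # is inferred from the $placeholder syntax; the values below are
    # placeholders, not the real ones produced by b10-stats-httpd.
    from string import Template

    with open("stats-httpd-xml.tpl") as f:
        tpl = Template(f.read())

    document = tpl.substitute(
        xsl_url_path="/bind10/statistics/xsl",           # hypothetical
        xsd_url_path="/bind10/statistics/xsd",           # hypothetical
        xsd_namespace="http://bind10.isc.org/bind10",    # hypothetical
        xml_string="<stats:report_time>1970-01-01T00:00:00Z</stats:report_time>",
    )
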
diff --git a/src/bin/stats/stats-httpd-xml.tpl.in b/src/bin/stats/stats-httpd-xml.tpl.in
deleted file mode 100644
index d5846ad..0000000
--- a/src/bin/stats/stats-httpd-xml.tpl.in
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
- xmlns:stats="$xsd_namespace"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="$xsd_namespace $xsd_url_path">
- $xml_string
-</stats:stats_data>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
new file mode 100644
index 0000000..6ad1280
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<schema targetNamespace="$xsd_namespace"
+ xmlns="http://www.w3.org/2001/XMLSchema"
+ xmlns:stats="$xsd_namespace">
+ <annotation>
+ <documentation xml:lang="en">XML schema of the statistics
+ data in BIND 10</documentation>
+ </annotation>
+ <element name="stats_data">
+ <annotation>
+ <documentation>A set of statistics data</documentation>
+ </annotation>
+ <complexType>
+ $xsd_string
+ <attribute name="version" type="token" use="optional" default="1.0">
+ <annotation>
+ <documentation>Version number of syntax</documentation>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+</schema>
diff --git a/src/bin/stats/stats-httpd-xsd.tpl.in b/src/bin/stats/stats-httpd-xsd.tpl.in
deleted file mode 100644
index 6ad1280..0000000
--- a/src/bin/stats/stats-httpd-xsd.tpl.in
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
- xmlns="http://www.w3.org/2001/XMLSchema"
- xmlns:stats="$xsd_namespace">
- <annotation>
- <documentation xml:lang="en">XML schema of the statistics
- data in BIND 10</documentation>
- </annotation>
- <element name="stats_data">
- <annotation>
- <documentation>A set of statistics data</documentation>
- </annotation>
- <complexType>
- $xsd_string
- <attribute name="version" type="token" use="optional" default="1.0">
- <annotation>
- <documentation>Version number of syntax</documentation>
- </annotation>
- </attribute>
- </complexType>
- </element>
-</schema>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
new file mode 100644
index 0000000..a1f6406
--- /dev/null
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
+ xmlns:stats="$xsd_namespace">
+ <xsl:output method="html" encoding="UTF-8"
+ doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
+ doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
+ <xsl:template match="/">
+ <html lang="en">
+ <head>
+ <title>BIND 10 Statistics</title>
+ <style type="text/css"><![CDATA[
+table {
+ border: 1px #000000 solid;
+ border-collapse: collapse;
+}
+td, th {
+ padding: 3px 20px;
+ border: 1px #000000 solid;
+}
+td.title {
+ text-decoration:underline;
+}
+]]>
+ </style>
+ </head>
+ <body>
+ <h1>BIND 10 Statistics</h1>
+ <table>
+ <tr>
+ <th>Owner</th>
+ <th>Title</th>
+ <th>Value</th>
+ </tr>
+ <xsl:apply-templates />
+ </table>
+ </body>
+ </html>
+ </xsl:template>
+ $xsl_string
+</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl.in b/src/bin/stats/stats-httpd-xsl.tpl.in
deleted file mode 100644
index 01ffdc6..0000000
--- a/src/bin/stats/stats-httpd-xsl.tpl.in
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<xsl:stylesheet version="1.0"
- xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
- xmlns:stats="$xsd_namespace">
- <xsl:output method="html" encoding="UTF-8"
- doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
- doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
- <xsl:template match="/">
- <html lang="en">
- <head>
- <title>BIND 10 Statistics</title>
- <style type="text/css"><![CDATA[
-table {
- border: 1px #000000 solid;
- border-collapse: collapse;
-}
-td, th {
- padding: 3px 20px;
- border: 1px #000000 solid;
-}
-td.title {
- text-decoration:underline;
-}
-]]>
- </style>
- </head>
- <body>
- <h1>BIND 10 Statistics</h1>
- <table>
- <tr>
- <th>Title</th>
- <th>Value</th>
- </tr>
- <xsl:apply-templates />
- </table>
- </body>
- </html>
- </xsl:template>
- $xsl_string
-</xsl:stylesheet>
diff --git a/src/bin/stats/stats-httpd.spec b/src/bin/stats/stats-httpd.spec
new file mode 100644
index 0000000..6307135
--- /dev/null
+++ b/src/bin/stats/stats-httpd.spec
@@ -0,0 +1,54 @@
+{
+ "module_spec": {
+ "module_name": "StatsHttpd",
+ "module_description": "Stats HTTP daemon",
+ "config_data": [
+ {
+ "item_name": "listen_on",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [
+ {
+ "address": "127.0.0.1",
+ "port": 8000
+ }
+ ],
+ "list_item_spec": {
+ "item_name": "address",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ {
+ "item_name": "address",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "127.0.0.1",
+ "item_description": "listen-on address for HTTP"
+ },
+ {
+ "item_name": "port",
+ "item_type": "integer",
+ "item_optional": true,
+ "item_default": 8000,
+ "item_description": "listen-on port for HTTP"
+ }
+ ]
+ },
+ "item_description": "http listen-on address and port"
+ }
+ ],
+ "commands": [
+ {
+ "command_name": "status",
+ "command_description": "Status of the stats httpd",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats httpd",
+ "command_args": []
+ }
+ ]
+ }
+}
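
The listen_on item above carries its own defaults in the spec. The get_spec_defaults() helper added to stats.py.in later in this patch turns such a config_data or command_args list into a plain dict of defaults; applied to this item it yields the documented 127.0.0.1:8000 default:

    # What get_spec_defaults() (defined in the new stats.py.in below)
    # produces for the listen_on item: the item_default at the list level
    # wins over the per-element defaults.
    listen_on_spec = {
        "item_name": "listen_on",
        "item_type": "list",
        "item_default": [{"address": "127.0.0.1", "port": 8000}],
        "list_item_spec": {"item_name": "address", "item_type": "map",
                           "item_default": {}, "map_item_spec": []},
    }
    # get_spec_defaults([listen_on_spec])
    #   == {"listen_on": [{"address": "127.0.0.1", "port": 8000}]}
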
diff --git a/src/bin/stats/stats-httpd.spec.in b/src/bin/stats/stats-httpd.spec.in
deleted file mode 100644
index 6307135..0000000
--- a/src/bin/stats/stats-httpd.spec.in
+++ /dev/null
@@ -1,54 +0,0 @@
-{
- "module_spec": {
- "module_name": "StatsHttpd",
- "module_description": "Stats HTTP daemon",
- "config_data": [
- {
- "item_name": "listen_on",
- "item_type": "list",
- "item_optional": false,
- "item_default": [
- {
- "address": "127.0.0.1",
- "port": 8000
- }
- ],
- "list_item_spec": {
- "item_name": "address",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": [
- {
- "item_name": "address",
- "item_type": "string",
- "item_optional": true,
- "item_default": "127.0.0.1",
- "item_description": "listen-on address for HTTP"
- },
- {
- "item_name": "port",
- "item_type": "integer",
- "item_optional": true,
- "item_default": 8000,
- "item_description": "listen-on port for HTTP"
- }
- ]
- },
- "item_description": "http listen-on address and port"
- }
- ],
- "commands": [
- {
- "command_name": "status",
- "command_description": "Status of the stats httpd",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats httpd",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/stats/stats-schema.spec.in b/src/bin/stats/stats-schema.spec.in
deleted file mode 100644
index 37e9c1a..0000000
--- a/src/bin/stats/stats-schema.spec.in
+++ /dev/null
@@ -1,87 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Statistics data schema",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
- "item_format": "second"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": []
- }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
old mode 100644
new mode 100755
index 969676e..3a7f47a
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,399 +15,400 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+Statistics daemon in BIND 10
+
+"""
import sys; sys.path.append ('@@PYTHONPATH@@')
import os
-import signal
-import select
from time import time, strftime, gmtime
from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
-# for setproctitle
+import isc
import isc.util.process
+import isc.log
+from isc.log_messages.stats_messages import *
+
+isc.log.init("b10-stats")
+logger = isc.log.Logger("stats")
+
+# Some constants for debug levels.
+DBG_STATS_MESSAGING = logger.DBGLVL_COMMAND
+
+# This is for boot_time of Stats
+_BASETIME = gmtime()
+
+# for setproctitle
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats"
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
- BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+ SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+ SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
-class Singleton(type):
+def get_timestamp():
"""
- A abstract class of singleton pattern
+ get current timestamp
"""
- # Because of singleton pattern:
- # At the beginning of coding, one UNIX domain socket is needed
- # for config manager, another socket is needed for stats module,
- # then stats module might need two sockets. So I adopted the
- # singleton pattern because I avoid creating multiple sockets in
- # one stats module. But in the initial version stats module
- # reports only via bindctl, so just one socket is needed. To use
- # the singleton pattern is not important now. :(
+ return time()
- def __init__(self, *args, **kwargs):
- type.__init__(self, *args, **kwargs)
- self._instances = {}
+def get_datetime(gmt=None):
+ """
+ get current datetime
+ """
+ if not gmt: gmt = gmtime()
+ return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
- def __call__(self, *args, **kwargs):
- if args not in self._instances:
- self._instances[args]={}
- kw = tuple(kwargs.items())
- if kw not in self._instances[args]:
- self._instances[args][kw] = type.__call__(self, *args, **kwargs)
- return self._instances[args][kw]
+def get_spec_defaults(spec):
+ """
+ extracts the default values of the items from the spec given as the
+ argument, and returns a dict that maps the item names to their
+ default values
+ """
+ if type(spec) is not list: return {}
+ def _get_spec_defaults(spec):
+ item_type = spec['item_type']
+ if item_type == "integer":
+ return int(spec.get('item_default', 0))
+ elif item_type == "real":
+ return float(spec.get('item_default', 0.0))
+ elif item_type == "boolean":
+ return bool(spec.get('item_default', False))
+ elif item_type == "string":
+ return str(spec.get('item_default', ""))
+ elif item_type == "list":
+ return spec.get(
+ "item_default",
+ [ _get_spec_defaults(spec["list_item_spec"]) ])
+ elif item_type == "map":
+ return spec.get(
+ "item_default",
+ dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+ else:
+ return spec.get("item_default", None)
+ return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
class Callback():
"""
A Callback handler class
"""
- def __init__(self, name=None, callback=None, args=(), kwargs={}):
- self.name = name
- self.callback = callback
+ def __init__(self, command=None, args=(), kwargs={}):
+ self.command = command
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
- if not args:
- args = self.args
- if not kwargs:
- kwargs = self.kwargs
- if self.callback:
- return self.callback(*args, **kwargs)
-
-class Subject():
- """
- A abstract subject class of observer pattern
- """
- # Because of observer pattern:
- # In the initial release, I'm also sure that observer pattern
- # isn't definitely needed because the interface between gathering
- # and reporting statistics data is single. However in the future
- # release, the interfaces may be multiple, that is, multiple
- # listeners may be needed. For example, one interface, which
- # stats module has, is for between ''config manager'' and stats
- # module, another interface is for between ''HTTP server'' and
- # stats module, and one more interface is for between ''SNMP
- # server'' and stats module. So by considering that stats module
- # needs multiple interfaces in the future release, I adopted the
- # observer pattern in stats module. But I don't have concrete
- # ideas in case of multiple listener currently.
-
- def __init__(self):
- self._listeners = []
-
- def attach(self, listener):
- if not listener in self._listeners:
- self._listeners.append(listener)
+ if not args: args = self.args
+ if not kwargs: kwargs = self.kwargs
+ if self.command: return self.command(*args, **kwargs)
- def detach(self, listener):
- try:
- self._listeners.remove(listener)
- except ValueError:
- pass
+class StatsError(Exception):
+ """Exception class for Stats class"""
+ pass
- def notify(self, event, modifier=None):
- for listener in self._listeners:
- if modifier != listener:
- listener.update(event)
-
-class Listener():
+class Stats:
"""
- A abstract listener class of observer pattern
+ Main class of stats module
"""
- def __init__(self, subject):
- self.subject = subject
- self.subject.attach(self)
- self.events = {}
-
- def update(self, name):
- if name in self.events:
- callback = self.events[name]
- return callback()
-
- def add_event(self, event):
- self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
- """
- A concrete subject class which creates CC session object
- """
- def __init__(self, session=None, verbose=False):
- Subject.__init__(self)
- self.verbose = verbose
- self.session=session
- self.running = False
-
- def start(self):
- self.running = True
- self.notify('start')
-
- def stop(self):
+ def __init__(self):
self.running = False
- self.notify('stop')
-
- def check(self):
- self.notify('check')
-
-class CCSessionListener(Listener):
- """
- A concrete listener class which creates SessionSubject object and
- ModuleCCSession object
- """
- def __init__(self, subject, verbose=False):
- Listener.__init__(self, subject)
- self.verbose = verbose
- self.session = subject.session
- self.boot_time = get_datetime()
-
# create ModuleCCSession object
- self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- self.session)
-
- self.session = self.subject.session = self.cc_session._session
-
- # initialize internal data
- self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # add event handler invoked via SessionSubject object
- self.add_event(Callback('start', self.start))
- self.add_event(Callback('stop', self.stop))
- self.add_event(Callback('check', self.check))
- # don't add 'command_' suffix to the special commands in
- # order to prevent executing internal command via bindctl
-
+ self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.cc_session = self.mccs._session
+ # get module spec
+ self.module_name = self.mccs.get_module_spec().get_module_name()
+ self.modules = {}
+ self.statistics_data = {}
# get commands spec
- self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+ self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
# add event handler related command_handler of ModuleCCSession
- # invoked via bindctl
+ self.callbacks = {}
for cmd in self.commands_spec:
+ # add prefix "command_"
+ name = "command_" + cmd["command_name"]
try:
- # add prefix "command_"
- name = "command_" + cmd["command_name"]
callback = getattr(self, name)
- kwargs = self.initialize_data(cmd["command_args"])
- self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
- except AttributeError as ae:
- sys.stderr.write("[b10-stats] Caught undefined command while parsing spec file: "
- +str(cmd["command_name"])+"\n")
+ kwargs = get_spec_defaults(cmd["command_args"])
+ self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+ except AttributeError:
+ raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ self.mccs.start()
def start(self):
"""
- start the cc chanel
+ Start stats module
"""
- # set initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
- self.cc_session.start()
+ self.running = True
+ logger.info(STATS_STARTING)
+
# request Bob to send statistics data
- if self.verbose:
- sys.stdout.write("[b10-stats] request Bob to send statistics data\n")
- cmd = isc.config.ccsession.create_command("sendstats", None)
- seq = self.session.group_sendmsg(cmd, 'Boss')
- self.session.group_recvmsg(True, seq)
+ logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
+ cmd = isc.config.ccsession.create_command("getstats", None)
+ seq = self.cc_session.group_sendmsg(cmd, 'Boss')
+ try:
+ answer, env = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ rcode, args = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ errors = self.update_statistics_data(
+ args["owner"], **args["data"])
+ if errors:
+ raise StatsError("boss spec file is incorrect: "
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name,
+ last_update_time=get_datetime())
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ except isc.cc.session.SessionTimeout:
+ pass
- def stop(self):
- """
- stop the cc chanel
- """
- return self.cc_session.close()
+ # initialized Statistics data
+ errors = self.update_statistics_data(
+ self.module_name,
+ lname=self.cc_session.lname,
+ boot_time=get_datetime(_BASETIME)
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
- def check(self):
- """
- check the cc chanel
- """
- return self.cc_session.check_command(False)
+ while self.running:
+ self.mccs.check_command(False)
def config_handler(self, new_config):
"""
handle a configure from the cc channel
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] newconfig received: "+str(new_config)+"\n")
-
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
+ new_config)
# do nothing currently
- return create_answer(0)
+ return isc.config.create_answer(0)
- def command_handler(self, command, *args, **kwargs):
+ def command_handler(self, command, kwargs):
"""
handle commands from the cc channel
"""
- # add 'command_' suffix in order to executing command via bindctl
name = 'command_' + command
-
- if name in self.events:
- event = self.events[name]
- return event(*args, **kwargs)
+ if name in self.callbacks:
+ callback = self.callbacks[name]
+ if kwargs:
+ return callback(**kwargs)
+ else:
+ return callback()
else:
- return self.command_unknown(command, args)
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+ return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
- def command_shutdown(self, args):
+ def update_modules(self):
"""
- handle shutdown command
+ updates the information of each module. This method gets each
+ module's information from the config manager and stores it in
+ self.modules. If getting it from the config manager fails, it
+ raises StatsError.
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'shutdown' command received\n")
- self.subject.running = False
- return create_answer(0)
+ modules = {}
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command(
+ isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+ 'ConfigManager')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ for mod in value:
+ spec = { "module_name" : mod }
+ if value[mod] and type(value[mod]) is list:
+ spec["statistics"] = value[mod]
+ modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+ else:
+ raise StatsError("Updating module spec fails: " + str(value))
+ modules[self.module_name] = self.mccs.get_module_spec()
+ self.modules = modules
- def command_set(self, args, stats_data={}):
+ def get_statistics_data(self, owner=None, name=None):
"""
- handle set command
+ returns the statistics data that the stats module holds for each
+ module. If it can't find the specified statistics data, it raises
+ StatsError.
"""
- # 'args' must be dictionary type
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
-
- return create_answer(0)
+ self.update_statistics_data()
+ if owner and name:
+ try:
+ return self.statistics_data[owner][name]
+ except KeyError:
+ pass
+ elif owner:
+ try:
+ return self.statistics_data[owner]
+ except KeyError:
+ pass
+ elif name:
+ pass
+ else:
+ return self.statistics_data
+ raise StatsError("No statistics data found: "
+ + "owner: " + str(owner) + ", "
+ + "name: " + str(name))
- def command_remove(self, args, stats_item_name=''):
+ def update_statistics_data(self, owner=None, **data):
"""
- handle remove command
+ changes the statistics data of the specified module to the specified
+ data. It updates the information of each module first, and then
+ updates the statistics data. If the specified data is invalid for the
+ statistics spec of the specified owner, it returns a list of error
+ messages. If there is no error, or if neither owner nor data is
+ specified in args, it returns None.
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'remove' command received, args: "+str(args)+"\n")
-
- # 'args' must be dictionary type
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
-
- # just remove one item
- self.stats_data.pop(stats_item_name)
-
- return create_answer(0)
-
- def command_show(self, args, stats_item_name=''):
+ self.update_modules()
+ statistics_data = {}
+ for (name, module) in self.modules.items():
+ value = get_spec_defaults(module.get_statistics_spec())
+ if module.validate_statistics(True, value):
+ statistics_data[name] = value
+ for (name, value) in self.statistics_data.items():
+ if name in statistics_data:
+ statistics_data[name].update(value)
+ else:
+ statistics_data[name] = value
+ self.statistics_data = statistics_data
+ if owner and data:
+ errors = []
+ try:
+ if self.modules[owner].validate_statistics(False, data, errors):
+ self.statistics_data[owner].update(data)
+ return
+ except KeyError:
+ errors.append("unknown module name: " + str(owner))
+ return errors
+
+ def command_status(self):
"""
- handle show command
+ handle status command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'show' command received, args: "+str(args)+"\n")
-
- # always overwrite 'report_time' and 'stats.timestamp'
- # if "show" command invoked
- self.stats_data['report_time'] = get_datetime()
- self.stats_data['stats.timestamp'] = get_timestamp()
-
- # if with args
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
- return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+ return isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")")
- return create_answer(0, self.stats_data)
-
- def command_reset(self, args):
+ def command_shutdown(self):
"""
- handle reset command
+ handle shutdown command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'reset' command received\n")
-
- # re-initialize internal variables
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # reset initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
-
- return create_answer(0)
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+ self.running = False
+ return isc.config.create_answer(0)
- def command_status(self, args):
+ def command_show(self, owner=None, name=None):
"""
- handle status command
+ handle show command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'status' command received\n")
- # just return "I'm alive."
- return create_answer(0, "I'm alive.")
-
- def command_unknown(self, command, args):
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
+ errors = self.update_statistics_data(
+ self.module_name,
+ timestamp=get_timestamp(),
+ report_time=get_datetime()
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ try:
+ return isc.config.create_answer(
+ 0, self.get_statistics_data(owner, name))
+ except StatsError:
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
+
+ def command_showschema(self, owner=None, name=None):
"""
- handle an unknown command
+ handle showschema command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] Unknown command received: '"
- + str(command) + "'\n")
- return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+ self.update_modules()
+ schema = {}
+ schema_byname = {}
+ for mod in self.modules:
+ spec = self.modules[mod].get_statistics_spec()
+ schema_byname[mod] = {}
+ if spec:
+ schema[mod] = spec
+ for item in spec:
+ schema_byname[mod][item['item_name']] = item
+ if owner:
+ try:
+ if name:
+ return isc.config.create_answer(0, schema_byname[owner][name])
+ else:
+ return isc.config.create_answer(0, schema[owner])
+ except KeyError:
+ pass
+ else:
+ if name:
+ return isc.config.create_answer(1, "module name is not specified")
+ else:
+ return isc.config.create_answer(0, schema)
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
- def initialize_data(self, spec):
+ def command_set(self, owner, data):
"""
- initialize stats data
+ handle set command
"""
- def __get_init_val(spec):
- if spec['item_type'] == 'null':
- return None
- elif spec['item_type'] == 'boolean':
- return bool(spec.get('item_default', False))
- elif spec['item_type'] == 'string':
- return str(spec.get('item_default', ''))
- elif spec['item_type'] in set(['number', 'integer']):
- return int(spec.get('item_default', 0))
- elif spec['item_type'] in set(['float', 'double', 'real']):
- return float(spec.get('item_default', 0.0))
- elif spec['item_type'] in set(['list', 'array']):
- return spec.get('item_default',
- [ __get_init_val(s) for s in spec['list_item_spec'] ])
- elif spec['item_type'] in set(['map', 'object']):
- return spec.get('item_default',
- dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
- else:
- return spec.get('item_default')
- return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+ errors = self.update_statistics_data(owner, **data)
+ if errors:
+ return isc.config.create_answer(
+ 1, "errors while setting statistics data: " \
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name, last_update_time=get_datetime() )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ return isc.config.create_answer(0)
-def get_timestamp():
- """
- get current timestamp
- """
- return time()
-
-def get_datetime():
- """
- get current datetime
- """
- return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
try:
parser = OptionParser()
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ parser.add_option(
+ "-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
(options, args) = parser.parse_args()
- subject = SessionSubject(session=session, verbose=options.verbose)
- listener = CCSessionListener(subject, verbose=options.verbose)
- subject.start()
- while subject.running:
- subject.check()
- subject.stop()
-
- except OptionValueError:
- sys.stderr.write("[b10-stats] Error parsing options\n")
- except SessionError as se:
- sys.stderr.write("[b10-stats] Error creating Stats module, "
- + "is the command channel daemon running?\n")
+ if options.verbose:
+ isc.log.init("b10-stats", "DEBUG", 99)
+ stats = Stats()
+ stats.start()
+ except OptionValueError as ove:
+ logger.fatal(STATS_BAD_OPTION_VALUE, ove)
+ sys.exit(1)
+ except isc.cc.session.SessionError as se:
+ logger.fatal(STATS_CC_SESSION_ERROR, se)
+ sys.exit(1)
+ except StatsError as se:
+ logger.fatal(STATS_START_ERROR, se)
+ sys.exit(1)
except KeyboardInterrupt as kie:
- sys.stderr.write("[b10-stats] Interrupted, exiting\n")
-
-if __name__ == "__main__":
- main()
+ logger.info(STATS_STOPPED_BY_KEYBOARD)
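
The new command_showschema handler above builds two parallel lookup structures from each module's statistics spec: schema, keyed by module name, and schema_byname, keyed by module and item name, so the optional owner/name arguments reduce to plain dictionary lookups. A minimal standalone sketch of that indexing step follows; the spec entries here are invented for illustration and are not taken from any real module spec.

    # Standalone sketch of the schema indexing in command_showschema.
    # The module_specs entries below are hypothetical, for illustration only.
    module_specs = {
        'Auth': [
            {'item_name': 'queries.tcp', 'item_type': 'integer'},
            {'item_name': 'queries.udp', 'item_type': 'integer'},
        ],
        'Boss': [
            {'item_name': 'boot_time', 'item_type': 'string'},
        ],
    }

    schema = {}          # module name -> full list of item specs
    schema_byname = {}   # module name -> item name -> single item spec
    for mod, spec in module_specs.items():
        schema_byname[mod] = {}
        if spec:
            schema[mod] = spec
            for item in spec:
                schema_byname[mod][item['item_name']] = item

    # The owner/name arguments then resolve with plain lookups:
    assert schema_byname['Auth']['queries.tcp']['item_type'] == 'integer'
    assert schema['Boss'] == module_specs['Boss']
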
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
new file mode 100644
index 0000000..e716b62
--- /dev/null
+++ b/src/bin/stats/stats.spec
@@ -0,0 +1,125 @@
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "status",
+ "command_description": "Show status of the stats daemon",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
+ "command_args": []
+ },
+ {
+ "command_name": "show",
+ "command_description": "Show the specified/all statistics data",
+ "command_args": [
+ {
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
+ }
+ ]
+ },
+ {
+ "command_name": "showschema",
+ "command_description": "show the specified/all statistics shema",
+ "command_args": [
+ {
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
+ }
+ ]
+ },
+ {
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
+ "command_args": [
+ {
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "item_description": "statistics data set of the owner",
+ "map_item_spec": []
+ }
+ ]
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Last update time",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "A localname of stats module given via CC protocol"
+ }
+ ]
+ }
+}
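
For reference, the commands declared in this spec are driven over the command channel in the same way stats_httpd.py does below with create_command('show'). The following sketch shows roughly how a caller could issue them; it assumes a running b10-msgq and a Stats module listening on the 'Stats' group, and the owner/name/data values are made up for illustration.

    import isc.cc.session
    import isc.config.ccsession

    # Sketch: issuing the commands declared in stats.spec over the CC channel.
    cc = isc.cc.session.Session()

    # 'show' with no arguments asks for all statistics data.
    seq = cc.group_sendmsg(isc.config.ccsession.create_command('show'), 'Stats')
    (answer, env) = cc.group_recvmsg(False, seq)
    (rcode, value) = isc.config.ccsession.parse_answer(answer)

    # 'showschema' narrowed by the optional owner/name arguments.
    cmd = isc.config.ccsession.create_command(
        'showschema', {'owner': 'Auth', 'name': 'queries.tcp'})
    seq = cc.group_sendmsg(cmd, 'Stats')

    # 'set' requires an owner and a data map, as declared above.
    cmd = isc.config.ccsession.create_command(
        'set', {'owner': 'Boss', 'data': {'boot_time': '2011-03-04T11:59:06Z'}})
    seq = cc.group_sendmsg(cmd, 'Stats')
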
diff --git a/src/bin/stats/stats.spec.in b/src/bin/stats/stats.spec.in
deleted file mode 100644
index 25f6b54..0000000
--- a/src/bin/stats/stats.spec.in
+++ /dev/null
@@ -1,61 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "show",
- "command_description": "show the specified/all statistics data",
- "command_args": [
- {
- "item_name": "stats_item_name",
- "item_type": "string",
- "item_optional": true,
- "item_default": ""
- }
- ]
- },
- {
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
- "command_args": [
- {
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
- }
- ]
- },
- {
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
- "command_args": [
- {
- "item_name": "stats_item_name",
- "item_type": "string",
- "item_optional": false,
- "item_default": ""
- }
- ]
- },
- {
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
index 97e9c78..042630d 100644
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -34,6 +34,16 @@ import isc.cc
import isc.config
import isc.util.process
+import isc.log
+from isc.log_messages.stats_httpd_messages import *
+
+isc.log.init("b10-stats-httpd")
+logger = isc.log.Logger("stats-httpd")
+
+# Some constants for debug levels.
+DBG_STATHTTPD_INIT = logger.DBGLVL_START_SHUT
+DBG_STATHTTPD_MESSAGING = logger.DBGLVL_COMMAND
+
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
@@ -46,7 +56,6 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -58,7 +67,6 @@ XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
# Assign this process name
isc.util.process.rename()
@@ -98,9 +106,7 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
return None
except StatsHttpdError as err:
self.send_error(500)
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s\n" % err)
+ logger.error(STATHTTPD_SERVER_ERROR, err)
return None
else:
self.send_response(200)
@@ -109,15 +115,6 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
self.end_headers()
return body
- def log_message(self, format, *args):
- """Change the default log format"""
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (self.address_string(),
- self.log_date_time_string(),
- format%args))
-
class HttpServerError(Exception):
"""Exception class for HttpServer class. It is intended to be
passed from the HttpServer object to the StatsHttpd object."""
@@ -134,13 +131,12 @@ class HttpServer(http.server.HTTPServer):
sys.stderr.write. They are intended to be referred by HttpHandler
object."""
def __init__(self, server_address, handler,
- xml_handler, xsd_handler, xsl_handler, log_writer, verbose=False):
+ xml_handler, xsd_handler, xsl_handler, log_writer):
self.server_address = server_address
self.xml_handler = xml_handler
self.xsd_handler = xsd_handler
self.xsl_handler = xsl_handler
self.log_writer = log_writer
- self.verbose = verbose
http.server.HTTPServer.__init__(self, server_address, handler)
class StatsHttpdError(Exception):
@@ -154,37 +150,33 @@ class StatsHttpd:
statistics module. It handles HTTP requests, and command channel
and config channel CC session. It uses select.select function
while waiting for clients requests."""
- def __init__(self, verbose=False):
- self.verbose = verbose
+ def __init__(self):
self.running = False
self.poll_intval = 0.5
self.write_log = sys.stderr.write
self.mccs = None
self.httpd = []
self.open_mccs()
+ self.config = {}
self.load_config()
- self.load_templates()
+ self.http_addrs = []
+ self.mccs.start()
self.open_httpd()
def open_mccs(self):
"""Opens a ModuleCCSession object"""
# create ModuleCCSession
- if self.verbose:
- self.write_log("[b10-stats-httpd] Starting CC Session\n")
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_STARTING_CC_SESSION)
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
- # read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
- self.stats_config_spec = self.stats_module_spec.get_config_spec()
- self.stats_module_name = self.stats_module_spec.get_module_name()
def close_mccs(self):
"""Closes a ModuleCCSession object"""
if self.mccs is None:
return
- if self.verbose:
- self.write_log("[b10-stats-httpd] Closing CC Session\n")
+
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_CLOSING_CC_SESSION)
self.mccs.close()
self.mccs = None
@@ -192,18 +184,19 @@ class StatsHttpd:
"""Loads configuration from spec file or new configuration
from the config manager"""
# load config
- if len(new_config) > 0:
- self.config.update(new_config)
- else:
- self.config = DEFAULT_CONFIG
- self.config.update(
- dict([
- (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
- for itm in self.mccs.get_module_spec().get_config_spec()
- ])
- )
+ if len(self.config) == 0:
+ self.config = dict([
+ (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+ for itm in self.mccs.get_module_spec().get_config_spec()
+ ])
+ self.config.update(new_config)
# set addresses and ports for HTTP
- self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+ addrs = []
+ if 'listen_on' in self.config:
+ for cf in self.config['listen_on']:
+ if 'address' in cf and 'port' in cf:
+ addrs.append((cf['address'], cf['port']))
+ self.http_addrs = addrs
def open_httpd(self):
"""Opens sockets for HTTP. Iterating each HTTP address to be
@@ -211,51 +204,44 @@ class StatsHttpd:
for addr in self.http_addrs:
self.httpd.append(self._open_httpd(addr))
- def _open_httpd(self, server_address, address_family=None):
+ def _open_httpd(self, server_address):
+ httpd = None
try:
- # try IPv6 at first
- if address_family is not None:
- HttpServer.address_family = address_family
- elif socket.has_ipv6:
- HttpServer.address_family = socket.AF_INET6
+ # get address family for the server_address before
+ # creating HttpServer object. If a specified address is
+ # not numerical, gaierror may be thrown.
+ address_family = socket.getaddrinfo(
+ server_address[0], server_address[1], 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+ )[0][0]
+ HttpServer.address_family = address_family
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
- self.write_log, self.verbose)
+ self.write_log)
+ logger.info(STATHTTPD_STARTED, server_address[0],
+ server_address[1])
+ return httpd
except (socket.gaierror, socket.error,
OverflowError, TypeError) as err:
- # try IPv4 next
- if HttpServer.address_family == socket.AF_INET6:
- httpd = self._open_httpd(server_address, socket.AF_INET)
- else:
- raise HttpServerError(
- "Invalid address %s, port %s: %s: %s" %
- (server_address[0], server_address[1],
- err.__class__.__name__, err))
- else:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Started on address %s, port %s\n" %
- server_address)
- return httpd
+ if httpd:
+ httpd.server_close()
+ raise HttpServerError(
+ "Invalid address %s, port %s: %s: %s" %
+ (server_address[0], server_address[1],
+ err.__class__.__name__, err))
def close_httpd(self):
"""Closes sockets for HTTP"""
- if len(self.httpd) == 0:
- return
- for ht in self.httpd:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Closing address %s, port %s\n" %
- (ht.server_address[0], ht.server_address[1])
- )
+ while len(self.httpd)>0:
+ ht = self.httpd.pop()
+ logger.info(STATHTTPD_CLOSING, ht.server_address[0],
+ ht.server_address[1])
ht.server_close()
- self.httpd = []
def start(self):
"""Starts StatsHttpd objects to run. Waiting for client
requests by using select.select functions"""
- self.mccs.start()
self.running = True
while self.running:
try:
@@ -285,10 +271,10 @@ class StatsHttpd:
def stop(self):
"""Stops the running StatsHttpd objects. Closes CC session and
HTTP handling sockets"""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Shutting down\n")
+ logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
+ self.running = False
def get_sockets(self):
"""Returns sockets to select.select"""
@@ -303,29 +289,29 @@ class StatsHttpd:
def config_handler(self, new_config):
"""Config handler for the ModuleCCSession object. It resets
addresses and ports to listen HTTP requests on."""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Loading config : %s\n" % str(new_config))
- for key in new_config.keys():
- if key not in DEFAULT_CONFIG:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Unknown known config: %s" % key)
+ logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
+ new_config)
+ errors = []
+ if not self.mccs.get_module_spec().\
+ validate_config(False, new_config, errors):
return isc.config.ccsession.create_answer(
- 1, "Unknown known config: %s" % key)
+ 1, ", ".join(errors))
# backup old config
old_config = self.config.copy()
- self.close_httpd()
self.load_config(new_config)
+ # If the HTTP sockets aren't open or new_config doesn't
+ # have 'listen_on', return immediately
+ if len(self.httpd) == 0 or 'listen_on' not in new_config:
+ return isc.config.ccsession.create_answer(0)
+ self.close_httpd()
try:
self.open_httpd()
except HttpServerError as err:
- if self.verbose:
- self.write_log("[b10-stats-httpd] %s\n" % err)
- self.write_log("[b10-stats-httpd] Restoring old config\n")
+ logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
- self.config_handler(old_config)
- return isc.config.ccsession.create_answer(
- 1, "[b10-stats-httpd] %s" % err)
+ self.load_config(old_config)
+ self.open_httpd()
+ return isc.config.ccsession.create_answer(1, str(err))
else:
return isc.config.ccsession.create_answer(0)
@@ -333,19 +319,18 @@ class StatsHttpd:
"""Command handler for the ModuleCCSesson object. It handles
"status" and "shutdown" commands."""
if command == "status":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'status' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_STATUS_COMMAND)
return isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")")
elif command == "shutdown":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'shutdown' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
- return isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down.")
+ return isc.config.ccsession.create_answer(0)
else:
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received unknown command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
@@ -354,8 +339,7 @@ class StatsHttpd:
the data which obtains from it"""
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'),
- self.stats_module_name)
+ isc.config.ccsession.create_command('show'), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -370,75 +354,43 @@ class StatsHttpd:
raise StatsHttpdError("Stats module: %s" % str(value))
def get_stats_spec(self):
- """Just returns spec data"""
- return self.stats_config_spec
-
- def load_templates(self):
- """Setup the bodies of XSD and XSL documents to be responds to
- HTTP clients. Before that it also creates XML tag structures by
- using xml.etree.ElementTree.Element class and substitutes
- concrete strings with parameters embed in the string.Template
- object."""
- # for XSD
- xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for item in self.get_stats_spec():
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- xsd_root.append(element)
- xsd_string = xml.etree.ElementTree.tostring(xsd_root)
- self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=xsd_string,
- xsd_namespace=XSD_NAMESPACE
- )
- assert self.xsd_body is not None
-
- # for XSL
- xsd_root = xml.etree.ElementTree.Element(
- "xsl:template",
- dict(match="*")) # started with xml:template tag
- for item in self.get_stats_spec():
- tr = xml.etree.ElementTree.Element("tr")
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
- xsl_string = xml.etree.ElementTree.tostring(xsd_root)
- self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
- xsl_string=xsl_string,
- xsd_namespace=XSD_NAMESPACE)
- assert self.xsl_body is not None
+ """Requests statistics data to the Stats daemon and returns
+ the data which obtains from it"""
+ try:
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command('showschema'), 'Stats')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ return value
+ else:
+ raise StatsHttpdError("Stats module: %s" % str(value))
+ except (isc.cc.session.SessionTimeout,
+ isc.cc.session.SessionError) as err:
+ raise StatsHttpdError("%s: %s" %
+ (err.__class__.__name__, err))
def xml_handler(self):
"""Handler which requests to Stats daemon to obtain statistics
data and returns the body of XML document"""
xml_list=[]
- for (k, v) in self.get_stats_data().items():
- (k, v) = (str(k), str(v))
- elem = xml.etree.ElementTree.Element(k)
- elem.text = v
+ for (mod, spec) in self.get_stats_data().items():
+ if not spec: continue
+ elem1 = xml.etree.ElementTree.Element(str(mod))
+ for (k, v) in spec.items():
+ elem2 = xml.etree.ElementTree.Element(str(k))
+ elem2.text = str(v)
+ elem1.append(elem2)
+ # The coding conversion is tricky. xml.etree.ElementTree.tostring()
+ # in Python 3.2 returns bytes (not a string) regardless of the
+ # encoding, while tostring() in Python 3.1 returns a string. To
+ # support both cases transparently, we first make sure tostring()
+ # returns bytes by specifying utf-8 and then convert the result to
+ # a plain string (the code below assumes this).
xml_list.append(
- xml.etree.ElementTree.tostring(elem))
+ str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
+ encoding='us-ascii'))
xml_string = "".join(xml_list)
self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
xml_string=xml_string,
@@ -450,18 +402,95 @@ class StatsHttpd:
def xsd_handler(self):
"""Handler which just returns the body of XSD document"""
+ # for XSD
+ xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ alltag = xml.etree.ElementTree.Element("all")
+ for item in spec:
+ element = xml.etree.ElementTree.Element(
+ "element",
+ dict( name=item["item_name"],
+ type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
+ minOccurs="1",
+ maxOccurs="1" ),
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ appinfo.text = item["item_title"]
+ documentation.text = item["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ element.append(annotation)
+ alltag.append(element)
+
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
+ mod_element.append(complextype)
+ xsd_root.append(mod_element)
+ # The coding conversion is tricky. xml.etree.ElementTree.tostring()
+ # in Python 3.2 returns bytes (not a string) regardless of the
+ # encoding, while tostring() in Python 3.1 returns a string. To
+ # support both cases transparently, we first make sure tostring()
+ # returns bytes by specifying utf-8 and then convert the result to
+ # a plain string (the code below assumes this).
+ xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
+ self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
+ xsd_string=xsd_string,
+ xsd_namespace=XSD_NAMESPACE
+ )
+ assert self.xsd_body is not None
return self.xsd_body
def xsl_handler(self):
"""Handler which just returns the body of XSL document"""
+ # for XSL
+ xsd_root = xml.etree.ElementTree.Element(
+ "xsl:template",
+ dict(match="*")) # started with xml:template tag
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ for item in spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td0 = xml.etree.ElementTree.Element("td")
+ td0.text = str(mod)
+ td1 = xml.etree.ElementTree.Element(
+ "td", { "class" : "title",
+ "title" : item["item_description"] })
+ td1.text = item["item_title"]
+ td2 = xml.etree.ElementTree.Element("td")
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ dict(select=mod+'/'+item["item_name"]))
+ td2.append(xsl_valueof)
+ tr.append(td0)
+ tr.append(td1)
+ tr.append(td2)
+ xsd_root.append(tr)
+ # The coding conversion is tricky. xml.etree.ElementTree.tostring()
+ # in Python 3.2 returns bytes (not a string) regardless of the
+ # encoding, while tostring() in Python 3.1 returns a string. To
+ # support both cases transparently, we first make sure tostring()
+ # returns bytes by specifying utf-8 and then convert the result to
+ # a plain string (the code below assumes this).
+ xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
+ self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
+ xsl_string=xsl_string,
+ xsd_namespace=XSD_NAMESPACE)
+ assert self.xsl_body is not None
return self.xsl_body
def open_template(self, file_name):
"""It opens a template file, and it loads all lines to a
string variable and returns string. Template object includes
the variable. Limitation of a file size isn't needed there."""
- lines = "".join(
- open(file_name, 'r').readlines())
+ f = open(file_name, 'r')
+ lines = "".join(f.readlines())
+ f.close()
assert lines is not None
return string.Template(lines)
@@ -472,14 +501,18 @@ if __name__ == "__main__":
"-v", "--verbose", dest="verbose", action="store_true",
help="display more about what is going on")
(options, args) = parser.parse_args()
- stats_httpd = StatsHttpd(verbose=options.verbose)
+ if options.verbose:
+ isc.log.init("b10-stats-httpd", "DEBUG", 99)
+ stats_httpd = StatsHttpd()
stats_httpd.start()
- except OptionValueError:
- sys.exit("[b10-stats-httpd] Error parsing options")
+ except OptionValueError as ove:
+ logger.fatal(STATHTTPD_BAD_OPTION_VALUE, ove)
+ sys.exit(1)
except isc.cc.session.SessionError as se:
- sys.exit("[b10-stats-httpd] Error creating module, "
- + "is the command channel daemon running?")
+ logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
+ sys.exit(1)
except HttpServerError as hse:
- sys.exit("[b10-stats-httpd] %s" % hse)
+ logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
+ sys.exit(1)
except KeyboardInterrupt as kie:
- sys.exit("[b10-stats-httpd] Interrupted, exiting")
+ logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
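
The comment repeated in xml_handler, xsd_handler and xsl_handler about the tostring() return type can be demonstrated in isolation. This standalone snippet (not part of the patch) shows the conversion the handlers rely on:

    import xml.etree.ElementTree

    elem = xml.etree.ElementTree.Element('Stats')
    child = xml.etree.ElementTree.Element('boot_time')
    child.text = '2011-03-04T11:59:06Z'
    elem.append(child)

    # With encoding='utf-8', tostring() returns bytes on Python 3.2 (a plain
    # string on 3.1), so the handlers decode the result before feeding it to
    # the string.Template bodies.
    raw = xml.etree.ElementTree.tostring(elem, encoding='utf-8')
    if isinstance(raw, bytes):
        raw = str(raw, encoding='us-ascii')
    print(raw)
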
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
new file mode 100644
index 0000000..0e984dc
--- /dev/null
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -0,0 +1,92 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_httpd_messages python module.
+
+% STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+
+% STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+
+% STATHTTPD_CLOSING_CC_SESSION stopping cc session
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+
+% STATHTTPD_CLOSING closing %1#%2
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+
+% STATHTTPD_HANDLE_CONFIG reading configuration: %1
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+
+% STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+
+% STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+
+% STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+
+% STATHTTPD_SERVER_ERROR HTTP server error: %1
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+
+% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+
+% STATHTTPD_SHUTDOWN shutting down
+The stats-httpd daemon is shutting down.
+
+% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+
+% STATHTTPD_STARTED listening on %1#%2
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+
+% STATHTTPD_STARTING_CC_SESSION starting cc session
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+
+% STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+
+% STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
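
To relate this message file to the logging calls added in stats_httpd.py above: each '%' entry becomes a constant in the generated isc.log_messages.stats_httpd_messages module, and the %1/%2 placeholders are filled from the extra arguments given to the logger. A minimal sketch of that pattern, mirroring calls from the patch with a made-up address and port:

    import isc.log
    from isc.log_messages.stats_httpd_messages import *

    isc.log.init("b10-stats-httpd")
    logger = isc.log.Logger("stats-httpd")

    # Debug level constants are taken from the Logger class, as in the patch.
    DBG_STATHTTPD_INIT = logger.DBGLVL_START_SHUT

    # %1 and %2 in STATHTTPD_STARTED are filled from the extra arguments;
    # the address and port here are made up for illustration.
    logger.info(STATHTTPD_STARTED, '127.0.0.1', 8000)
    logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_STARTING_CC_SESSION)
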
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
new file mode 100644
index 0000000..cfffb3a
--- /dev/null
+++ b/src/bin/stats/stats_messages.mes
@@ -0,0 +1,76 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_messages python module.
+
+% STATS_BAD_OPTION_VALUE bad command line argument: %1
+The stats module was called with a bad command-line argument and will
+not start.
+
+% STATS_CC_SESSION_ERROR error connecting to message bus: %1
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+
+% STATS_RECEIVED_NEW_CONFIG received new configuration: %1
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+
+% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
+The stats module received a command to show all statistics that it has
+collected.
+
+% STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1
+The stats module received a command to show the statistics that it has
+collected for the given item.
+
+% STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats module and it will now shut down.
+
+% STATS_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+
+% STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+
+% STATS_SEND_REQUEST_BOSS requesting boss to send statistics
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+
+% STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+
+% STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+
+% STATS_STARTING starting
+The stats module is now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
+
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index 5a13277..afd572f 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,19 +1,33 @@
-SUBDIRS = isc http testdata
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc msgq_socket_test
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
B10_FROM_SOURCE=$(abs_top_srcdir) \
+ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 07999ea..e867080 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,166 +13,269 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+In each of these tests we start several virtual components. They are
+not the real components; no external processes are started. They are
+just simple mock objects, each running in its own thread and pretending
+to be a bind10 module. This helps test the stats http server in a
+close-to-real environment.
+"""
+
import unittest
import os
-import http.server
-import string
-import fake_select
import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
+import isc
import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
DUMMY_DATA = {
- "auth.queries.tcp": 10000,
- "auth.queries.udp": 12000,
- "bind10.boot_time": "2011-03-04T11:59:05Z",
- "report_time": "2011-03-04T11:59:19Z",
- "stats.boot_time": "2011-03-04T11:59:06Z",
- "stats.last_update_time": "2011-03-04T11:59:07Z",
- "stats.lname": "4d70d40a_c at host",
- "stats.start_time": "2011-03-04T11:59:06Z",
- "stats.timestamp": 1299239959.560846
+ 'Boss' : {
+ "boot_time": "2011-03-04T11:59:06Z"
+ },
+ 'Auth' : {
+ "queries.tcp": 2,
+ "queries.udp": 3
+ },
+ 'Stats' : {
+ "report_time": "2011-03-04T11:59:19Z",
+ "boot_time": "2011-03-04T11:59:06Z",
+ "last_update_time": "2011-03-04T11:59:07Z",
+ "lname": "4d70d40a_c at host",
+ "timestamp": 1299239959.560846
+ }
}
-def push_answer(stats_httpd):
- stats_httpd.cc_session.group_sendmsg(
- { 'result':
- [ 0, DUMMY_DATA ] }, "Stats")
-
-def pull_query(stats_httpd):
- (msg, env) = stats_httpd.cc_session.group_recvmsg()
- if 'result' in msg:
- (ret, arg) = isc.config.ccsession.parse_answer(msg)
- else:
- (ret, arg) = isc.config.ccsession.parse_command(msg)
- return (ret, arg, env)
+def get_availaddr(address='127.0.0.1', port=8001):
+ """returns a tuple of address and port which is available to
+ listen on the platform. The first argument is a address for
+ search. The second argument is a port for search. If a set of
+ address and port is failed on the search for the availability, the
+ port number is increased and it goes on the next trial until the
+ available set of address and port is looked up. If the port number
+ reaches over 65535, it may stop the search and raise a
+ OverflowError exception."""
+ while True:
+ for addr in socket.getaddrinfo(
+ address, port, 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP):
+ sock = socket.socket(addr[0], socket.SOCK_STREAM)
+ try:
+ sock.bind((address, port))
+ return (address, port)
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ # This address and port number are already in use;
+ # try the next port number
+ port = port + 1
+
+def is_ipv6_enabled(address='::1', port=8001):
+ """checks IPv6 enabled on the platform. address for check is '::1'
+ and port for check is random number between 8001 and
+ 65535. Retrying is 3 times even if it fails. The built-in socket
+ module provides a 'has_ipv6' parameter, but it's not used here
+ because there may be a situation where the value is True on an
+ environment where the IPv6 config is disabled."""
+ for p in random.sample(range(port, 65535), 3):
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((address, p))
+ return True
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ return False
class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
-
def setUp(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.assertTrue(type(self.stats_httpd.httpd) is list)
- self.httpd = self.stats_httpd.httpd
- for ht in self.httpd:
- self.assertTrue(ht.verbose)
- self.stats_httpd.cc_session.verbose = False
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ (self.address, self.port) = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+ self.stats_httpd = self.stats_httpd_server.server
+ self.stats_httpd_server.run()
+ self.client = http.client.HTTPConnection(self.address, self.port)
+ self.client._http_vsn_str = 'HTTP/1.0\n'
+ self.client.connect()
- def test_do_GET(self):
- for ht in self.httpd:
- self._test_do_GET(ht._handler)
+ def tearDown(self):
+ self.client.close()
+ self.stats_httpd_server.shutdown()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
- def _test_do_GET(self, handler):
+ def test_do_GET(self):
+ self.assertTrue(type(self.stats_httpd.httpd) is list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
# URL is '/bind10/statistics/xml'
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- handler.do_GET()
- (ret, arg, env) = pull_query(self.stats_httpd)
- self.assertEqual(ret, "show")
- self.assertIsNone(arg)
- self.assertTrue('group' in env)
- self.assertEqual(env['group'], 'Stats')
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
- self.assertTrue(handler.response.body.find(str(v))>0)
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('stats_data') > 0)
+ for (k,v) in root.attrib.items():
+ if k.find('schemaLocation') > 0:
+ self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
+ for mod in DUMMY_DATA:
+ for (item, value) in DUMMY_DATA[mod].items():
+ self.assertIsNotNone(root.find(mod + '/' + item))
# URL is '/bind10/statistics/xsd'
- handler.path = stats_httpd.XSD_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
+ xsdpath = '/'.join(tags)
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ for elm in root.findall(xsdpath):
+ self.assertIsNotNone(elm.attrib['name'])
+ self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
# URL is '/bind10/statistics/xsl'
- handler.path = stats_httpd.XSL_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ xslpath = url_trans + 'template/' + url_xhtml + 'tr'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ for tr in root.findall(xslpath):
+ tds = tr.findall(url_xhtml + 'td')
+ self.assertIsNotNone(tds)
+ self.assertEqual(type(tds), list)
+ self.assertTrue(len(tds) > 2)
+ self.assertTrue(hasattr(tds[0], 'text'))
+ self.assertTrue(tds[0].text in DUMMY_DATA)
+ valueof = tds[2].find(url_trans + 'value-of')
+ self.assertIsNotNone(valueof)
+ self.assertTrue(hasattr(valueof, 'attrib'))
+ self.assertIsNotNone(valueof.attrib)
+ self.assertTrue('select' in valueof.attrib)
+ self.assertTrue(valueof.attrib['select'] in \
+ [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
# 302 redirect
- handler.path = '/'
- handler.headers = {'Host': 'my.host.domain'}
- handler.do_GET()
- self.assertEqual(handler.response.code, 302)
- self.assertEqual(handler.response.headers["Location"],
- "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
+ self.client._http_vsn_str = 'HTTP/1.1'
+ self.client.putrequest('GET', '/')
+ self.client.putheader('Host', self.address)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'),
+ "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
# 404 NotFound
- handler.path = '/path/to/foo/bar'
- handler.headers = {}
- handler.do_GET()
- self.assertEqual(handler.response.code, 404)
-
- # failure case(connection with Stats is down)
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = True
- handler.do_GET()
- self.stats_httpd.cc_session._socket._closed = False
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
-
- # failure case(Stats module returns err)
- handler.path = stats_httpd.XML_URL_PATH
- self.stats_httpd.cc_session.group_sendmsg(
- { 'result': [ 1, "I have an error." ] }, "Stats")
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = False
- handler.do_GET()
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+
+ def test_do_GET_failed1(self):
+ # checks status
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ # failure case(Stats is down)
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+ self.assertFalse(self.stats.running)
+ self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ def test_do_GET_failed2(self):
+ # failure case(Stats replies an error)
+ self.stats.mccs.set_command_handler(
+ lambda cmd, args: \
+ isc.config.ccsession.create_answer(1, "I have an error.")
+ )
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
def test_do_HEAD(self):
- for ht in self.httpd:
- self._test_do_HEAD(ht._handler)
-
- def _test_do_HEAD(self, handler):
- handler.path = '/path/to/foo/bar'
- handler.do_HEAD()
- self.assertEqual(handler.response.code, 404)
-
- def test_log_message(self):
- for ht in self.httpd:
- self._test_log_message(ht._handler)
-
- def _test_log_message(self, handler):
- # switch write_log function
- handler.server.log_writer = handler.response._write_log
- log_message = 'ABCDEFG'
- handler.log_message("%s", log_message)
- self.assertEqual(handler.response.log,
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (handler.address_string(),
- handler.log_date_time_string(),
- log_message))
+ self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+
+ self.client.putrequest('HEAD', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
-
def test_raises(self):
try:
raise stats_httpd.HttpServerError('Nothing')
@@ -181,20 +284,24 @@ class TestHttpServerError(unittest.TestCase):
class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
+ def setUp(self):
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+
+ def tearDown(self):
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_httpserver(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.verbose, self.verbose)
- self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
- self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
- self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
- self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
- self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertEqual(type(self.stats_httpd.httpd), list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ for httpd in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
@@ -209,136 +316,173 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- self.verbose = True
- fake_socket._CLOSED = False
- fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats_server.run()
+ # checking IPv6 enabled on this platform
+ self.ipv6_enabled = is_ipv6_enabled()
def tearDown(self):
- self.stats_httpd.stop()
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_init(self):
- self.assertTrue(self.stats_httpd.verbose)
- self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
- self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
- id(self.stats_httpd.mccs.get_socket()))
- for ht in self.stats_httpd.httpd:
- self.assertFalse(ht.socket._closed)
- self.assertEqual(ht.socket.fileno(), id(ht.socket))
- fake_socket._CLOSED = True
- self.assertRaises(isc.cc.session.SessionError,
- stats_httpd.StatsHttpd)
- fake_socket._CLOSED = False
+ server_address = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_address)
+ self.assertEqual(self.stats_httpd.running, False)
+ self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+ self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+ self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+ self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+ self.assertEqual(len(self.stats_httpd.config), 2)
+ self.assertTrue('listen_on' in self.stats_httpd.config)
+ self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+ self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
+
+ def test_openclose_mccs(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.close_mccs()
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.stats_httpd.open_mccs()
+ self.assertIsNotNone(self.stats_httpd.mccs)
+ self.stats_httpd.mccs = None
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.assertEqual(self.stats_httpd.close_mccs(), None)
def test_mccs(self):
- self.stats_httpd.open_mccs()
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
self.assertTrue(
- isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+ isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
self.assertTrue(
isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
- self.assertTrue(
- isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
- for cfg in self.stats_httpd.stats_config_spec:
- self.assertTrue('item_name' in cfg)
- self.assertTrue(cfg['item_name'] in DUMMY_DATA)
- self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
- def test_load_config(self):
- self.stats_httpd.load_config()
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+ statistics_spec = self.stats_httpd.get_stats_spec()
+ for mod in DUMMY_DATA:
+ self.assertTrue(mod in statistics_spec)
+ for cfg in statistics_spec[mod]:
+ self.assertTrue('item_name' in cfg)
+ self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+ self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+ self.stats_httpd.close_mccs()
+ self.assertIsNone(self.stats_httpd.mccs)
def test_httpd(self):
# dual stack (addresses is ipv4 and ipv6)
- fake_socket.has_ipv6 = True
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
- self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
- self.assertTrue(
- stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ if self.ipv6_enabled:
+ server_addresses = (get_availaddr('::1'), get_availaddr())
+ self.stats_httpd = MyStatsHttpd(*server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+ self.assertTrue(isinstance(ht.socket, socket.socket))
# dual stack (address is ipv6)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # dual stack (address is ipv4)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr('::1')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # dual/single stack (address is ipv4)
+ server_addresses = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack (force set ipv6 )
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.assertRaises(stats_httpd.HttpServerError,
- self.stats_httpd.open_httpd)
-
- # hostname
- self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
- self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
- self.stats_httpd.open_httpd()
+ # any address (IPv4)
+ server_addresses = get_availaddr(address='0.0.0.0')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # any address (IPv6)
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr(address='::')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # existent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+ get_availaddr(address='localhost'))
+
+ # nonexistent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
# over flow of port number
- self.stats_httpd.http_addrs = [ ('', 80000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
# negative
- self.stats_httpd.http_addrs = [ ('', -8000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
- # alphabet
- self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
- def test_start(self):
- self.stats_httpd.cc_session.group_sendmsg(
- { 'command': [ "shutdown" ] }, "StatsHttpd")
- self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
- self.assertRaises(
- fake_select.error, self.stats_httpd.start)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
- def test_stop(self):
- # success case
- fake_socket._CLOSED = False
- self.stats_httpd.stop()
+ # alphabet
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+
+ # Address already in use
+ server_addresses = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+ self.stats_httpd_server.run()
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+ send_shutdown("StatsHttpd")
+
+ def test_running(self):
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd = self.stats_httpd_server.server
self.assertFalse(self.stats_httpd.running)
- self.assertIsNone(self.stats_httpd.mccs)
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.socket._closed)
- self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+ self.stats_httpd_server.run()
+ self.assertEqual(send_command("status", "StatsHttpd"),
+ (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats_httpd.running)
+ self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+ self.assertFalse(self.stats_httpd.running)
+ self.stats_httpd_server.shutdown()
+
# failure case
- self.stats_httpd.cc_session._socket._closed = False
- self.stats_httpd.open_mccs()
- self.stats_httpd.cc_session._socket._closed = True
- self.stats_httpd.stop() # No excetion raises
- self.stats_httpd.cc_session._socket._closed = False
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.cc_session.close()
+ self.assertRaises(ValueError, self.stats_httpd.start)
+
+ def test_failure_with_a_select_error(self):
+ """checks that select.error is raised if an exception other
+ than errno.EINTR occurs while selecting"""
+ def raise_select_except(*args):
+ raise select.error('dummy error')
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertRaises(select.error, self.stats_httpd.start)
+ stats_httpd.select.select = orig_select
+
+ def test_nofailure_with_errno_EINTR(self):
+ """checks no exception is raised if errno.EINTR is raised
+ while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error(errno.EINTR)
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd_server.run()
+ self.stats_httpd_server.shutdown()
+ stats_httpd.select.select = orig_select
def test_open_template(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
# successful conditions
tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
@@ -372,13 +516,13 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.open_template, '/path/to/foo/bar')
def test_commands(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(self.stats_httpd.command_handler("status", None),
isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
self.stats_httpd.running = True
self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down."))
+ isc.config.ccsession.create_answer(0))
self.assertFalse(self.stats_httpd.running)
self.assertEqual(
self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -386,48 +530,153 @@ class TestStatsHttpd(unittest.TestCase):
1, "Unknown command: __UNKNOWN_COMMAND__"))
def test_config(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(
self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
isc.config.ccsession.create_answer(
- 1, "Unknown known config: _UNKNOWN_KEY_"))
- self.assertEqual(
- self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::2",port=8000)])),
- isc.config.ccsession.create_answer(0))
- self.assertTrue("listen_on" in self.stats_httpd.config)
- for addr in self.stats_httpd.config["listen_on"]:
- self.assertTrue("address" in addr)
- self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::2")
- self.assertTrue(addr["port"] == 8000)
+ 1, "unknown item _UNKNOWN_KEY_"))
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::1",port=80)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::1")
- self.assertTrue(addr["port"] == 80)
-
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ if self.ipv6_enabled:
+ addresses = get_availaddr("::1")
+ self.assertEqual(
+ self.stats_httpd.config_handler(
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+ isc.config.ccsession.create_answer(0))
+ self.assertTrue("listen_on" in self.stats_httpd.config)
+ for addr in self.stats_httpd.config["listen_on"]:
+ self.assertTrue("address" in addr)
+ self.assertTrue("port" in addr)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "1.2.3.4")
- self.assertTrue(addr["port"] == 54321)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
(ret, arg) = isc.config.ccsession.parse_answer(
self.stats_httpd.config_handler(
dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
)
self.assertEqual(ret, 1)
+ def test_xml_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : { 'foo':'bar' } }
+ xml_body1 = self.stats_httpd.open_template(
+ stats_httpd.XML_TEMPLATE_LOCATION).substitute(
+ xml_string='<Dummy><foo>bar</foo></Dummy>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE,
+ xsd_url_path=stats_httpd.XSD_URL_PATH,
+ xsl_url_path=stats_httpd.XSL_URL_PATH)
+ xml_body2 = self.stats_httpd.xml_handler()
+ self.assertEqual(type(xml_body1), str)
+ self.assertEqual(type(xml_body2), str)
+ self.assertEqual(xml_body1, xml_body2)
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : {'bar':'foo'} }
+ xml_body2 = self.stats_httpd.xml_handler()
+ self.assertNotEqual(xml_body1, xml_body2)
+
+ def test_xsd_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsd_body1 = self.stats_httpd.open_template(
+ stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
+ xsd_string=\
+ '<all><element name="Dummy"><complexType><all>' \
+ + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ + '<annotation><appinfo>Foo</appinfo>' \
+ + '<documentation>foo is bar</documentation>' \
+ + '</annotation></element></all>' \
+ + '</complexType></element></all>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsd_body2 = self.stats_httpd.xsd_handler()
+ self.assertEqual(type(xsd_body1), str)
+ self.assertEqual(type(xsd_body2), str)
+ self.assertEqual(xsd_body1, xsd_body2)
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsd_body2 = self.stats_httpd.xsd_handler()
+ self.assertNotEqual(xsd_body1, xsd_body2)
+
+ def test_xsl_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsl_body1 = self.stats_httpd.open_template(
+ stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
+ xsl_string='<xsl:template match="*"><tr>' \
+ + '<td>Dummy</td>' \
+ + '<td class="title" title="foo is bar">Foo</td>' \
+ + '<td><xsl:value-of select="Dummy/foo" /></td>' \
+ + '</tr></xsl:template>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsl_body2 = self.stats_httpd.xsl_handler()
+ self.assertEqual(type(xsl_body1), str)
+ self.assertEqual(type(xsl_body2), str)
+ self.assertEqual(xsl_body1, xsl_body2)
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsl_body2 = self.stats_httpd.xsl_handler()
+ self.assertNotEqual(xsl_body1, xsl_body2)
+
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
# variable
@@ -437,8 +686,6 @@ class TestStatsHttpd(unittest.TestCase):
imp.reload(stats_httpd)
os.environ["B10_FROM_SOURCE"] = tmppath
imp.reload(stats_httpd)
- stats_httpd.socket = fake_socket
- stats_httpd.select = fake_select
if __name__ == "__main__":
unittest.main()
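
The rewritten b10-stats-httpd_test.py above builds real listening sockets
through the get_availaddr() helper and runs the server under a
ThreadingServerManager; both come from test_utils.py, which is not part of
this excerpt. Purely as orientation, and as an assumption about its shape
rather than the committed helper, get_availaddr() presumably asks the kernel
for a free ephemeral port on the requested address, along these lines:

    import socket

    def get_availaddr(address='127.0.0.1'):
        """Hypothetical sketch: return an (address, port) pair that is
        currently free to bind on this host; the real helper in
        test_utils.py may choose ports differently."""
        family = socket.AF_INET6 if ':' in address else socket.AF_INET
        sock = socket.socket(family, socket.SOCK_STREAM)
        try:
            sock.bind((address, 0))   # port 0: the kernel picks a free port
            return (address, sock.getsockname()[1])
        finally:
            sock.close()

Closing the probe socket before the test server rebinds the port leaves a
small race window, which is normally acceptable in unit tests.
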
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index eccabdc..3813c7e 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,649 +13,593 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats module in a close
+to real environment.
+"""
+
+import unittest
import os
-import sys
+import threading
+import io
import time
-import unittest
import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-# setting Constant
-if sys.path[0] == '':
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
-else:
- TEST_SPECFILE_LOCATION = sys.path[0] + "/testdata/stats_test.spec"
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilities(unittest.TestCase):
+ items = [
+ { 'item_name': 'test_int1', 'item_type': 'integer', 'item_default': 12345 },
+ { 'item_name': 'test_real1', 'item_type': 'real', 'item_default': 12345.6789 },
+ { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True },
+ { 'item_name': 'test_str1', 'item_type': 'string', 'item_default': 'ABCD' },
+ { 'item_name': 'test_list1', 'item_type': 'list', 'item_default': [1,2,3],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map1', 'item_type': 'map', 'item_default': {'a':1,'b':2,'c':3},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'integer'},
+ { 'item_name': 'b', 'item_type': 'integer'},
+ { 'item_name': 'c', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_int2', 'item_type': 'integer' },
+ { 'item_name': 'test_real2', 'item_type': 'real' },
+ { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+ { 'item_name': 'test_str2', 'item_type': 'string' },
+ { 'item_name': 'test_list2', 'item_type': 'list',
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map2', 'item_type': 'map',
+ 'map_item_spec' : [ { 'item_name': 'A', 'item_type': 'integer'},
+ { 'item_name': 'B', 'item_type': 'integer'},
+ { 'item_name': 'C', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_none', 'item_type': 'none' },
+ { 'item_name': 'test_list3', 'item_type': 'list', 'item_default': ["one","two","three"],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+ { 'item_name': 'test_map3', 'item_type': 'map', 'item_default': {'a':'one','b':'two','c':'three'},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'string'},
+ { 'item_name': 'b', 'item_type': 'string'},
+ { 'item_name': 'c', 'item_type': 'string'} ] }
+ ]
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
- self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- self.stats_data = {
- 'report_time' : get_datetime(),
- 'bind10.boot_time' : "1970-01-01T00:00:00Z",
- 'stats.timestamp' : get_timestamp(),
- 'stats.lname' : self.session.lname,
- 'auth.queries.tcp': 0,
- 'auth.queries.udp': 0,
- "stats.boot_time": get_datetime(),
- "stats.start_time": get_datetime(),
- "stats.last_update_time": get_datetime()
- }
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
-
- def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
- self.session.close()
-
- def test_local_func(self):
- """
- Test for local function
-
- """
- # test for result_ok
- self.assertEqual(type(result_ok()), dict)
- self.assertEqual(result_ok(), {'result': [0]})
- self.assertEqual(result_ok(1), {'result': [1]})
- self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
- self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
- self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
- self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
- # test for get_timestamp
- self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
- # test for get_datetime
- self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
- def test_show_command(self):
- """
- Test for show command
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command with arg
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
- self.assertEqual(len(self.subject.session.message_queue), 1)
- self.subject.check()
- result_data = self.subject.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
- result_data)
- self.assertEqual(len(self.subject.session.message_queue), 0)
-
- # test show command with arg which has wrong name
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_set_command(self):
- """
- Test for set command
-
- """
- # test set command
- self.stats_data['auth.queries.udp'] = 54321
- self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.udp': 54321 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2
- self.stats_data['auth.queries.udp'] = 0
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 2
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 54322
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.tcp': 54322 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 3
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_remove_command(self):
- """
- Test for remove command
-
- """
- self.session.group_sendmsg({"command":
- [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
- self.assertFalse('bind10.boot_time' in self.stats_data)
-
- # test show command with arg
- self.session.group_sendmsg({"command":
- [ "show", {"stats_item_name": 'bind10.boot_time'}]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertFalse('bind10.boot_time' in result_data['result'][1])
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_reset_command(self):
- """
- Test for reset command
-
- """
- self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_status_command(self):
- """
- Test for status command
-
- """
- self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(0, "I'm alive."),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_unknown_command(self):
- """
- Test for unknown command
-
- """
- self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_shutdown_command(self):
- """
- Test for shutdown command
-
- """
- self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.assertTrue(self.subject.running)
- self.subject.check()
- self.assertFalse(self.subject.running)
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
+ self.const_timestamp = 1308730448.965706
+ self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ stats.time = lambda : self.const_timestamp
+ stats.gmtime = lambda : self.const_timetuple
- def test_some_commands(self):
- """
- Test for some commands in a row
-
- """
- # test set command
- self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
- self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'bind10.boot_time' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
- result_data)
- self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2nd
- self.stats_data['auth.queries.udp'] = 98765
- self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
- self.session.group_sendmsg({ "command": [
- "set", { 'stats_data': {
- 'auth.queries.udp':
- self.stats_data['auth.queries.udp']
- } }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({"command": [
- "show", {'stats_item_name': 'auth.queries.udp'}
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 4321
- self.session.group_sendmsg({"command": [
- "set",
- {'stats_data': {'auth.queries.tcp': 4321 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check value
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 4
- self.stats_data['auth.queries.tcp'] = 67890
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command for all values
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands2(self):
- """
- Test for some commands in a row using list-type value
-
- """
- self.stats_data['listtype'] = [1, 2, 3]
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({ "command": [
- "set", {'stats_data': {'listtype': [1, 2, 3] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype'}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
- result_data)
- self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands3(self):
- """
- Test for some commands in a row using dictionary-type value
-
- """
- self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
- result_data)
- self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_config_update(self):
- """
- Test for config update
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
-
- def test_for_boss(self):
- last_queue = self.session.old_message_queue.pop()
- self.assertEqual(
- last_queue.msg, {'command': ['sendstats']})
+ def test_get_spec_defaults(self):
self.assertEqual(
- last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+ stats.get_spec_defaults(self.items), {
+ 'test_int1' : 12345 ,
+ 'test_real1' : 12345.6789 ,
+ 'test_bool1' : True ,
+ 'test_str1' : 'ABCD' ,
+ 'test_list1' : [1,2,3] ,
+ 'test_map1' : {'a':1,'b':2,'c':3},
+ 'test_int2' : 0 ,
+ 'test_real2' : 0.0,
+ 'test_bool2' : False,
+ 'test_str2' : "",
+ 'test_list2' : [0],
+ 'test_map2' : { 'A' : 0, 'B' : 0, 'C' : 0 },
+ 'test_none' : None,
+ 'test_list3' : [ "one", "two", "three" ],
+ 'test_map3' : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+ self.assertEqual(stats.get_spec_defaults(None), {})
+ self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+ def test_get_timestamp(self):
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+ def test_get_datetime(self):
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertNotEqual(stats.get_datetime(
+ (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+ def setUp(self):
+ self.dummy_func = lambda *x, **y : (x, y)
+ self.dummy_args = (1,2,3)
+ self.dummy_kwargs = {'a':1,'b':2,'c':3}
+ self.cback1 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback2 = stats.Callback(
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback3 = stats.Callback(
+ command=self.dummy_func,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback4 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args
+ )
+
+ def test_init(self):
+ self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+ (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+ (None, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+ (self.dummy_func, (), self.dummy_kwargs))
+ self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+ (self.dummy_func, self.dummy_args, {}))
+
+ def test_call(self):
+ self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+ self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+ self.assertEqual(self.cback2(), None)
+ self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+ self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+ self.assertEqual(self.cback4(), (self.dummy_args, {}))
+ self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+ self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+class TestStats(unittest.TestCase):
def setUp(self):
- self.session = Session(verbose=True)
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats = stats.Stats()
+ self.const_timestamp = 1308730448.965706
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ self.const_default_datetime = '1970-01-01T00:00:00Z'
def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
+
+ def test_init(self):
+ self.assertEqual(self.stats.module_name, 'Stats')
+ self.assertFalse(self.stats.running)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_status' in self.stats.callbacks)
+ self.assertTrue('command_shutdown' in self.stats.callbacks)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_showschema' in self.stats.callbacks)
+ self.assertTrue('command_set' in self.stats.callbacks)
+
+ def test_init_undefcmd(self):
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "_undef_command_",
+ "command_description": "a undefined command in stats",
+ "command_args": []
+ }
+ ],
+ "statistics": []
+ }
+}
+"""
+ orig_spec_location = stats.SPECFILE_LOCATION
+ stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+ self.assertRaises(stats.StatsError, stats.Stats)
+ stats.SPECFILE_LOCATION = orig_spec_location
+
+ def test_start(self):
+ # start without err
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.assertFalse(self.stats.running)
+ self.stats_server.run()
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None))
+ self.assertFalse(self.stats.running)
+ self.stats_server.shutdown()
+
+ # start with err
+ self.stats = stats.Stats()
+ self.stats.update_statistics_data = lambda x,**y: ['an error']
+ self.assertRaises(stats.StatsError, self.stats.start)
+
+ def test_handlers(self):
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ # config_handler
+ self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+ isc.config.create_answer(0))
+
+ # command_handler
+ self.base.boss.server._started.wait()
+ self.base.boss.server._started.clear()
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command(
+ 'set', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'data' : { 'boot_time' : self.const_datetime } }),
+ (0, None))
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command('status', 'Stats'),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ (rcode, value) = send_command('show', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ self.assertTrue('boot_time' in value['Boss'])
+ self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertTrue('report_time' in value['Stats'])
+ self.assertTrue('boot_time' in value['Stats'])
+ self.assertTrue('last_update_time' in value['Stats'])
+ self.assertTrue('timestamp' in value['Stats'])
+ self.assertTrue('lname' in value['Stats'])
+ (rcode, value) = send_command('showschema', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ for item in value['Boss']:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+ for item in value['Stats']:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
- def test_specfile(self):
+ self.assertEqual(
+ send_command('__UNKNOWN__', 'Stats'),
+ (1, "Unknown command: '__UNKNOWN__'"))
+
+ self.stats_server.shutdown()
+
+ def test_update_modules(self):
+ self.assertEqual(len(self.stats.modules), 0)
+ self.stats.update_modules()
+ self.assertTrue('Stats' in self.stats.modules)
+ self.assertTrue('Boss' in self.stats.modules)
+ self.assertFalse('Dummy' in self.stats.modules)
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['timestamp'], 0.0)
+ self.assertEqual(my_statistics_data['lname'], "")
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ orig_parse_answer = stats.isc.config.ccsession.parse_answer
+ stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+ self.assertRaises(stats.StatsError, self.stats.update_modules)
+ stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+ def test_get_statistics_data(self):
+ my_statistics_data = self.stats.get_statistics_data()
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('Boss' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('boot_time' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+ self.assertEqual(my_statistics_data, 0.0)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+ self.assertEqual(my_statistics_data, '')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Stats', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Foo', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ name='Bar')
+
+ def test_update_statistics_data(self):
+ self.stats.update_statistics_data(owner='Stats', lname='foo@bar')
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['lname'], 'foo@bar')
+ self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+ self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+ ['0.0 should be a string'])
+ self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+ ['unknown module name: Dummy'])
+
+ def test_commands(self):
+ # status
+ self.assertEqual(self.stats.command_status(),
+ isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ # shutdown
+ self.stats.running = True
+ self.assertEqual(self.stats.command_shutdown(),
+ isc.config.create_answer(0))
+ self.assertFalse(self.stats.running)
+
+ def test_command_show(self):
+ self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_show(owner='Auth'),
+ isc.config.create_answer(
+ 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+ isc.config.create_answer(
+ 0, 0))
+ orig_get_timestamp = stats.get_timestamp
+ orig_get_datetime = stats.get_datetime
+ stats.get_timestamp = lambda : self.const_timestamp
+ stats.get_datetime = lambda : self.const_datetime
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+ isc.config.create_answer(0, self.const_datetime))
+ self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+ self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+ stats.get_timestamp = orig_get_timestamp
+ stats.get_datetime = orig_get_datetime
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertRaises(
+ stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+ def test_command_showchema(self):
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema())
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Auth' in value)
+ self.assertFalse('__Dummy__' in value)
+ schema = value['Stats']
+ self.assertEqual(len(schema), 5)
+ for item in schema:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ schema = value['Boss']
+ self.assertEqual(len(schema), 1)
+ for item in schema:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+
+ schema = value['Auth']
+ self.assertEqual(len(schema), 2)
+ for item in schema:
+ self.assertTrue(len(item) == 6)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ for item in value:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats', name='report_time'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ self.assertTrue(len(value) == 7)
+ self.assertTrue('item_name' in value)
+ self.assertTrue('item_type' in value)
+ self.assertTrue('item_optional' in value)
+ self.assertTrue('item_default' in value)
+ self.assertTrue('item_title' in value)
+ self.assertTrue('item_description' in value)
+ self.assertTrue('item_format' in value)
+ self.assertEqual(value['item_name'], 'report_time')
+ self.assertEqual(value['item_format'], 'date-time')
+
+ self.assertEqual(self.stats.command_showschema(owner='Foo'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_showschema(owner='Auth'),
+ isc.config.create_answer(
+ 0, [{
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ },
+ {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+ "item_name": "queries.udp",
+ "item_optional": False,
+ "item_title": "Queries UDP",
+ "item_type": "integer"
+ }]))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+ isc.config.create_answer(
+ 0, {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ }))
+
+ self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Stats, name: bar"))
+ self.assertEqual(self.stats.command_showschema(name='bar'),
+ isc.config.create_answer(
+ 1, "module name is not specified"))
+
+ def test_command_set(self):
+ orig_get_datetime = stats.get_datetime
+ stats.get_datetime = lambda : self.const_datetime
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_set(owner='Boss',
+ data={ 'boot_time' : self.const_datetime }))
+ stats.get_datetime = orig_get_datetime
+ self.assertEqual(rcode, 0)
+ self.assertTrue(value is None)
+ self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : 'foo@bar' }),
+ isc.config.create_answer(0, None))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: unknown item lname"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: No statistics specification"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [
+ {
+ "item_name": "dummy",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "brabra"
+ } ] } )
+ self.assertRaises(stats.StatsError,
+ self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+ def test_osenv(self):
"""
- Test for specfile
-
+ test for the environment variable "B10_FROM_SOURCE"
+ "B10_FROM_SOURCE" is set in the Makefile
"""
- if "B10_FROM_SOURCE" in os.environ:
- self.assertEqual(stats.SPECFILE_LOCATION,
+ # test case having B10_FROM_SOURCE
+ self.assertTrue("B10_FROM_SOURCE" in os.environ)
+ self.assertEqual(stats.SPECFILE_LOCATION, \
os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats.spec")
- self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
- os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats-schema.spec")
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats.spec")
+ # test case not having B10_FROM_SOURCE
+ path = os.environ["B10_FROM_SOURCE"]
+ os.environ.pop("B10_FROM_SOURCE")
+ self.assertFalse("B10_FROM_SOURCE" in os.environ)
+ # import stats again
+ imp.reload(stats)
+ # revert the changes
+ os.environ["B10_FROM_SOURCE"] = path
imp.reload(stats)
- # change path of SPECFILE_LOCATION
- stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session, verbose=True)
- self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject, verbose=True)
-
- self.assertEqual(self.listener.stats_spec, [])
- self.assertEqual(self.listener.stats_data, {})
-
- self.assertEqual(self.listener.commands_spec, [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }])
-
- def test_func_initialize_data(self):
- """
- Test for initialize_data function
-
- """
- # prepare for sample data set
- stats_spec = [
- {
- "item_name": "none_sample",
- "item_type": "null",
- "item_default": "None"
- },
- {
- "item_name": "boolean_sample",
- "item_type": "boolean",
- "item_default": True
- },
- {
- "item_name": "string_sample",
- "item_type": "string",
- "item_default": "A something"
- },
- {
- "item_name": "int_sample",
- "item_type": "integer",
- "item_default": 9999999
- },
- {
- "item_name": "real_sample",
- "item_type": "real",
- "item_default": 0.0009
- },
- {
- "item_name": "list_sample",
- "item_type": "list",
- "item_default": [0, 1, 2, 3, 4],
- "list_item_spec": []
- },
- {
- "item_name": "map_sample",
- "item_type": "map",
- "item_default": {'name':'value'},
- "map_item_spec": []
- },
- {
- "item_name": "other_sample",
- "item_type": "__unknown__",
- "item_default": "__unknown__"
- }
- ]
- # data for comparison
- stats_data = {
- 'none_sample': None,
- 'boolean_sample': True,
- 'string_sample': 'A something',
- 'int_sample': 9999999,
- 'real_sample': 0.0009,
- 'list_sample': [0, 1, 2, 3, 4],
- 'map_sample': {'name':'value'},
- 'other_sample': '__unknown__'
- }
- self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
- def test_func_main(self):
- # explicitly make failed
- self.session.close()
- stats.main(session=self.session)
- def test_osenv(self):
- """
- test for not having environ "B10_FROM_SOURCE"
- """
- if "B10_FROM_SOURCE" in os.environ:
- path = os.environ["B10_FROM_SOURCE"]
- os.environ.pop("B10_FROM_SOURCE")
- imp.reload(stats)
- os.environ["B10_FROM_SOURCE"] = path
- imp.reload(stats)
-
-def result_ok(*args):
- if args:
- return { 'result': list(args) }
- else:
- return { 'result': [ 0 ] }
+def test_main():
+ unittest.main()
if __name__ == "__main__":
- unittest.main()
+ test_main()
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
- pass
-
-def select(rlst, wlst, xlst, timeout):
- if type(timeout) != int and type(timeout) != float:
- raise TypeError("Error: %s must be integer or float"
- % timeout.__class__.__name__)
- for s in rlst + wlst + xlst:
- if type(s) != fake_socket.socket:
- raise TypeError("Error: %s must be a dummy socket"
- % s.__class__.__name__)
- s._called = s._called + 1
- if s._called > 3:
- raise error("Something is happened!")
- elif s._called > 2:
- raise error(errno.EINTR)
- return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
- pass
-
-class error(Exception):
- pass
-
-class socket:
-
- def __init__(self, family=None):
- if family is None:
- self.address_family = _ADDRFAMILY
- else:
- self.address_family = family
- self._closed = _CLOSED
- if self._closed:
- raise error('socket is already closed!')
- self._called = 0
-
- def close(self):
- self._closed = True
-
- def fileno(self):
- return id(self)
-
- def bind(self, server_class):
- (self.server_address, self.server_port) = server_class
- if self.address_family not in set([AF_INET, AF_INET6]):
- raise error("Address family not supported by protocol: %s" % self.address_family)
- if self.address_family == AF_INET6 and not has_ipv6:
- raise error("Address family not supported in this machine: %s has_ipv6: %s"
- % (self.address_family, str(has_ipv6)))
- if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
- raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
- if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
- raise error("Cannot assign requested address : %s" % str(self.server_address))
- if type(self.server_port) is not int:
- raise TypeError("an integer is required: %s" % str(self.server_port))
- if self.server_port < 0 or self.server_port > 65535:
- raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
- """
- This is a dummy time() method against time.time()
- """
- # return float constant value
- return _TEST_TIME_SECS
-
-def gmtime():
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- # always return nothing
- return None
-
-def strftime(*arg):
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 879e8a8..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,2 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
- def __init__(self, path):
- self.path = path
- self.headers={}
- self.log = ""
-
- def _write_log(self, msg):
- self.log = self.log + msg
-
-class HTTPServer:
- """
- A mock-up class of http.server.HTTPServer
- """
- address_family = fake_socket.AF_INET
- def __init__(self, server_class, handler_class):
- self.socket = fake_socket.socket(self.address_family)
- self.server_class = server_class
- self.socket.bind(self.server_class)
- self._handler = handler_class(None, None, self)
-
- def handle_request(self):
- pass
-
- def server_close(self):
- self.socket.close()
-
-class BaseHTTPRequestHandler:
- """
- A mock-up class of http.server.BaseHTTPRequestHandler
- """
-
- def __init__(self, request, client_address, server):
- self.path = "/path/to"
- self.headers = {}
- self.server = server
- self.response = DummyHttpResponse(path=self.path)
- self.response.write = self._write
- self.wfile = self.response
-
- def send_response(self, code=0):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
-
- def send_header(self, key, value):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.headers[key] = value
-
- def end_headers(self):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.wrote_headers = True
-
- def send_error(self, code, message=None):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
- self.response.body = message
-
- def address_string(self):
- return 'dummyhost'
-
- def log_date_time_string(self):
- return '[DD/MM/YYYY HH:MI:SS]'
-
- def _write(self, obj):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index 059107a..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,3 +0,0 @@
-SUBDIRS = cc config util
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index ccf4dde..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,2 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e16d6a9..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc at xxxx'
-
-class Queue():
- def __init__(self, msg=None, env={}):
- self.msg = msg
- self.env = env
-
- def dump(self):
- return { 'msg': self.msg, 'env': self.env }
-
-class SessionError(Exception):
- pass
-
-class SessionTimeout(Exception):
- pass
-
-class Session:
- def __init__(self, socket_file=None, verbose=False):
- self._lname = _TEST_LNAME
- self.message_queue = []
- self.old_message_queue = []
- try:
- self._socket = fake_socket.socket()
- except fake_socket.error as se:
- raise SessionError(se)
- self.verbose = verbose
-
- @property
- def lname(self):
- return self._lname
-
- def close(self):
- self._socket.close()
-
- def _clear_queues(self):
- while len(self.message_queue) > 0:
- self.dequeue()
-
- def _next_sequence(self, que=None):
- return len(self.message_queue)
-
- def enqueue(self, msg=None, env={}):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- seq = self._next_sequence()
- env.update({"seq": 0}) # fixed here
- que = Queue(msg=msg, env=env)
- self.message_queue.append(que)
- if self.verbose:
- sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
- return seq
-
- def dequeue(self):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = None
- try:
- que = self.message_queue.pop(0) # always pop at index 0
- self.old_message_queue.append(que)
- except IndexError:
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
- return que
-
- def get_queue(self, seq=None):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- if seq is None:
- seq = len(self.message_queue) - 1
- que = None
- try:
- que = self.message_queue[seq]
- except IndexError:
- raise IndexError
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
- return que
-
- def group_sendmsg(self, msg, group, instance="*", to="*"):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": to,
- "group": group,
- "instance": instance })
-
- def group_recvmsg(self, nonblock=True, seq=0):
- que = self.dequeue()
- return que.msg, que.env
-
- def group_reply(self, routing, msg):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": routing["from"],
- "group": routing["group"],
- "instance": routing["instance"],
- "reply": routing["seq"] })
-
- def get_message(self, group, to='*'):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = Queue()
- for q in self.message_queue:
- if q.env['group'] == group:
- self.message_queue.remove(q)
- self.old_message_queue.append(q)
- que = q
- if self.verbose:
- sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
- return q.msg
-
- def group_subscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
-
- def group_unsubscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index 5b0379a..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,2 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index a4e9c37..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
- assert 'result' in msg
- try:
- return msg['result'][0], msg['result'][1]
- except IndexError:
- return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
- if arg is None:
- return { 'result': [ rcode ] }
- else:
- return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
- assert 'command' in msg
- try:
- return msg['command'][0], msg['command'][1]
- except IndexError:
- return msg['command'][0], None
-
-def create_command(command_name, params = None):
- if params is None:
- return {"command": [command_name]}
- else:
- return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
- try:
- file = open(spec_file)
- json_str = file.read()
- module_spec = json.loads(json_str)
- file.close()
- return ModuleSpec(module_spec['module_spec'], check)
- except IOError as ioe:
- raise ModuleSpecError("JSON read error: " + str(ioe))
- except ValueError as ve:
- raise ModuleSpecError("JSON parse error: " + str(ve))
- except KeyError as err:
- raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
- pass
-
-class ModuleSpec:
- def __init__(self, module_spec, check = True):
- self._module_spec = module_spec
-
- def get_config_spec(self):
- return self._module_spec['config_data']
-
- def get_commands_spec(self):
- return self._module_spec['commands']
-
- def get_module_name(self):
- return self._module_spec['module_name']
-
-class ModuleCCSessionError(Exception):
- pass
-
-class DataNotFoundError(Exception):
- pass
-
-class ConfigData:
- def __init__(self, specification):
- self.specification = specification
-
- def get_value(self, identifier):
- """Returns a tuple where the first item is the value at the
- given identifier, and the second item is absolutely False
- even if the value is an unset default or not. Raises an
- DataNotFoundError if the identifier is not found in the
- specification file.
- *** NOTE ***
- There are some differences from the original method. This
- method never handles local settings like the original
- method. But these different behaviors aren't so big issues
- for a mock-up method of stats_httpd because stats_httpd
- calls this method at only first."""
- for config_map in self.get_module_spec().get_config_spec():
- if config_map['item_name'] == identifier:
- if 'item_default' in config_map:
- return config_map['item_default'], False
- raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
- def get_module_spec(self):
- return self.specification
-
-class ModuleCCSession(ConfigData):
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
- module_spec = module_spec_from_file(spec_file_name)
- ConfigData.__init__(self, module_spec)
- self._module_name = module_spec.get_module_name()
- self.set_config_handler(config_handler)
- self.set_command_handler(command_handler)
- if not cc_session:
- self._session = Session(verbose=True)
- else:
- self._session = cc_session
-
- def start(self):
- pass
-
- def close(self):
- self._session.close()
-
- def check_command(self, nonblock=True):
- msg, env = self._session.group_recvmsg(nonblock)
- if not msg or 'result' in msg:
- return
- cmd, arg = parse_command(msg)
- answer = None
- if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
- answer = self._config_handler(arg)
- elif env['group'] == self._module_name and self._command_handler:
- answer = self._command_handler(cmd, arg)
- if answer:
- self._session.group_reply(env, answer)
-
- def set_config_handler(self, config_handler):
- self._config_handler = config_handler
- # should we run this right now since we've changed the handler?
-
- def set_command_handler(self, command_handler):
- self._command_handler = command_handler
-
- def get_module_spec(self):
- return self.specification
-
- def get_socket(self):
- return self._session._socket
-
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index b09fdee..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,2 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
- pass
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..5eb8f92
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,367 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+class SignalHandler():
+ """A signal handler class for deadlock in unittest"""
+ def __init__(self, fail_handler, timeout=20):
+ """sets a schedule in SIGARM for invoking the handler via
+ unittest.TestCase after timeout seconds (default is 20)"""
+ self.fail_handler = fail_handler
+ self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+ signal.alarm(timeout)
+
+ def reset(self):
+ """resets the schedule in SIGALRM"""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.orig_handler)
+
+ def sig_handler(self, signal, frame):
+ """envokes unittest.TestCase.fail as a signal handler"""
+ self.fail_handler("A deadlock might be detected")
+
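+# Send the given command to the named module over the CC channel and return
+# the parsed answer. A temporary isc.cc.Session is created and closed unless
+# an existing session is passed in via the 'session' argument.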
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+ if session is not None:
+ cc_session = session
+ else:
+ cc_session = isc.cc.Session()
+ if timeout is not None:
+ orig_timeout = cc_session.get_timeout()
+ cc_session.set_timeout(timeout * 1000)
+ command = isc.config.ccsession.create_command(command_name, params)
+ seq = cc_session.group_sendmsg(command, module_name)
+ try:
+ (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+ if answer:
+ return isc.config.ccsession.parse_answer(answer)
+ except isc.cc.SessionTimeout:
+ pass
+ finally:
+ if timeout is not None:
+ cc_session.set_timeout(orig_timeout)
+ if session is None:
+ cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+ return send_command("shutdown", module_name, **kwargs)
+
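+# A small wrapper that runs one of the mock servers defined in this module in
+# a daemon thread: run() starts the thread and waits until the server signals
+# its _started event, and shutdown() asks the server to stop and joins the
+# thread.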
+class ThreadingServerManager:
+ def __init__(self, server, *args, **kwargs):
+ self.server = server(*args, **kwargs)
+ self.server_name = server.__name__
+ self.server._thread = threading.Thread(
+ name=self.server_name, target=self.server.run)
+ self.server._thread.daemon = True
+
+ def run(self):
+ self.server._thread.start()
+ self.server._started.wait()
+ self.server._started.clear()
+
+ def shutdown(self):
+ self.server.shutdown()
+ self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+ """Dummy for sys"""
+ class dummy_io:
+ write = do_nothing
+ stdout = stderr = dummy_io()
+
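+# Runs a real MsgQ instance; output to stdout/stderr is suppressed by
+# replacing msgq.sys and msgq.print with the dummies defined above.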
+class MockMsgq:
+ def __init__(self):
+ self._started = threading.Event()
+ # suppress output to stdout and stderr
+ msgq.sys = dummy_sys()
+ msgq.print = do_nothing
+ self.msgq = msgq.MsgQ(verbose=False)
+ result = self.msgq.setup()
+ if result:
+ sys.exit("Error on Msgq startup: %s" % result)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.msgq.run()
+ except Exception:
+ pass
+ finally:
+ # explicitly shut down the socket of the msgq before
+ # shutting down the msgq
+ self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+ self.msgq.shutdown()
+
+ def shutdown(self):
+ # do nothing
+ pass
+
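+# Runs a real ConfigManager against the test data directory given by the
+# CONFIG_TESTDATA_PATH environment variable.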
+class MockCfgmgr:
+ def __init__(self):
+ self._started = threading.Event()
+ self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+ os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+ self.cfgmgr.read_config()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.cfgmgr.run()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.cfgmgr.running = False
+
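+# A mock Boss module. Its spec defines a 'sendstats' command and a 'boot_time'
+# statistics item; on 'sendstats' it pushes a fixed boot_time value to the
+# Stats module.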
+class MockBoss:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Boss",
+ "module_description": "Mock Master process",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+"""
+ _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self._started.set()
+ self.got_command_name = command
+ params = { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+ }
+ }
+ if command == 'sendstats':
+ send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(0)
+ elif command == 'getstats':
+ return isc.config.create_answer(0, params)
+ return isc.config.create_answer(1, "Unknown Command")
+
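+# A mock Auth module that reports fixed queries.tcp/queries.udp counters to
+# the Stats module when it receives 'sendstats'.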
+class MockAuth:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Auth",
+ "module_description": "Mock Authoritative service",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ]
+ }
+}
+"""
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+ self.queries_tcp = 3
+ self.queries_udp = 2
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self.got_command_name = command
+ if command == 'sendstats':
+ params = { "owner": "Auth",
+ "data": { 'queries.tcp': self.queries_tcp,
+ 'queries.udp': self.queries_udp } }
+ return send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(1, "Unknown Command")
+
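+# A Stats subclass usable with ThreadingServerManager: it adds the _started
+# event and maps shutdown() to the Stats command_shutdown() handler.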
+class MyStats(stats.Stats):
+ def __init__(self):
+ self._started = threading.Event()
+ stats.Stats.__init__(self)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+ ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+ def __init__(self, *server_address):
+ self._started = threading.Event()
+ if server_address:
+ stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+ try:
+ stats_httpd.StatsHttpd.__init__(self)
+ finally:
+ if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+ stats_httpd.SPECFILE_LOCATION.close()
+ stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+ else:
+ stats_httpd.StatsHttpd.__init__(self)
+
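+ # create_specfile() builds an in-memory copy of the original spec file in
+ # which the default of 'listen_on' is replaced with the given (address,
+ # port) pairs, so each test instance can listen on its own address.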
+ def create_specfile(self, *server_address):
+ spec_io = open(self.ORIG_SPECFILE_LOCATION)
+ try:
+ spec = json.load(spec_io)
+ spec_io.close()
+ config = spec['module_spec']['config_data']
+ for i in range(len(config)):
+ if config[i]['item_name'] == 'listen_on':
+ config[i]['item_default'] = \
+ [ dict(address=a[0], port=a[1]) for a in server_address ]
+ break
+ return io.StringIO(json.dumps(spec))
+ finally:
+ spec_io.close()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_handler('shutdown', None)
+
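+# Starts the set of mock core modules (msgq, cfgmgr, Boss and Auth) that the
+# statistics tests depend on, and shuts them down in the reverse order.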
+class BaseModules:
+ def __init__(self):
+ # MockMsgq
+ self.msgq = ThreadingServerManager(MockMsgq)
+ self.msgq.run()
+ # Check whether msgq is ready. A SessionTimeout is raised here if not.
+ isc.cc.session.Session().close()
+ # MockCfgmgr
+ self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+ self.cfgmgr.run()
+ # MockBoss
+ self.boss = ThreadingServerManager(MockBoss)
+ self.boss.run()
+ # MockAuth
+ self.auth = ThreadingServerManager(MockAuth)
+ self.auth.run()
+
+ def shutdown(self):
+ # MockAuth
+ self.auth.shutdown()
+ # MockBoss
+ self.boss.shutdown()
+ # MockCfgmgr
+ self.cfgmgr.shutdown()
+ # MockMsgq
+ self.msgq.shutdown()
+ # remove the unused socket file
+ socket_file = self.msgq.server.msgq.socket_file
+ try:
+ if os.path.exists(socket_file):
+ os.remove(socket_file)
+ except OSError:
+ pass
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 4340c64..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -1,17 +1,27 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = process_rename_test.py
+noinst_SCRIPTS = $(PYTESTS)
# .py will be generated by configure, so we don't have to include it
# in EXTRA_DIST.
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 4b45210..f96c023 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -38,8 +38,10 @@ class TestRename(unittest.TestCase):
Then scan them by looking at the source text
(without actually running them)
"""
- # Regexp to find all the *_SCRIPTS = something lines,
- # including line continuations (backslash and newline)
+ # Regexp to find all the *_SCRIPTS = something lines (except for
+ # noinst_SCRIPTS, which are scripts for tests), including line
+ # continuations (backslash and newline)
+ excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$',
re.MULTILINE)
# Script name regular expression
@@ -53,7 +55,8 @@ class TestRename(unittest.TestCase):
if 'Makefile' in fs:
makefile = ''.join(open(os.path.join(d,
"Makefile")).readlines())
- for (var, _) in lines.findall(makefile):
+ for (var, _) in lines.findall(re.sub(excluded_lines, '',
+ makefile)):
for (script, _) in scripts.findall(var):
self.__scan(d, script, fun)
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index ee8505e..8d80b22 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -7,11 +7,16 @@ pkglibexec_SCRIPTS = b10-xfrin
b10_xfrindir = $(pkgdatadir)
b10_xfrin_DATA = xfrin.spec
-CLEANFILES = b10-xfrin xfrin.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.pyc
man_MANS = b10-xfrin.8
EXTRA_DIST = $(man_MANS) b10-xfrin.xml
-EXTRA_DIST += xfrin.spec
+EXTRA_DIST += xfrin.spec xfrin_messages.mes
if ENABLE_MAN
@@ -20,8 +25,18 @@ b10-xfrin.8: b10-xfrin.xml
endif
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py : xfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrin_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py
+b10-xfrin: xfrin.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index d0723b5..056103a 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
.\" Title: b10-xfrin
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: September 8, 2010
+.\" Date: October 12, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-XFRIN" "8" "September 8, 2010" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "October 12, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -29,23 +29,11 @@ The
\fBb10\-xfrin\fR
daemon provides the BIND 10 incoming DNS zone transfer service\&. Normally it is started by the
\fBbind10\fR(8)
-boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data store\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-.sp
-The Y1 prototype release only supports AXFR\&. IXFR is not implemented\&.
-.sp .5v
-.RE
+boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data source\&.
+.PP
+The
+\fBb10\-xfrin\fR
+daemon supports both AXFR and IXFR\&. Due to some implementation limitations of the current development release, however, it only tries AXFR by default, and care should be taken to enable IXFR\&. See the BIND 10 Guide for more details\&.
.PP
This daemon communicates with BIND 10 over a
\fBb10-msgq\fR(8)
@@ -61,14 +49,38 @@ receives its configurations from
.PP
The configurable settings are:
.PP
-\fImaster_addr\fR
-The default is 127\&.0\&.0\&.1\&.
+\fItransfers_in\fR
+defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
.PP
+
+\fIzones\fR
+is a list of zones known to the
+\fBb10\-xfrin\fR
+daemon\&. The list items are:
+\fIname\fR
+(the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
+\fImaster_addr\fR
+(the zone master to transfer from),
\fImaster_port\fR
-The default is 53\&.
-.PP
-\fItransfers\-in\fR
-defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
+(defaults to 53),
+\fIuse_ixfr\fR
+(defaults to false), and
+\fItsig_key\fR
+(optional TSIG key to use)\&. The
+\fItsig_key\fR
+is specified using a full string colon\-delimited name:key:algorithm representation (e\&.g\&.
+\(lqfoo\&.example\&.org:EvABsfU2h7uofnmqaRCrhHunGsd=:hmac\-sha1\(rq)\&.
+.PP
+(The site\-wide
+\fImaster_addr\fR
+and
+\fImaster_port\fR
+configurations are deprecated; use the
+\fIzones\fR
+list configuration instead\&.)
.PP
The configuration commands are:
.PP
@@ -106,7 +118,9 @@ to define the class (defaults to
\fImaster\fR
to define the IP address of the authoritative server to transfer from, and
\fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&.
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
+\fIzones\fR
+configuration\&.
.PP
\fBshutdown\fR
@@ -143,5 +157,5 @@ The
daemon was implemented in March 2010 by Zhang Likun of CNNIC for the ISC BIND 10 project\&.
.SH "COPYRIGHT"
.br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2010-2011 Internet Systems Consortium, Inc. ("ISC")
.br
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index fdfe1ef..231681c 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>September 8, 2010</date>
+ <date>October 12, 2011</date>
</refentryinfo>
<refmeta>
@@ -36,7 +36,7 @@
<docinfo>
<copyright>
- <year>2010</year>
+ <year>2010-2011</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -59,12 +59,23 @@
<citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
boss process.
When triggered it can request and receive a zone transfer and store
- the zone in a BIND 10 zone data store.
+ the zone in a BIND 10 zone data source.
</para>
- <note><simpara>
- This prototype release only supports AXFR. IXFR is not implemented.
- </simpara></note>
+<!-- TODO:
+xfrin only does the transfer to make it as simple as possible.
+The logic for handling transfer triggers or zone management is handled
+in separate zonemgr process.
+-->
+
+ <para>
+ The <command>b10-xfrin</command> daemon supports both AXFR and
+ IXFR. Due to some implementation limitations of the current
+ development release, however, it only tries AXFR by default,
+ and care should be taken to enable IXFR.
+ See the BIND 10 Guide for more details.
+ </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
<para>
This daemon communicates with BIND 10 over a
@@ -86,20 +97,35 @@
The configurable settings are:
</para>
- <para><varname>master_addr</varname>
-<!-- TODO: how can there be a single setting for this? -->
- The default is 127.0.0.1.
+ <para><varname>transfers_in</varname>
+ defines the maximum number of inbound zone transfers
+ that can run concurrently. The default is 10.
</para>
- <para><varname>master_port</varname>
-<!-- TODO: what if custom is needed per zone? -->
- The default is 53.
+<!-- TODO: is name okay for master_addr or just IP? -->
+ <para>
+ <varname>zones</varname> is a list of zones known to the
+ <command>b10-xfrin</command> daemon.
+ The list items are:
+ <varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
+ <varname>master_addr</varname> (the zone master to transfer from),
+ <varname>master_port</varname> (defaults to 53),
+ <varname>use_ixfr</varname> (defaults to false), and
+ <varname>tsig_key</varname> (optional TSIG key to use).
+ The <varname>tsig_key</varname> is specified using a full string
+ colon-delimited name:key:algorithm representation (e.g.
+ <quote>foo.example.org:EvABsfU2h7uofnmqaRCrhHunGsd=:hmac-sha1</quote>).
</para>
+<!-- TODO: document this better -->
+<!-- TODO: the tsig_key format may change -->
- <para><varname>transfers-in</varname>
- defines the maximum number of inbound zone transfers
- that can run concurrently. The default is 10.
+ <para>
+ (The site-wide <varname>master_addr</varname> and
+ <varname>master_port</varname> configurations are deprecated;
+ use the <varname>zones</varname> list configuration instead.)
</para>
+<!-- NOTE: also tsig_key but not mentioning since so short lived. -->
<!-- TODO: formating -->
<para>
@@ -133,7 +159,7 @@
according to the SOA's REFRESH time
to tell <command>b10-xfrin</command> that the zone needs to do
a zone refresh.
- This is an internal command and not exposed to the administrator.
+ This is an internal command and not exposed to the administrator.
<!-- not defined in spec -->
</para>
@@ -148,6 +174,9 @@
the authoritative server to transfer from,
and <varname>port</varname> to define the port number on the
authoritative server (defaults to 53).
+ If the address or port is not specified, it will use the
+ value previously defined in the <varname>zones</varname>
+ configuration.
</para>
<!-- TODO: later hostname for master? -->
@@ -180,7 +209,7 @@ add a usage example of xfrin -->
</para></note>
<!-- TODO:
- it can handle more than one XFR in now,
+ it can handle more than one XFR in now,
but the problem is If SQLITE3 datasource part support multiple write
operation
-->
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index d4efbc7..8f4fa91 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = testdata .
+
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrin_test.py
EXTRA_DIST = $(PYTESTS)
@@ -6,7 +8,10 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# sunstudio needs the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +23,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
$(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
+ TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
+ TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/testdata/Makefile.am b/src/bin/xfrin/tests/testdata/Makefile.am
new file mode 100644
index 0000000..5e325cb
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = example.com # not necessarily needed, but for reference
+EXTRA_DIST += example.com.sqlite3
diff --git a/src/bin/xfrin/tests/testdata/example.com b/src/bin/xfrin/tests/testdata/example.com
new file mode 100644
index 0000000..2afcd28
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/example.com
@@ -0,0 +1,17 @@
+;; This is the simplest form of zone file for 'example.com', which is the
+;; source of the corresponding sqlite3 DB file. This file is provided
+;; for reference purposes only; it's not actually used anywhere.
+
+example.com. 3600 IN SOA master.example.com. admin.example.com. (
+ 1230 ; serial
+ 3600 ; refresh (1 hour)
+ 1800 ; retry (30 minutes)
+ 2419200 ; expire (4 weeks)
+ 7200 ; minimum (2 hours)
+ )
+ 3600 NS dns01.example.com.
+ 3600 NS dns02.example.com.
+ 3600 NS dns03.example.com.
+dns01.example.com. 3600 IN A 192.0.2.1
+dns02.example.com. 3600 IN A 192.0.2.2
+dns03.example.com. 3600 IN A 192.0.2.3
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
new file mode 100644
index 0000000..ed241c3
Binary files /dev/null and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 0ccbbb8..1e4d942 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2009 Internet Systems Consortium.
+# Copyright (C) 2009-2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -14,14 +14,23 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
+import shutil
import socket
+import sys
+import io
+from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+import xfrin
+from isc.xfrin.diff import Diff
+import isc.log
#
# Commonly used (mostly constant) test parameters
#
-TEST_ZONE_NAME = "example.com"
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS_STR = 'IN'
TEST_DB_FILE = 'db_file'
TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
@@ -31,34 +40,150 @@ TEST_MASTER_IPV6_ADDRESS = '::1'
TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
socket.IPPROTO_TCP, '',
(TEST_MASTER_IPV6_ADDRESS, 53))
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
+TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR")
# XXX: This should be a non-privileged port that is unlikely to be used.
# If some other process uses this port test will fail.
TEST_MASTER_PORT = '53535'
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
+# SOA intended to be used for the new SOA as a result of transfer.
soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
'master.example.com. admin.example.com ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(Name(TEST_ZONE_NAME), TEST_RRCLASS, RRType.SOA(),
- RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
-example_axfr_question = Question(Name(TEST_ZONE_NAME), TEST_RRCLASS,
- RRType.AXFR())
-example_soa_question = Question(Name(TEST_ZONE_NAME), TEST_RRCLASS,
- RRType.SOA())
+
+# SOA intended to be used for the current SOA at the secondary side.
+# Note that its serial is smaller than that of soa_rdata.
+begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'master.example.com. admin.example.com ' +
+ '1230 3600 1800 2419200 7200')
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset.add_rdata(begin_soa_rdata)
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
+def check_diffs(assert_fn, expected, actual):
+ '''A helper function checking the differences made in the XFR session.
+
+ This is expected to be called from some subclass of unittest.TestCase,
+ and assert_fn is generally expected to be 'self.assertEqual' of that class.
+
+ '''
+ assert_fn(len(expected), len(actual))
+ for (diffs_exp, diffs_actual) in zip(expected, actual):
+ assert_fn(len(diffs_exp), len(diffs_actual))
+ for (diff_exp, diff_actual) in zip(diffs_exp, diffs_actual):
+ # operation should match
+ assert_fn(diff_exp[0], diff_actual[0])
+ # The diff as RRset should be equal (for simplicity we assume
+ # all RRsets contain exactly one RDATA)
+ assert_fn(diff_exp[1].get_name(), diff_actual[1].get_name())
+ assert_fn(diff_exp[1].get_type(), diff_actual[1].get_type())
+ assert_fn(diff_exp[1].get_class(), diff_actual[1].get_class())
+ assert_fn(diff_exp[1].get_rdata_count(),
+ diff_actual[1].get_rdata_count())
+ assert_fn(1, diff_exp[1].get_rdata_count())
+ assert_fn(diff_exp[1].get_rdata()[0],
+ diff_actual[1].get_rdata()[0])
+
class XfrinTestException(Exception):
pass
-def strip_mutable_tsig_data(data):
- # Unfortunately we cannot easily compare TSIG RR because we can't tweak
- # current time. As a work around this helper function strips off the time
- # dependent part of TSIG RDATA, i.e., the MAC (assuming HMAC-MD5) and
- # Time Signed.
- return data[0:-32] + data[-26:-22] + data[-6:]
+class XfrinTestTimeoutException(Exception):
+ pass
+
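+# A minimal stand-in for the module CC session used by MockXfrin; only
+# get_default_value() is needed by these tests, and it returns values
+# matching the spec file defaults.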
+class MockCC():
+ def get_default_value(self, identifier):
+ # The returned values should be identical to the spec file
+ # XXX: these should be retrieved from the spec file
+ # (see MyCCSession of xfrout_test.py.in)
+ if identifier == "zones/master_port":
+ return TEST_MASTER_PORT
+ if identifier == "zones/class":
+ return TEST_RRCLASS_STR
+ if identifier == "zones/use_ixfr":
+ return False
+
+class MockDataSourceClient():
+ '''A simple mock data source client.
+
+ This class provides a minimal set of wrappers related to the data source
+ API that would be used by Diff objects. For our testing purposes they
+ only keep track of the history of the changes.
+
+ '''
+ def __init__(self):
+ self.force_fail = False # if True, raise an exception on commit
+ self.committed_diffs = []
+ self.diffs = []
+
+ def get_class(self):
+ '''Mock version of get_class().
+
+ We simply return the commonly used constant RR class. If and when
+ we use this mock for a different RR class we need to adjust it
+ accordingly.
+
+ '''
+ return TEST_RRCLASS
+
+ def find_zone(self, zone_name):
+ '''Mock version of find_zone().
+
+ It returns itself (subsequently acting as a mock ZoneFinder) for
+ some test zone names. For some others it returns either NOTFOUND
+ or PARTIALMATCH.
+
+ '''
+ if zone_name == TEST_ZONE_NAME or \
+ zone_name == Name('no-soa.example') or \
+ zone_name == Name('dup-soa.example'):
+ return (isc.datasrc.DataSourceClient.SUCCESS, self)
+ elif zone_name == Name('no-such-zone.example'):
+ return (DataSourceClient.NOTFOUND, None)
+ elif zone_name == Name('partial-match-zone.example'):
+ return (DataSourceClient.PARTIALMATCH, self)
+ raise ValueError('Unexpected input to mock client: bug in test case?')
+
+ def find(self, name, rrtype, target, options):
+ '''Mock ZoneFinder.find().
+
+ It returns the predefined SOA RRset to queries for SOA of the common
+ test zone name. It also emulates some unusual cases for special
+ zone names.
+
+ '''
+ if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+ return (ZoneFinder.SUCCESS, begin_soa_rrset)
+ if name == Name('no-soa.example'):
+ return (ZoneFinder.NXDOMAIN, None)
+ if name == Name('dup-soa.example'):
+ dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+ dup_soa_rrset.add_rdata(begin_soa_rdata)
+ dup_soa_rrset.add_rdata(soa_rdata)
+ return (ZoneFinder.SUCCESS, dup_soa_rrset)
+ raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+ def get_updater(self, zone_name, replace):
+ return self
+
+ def add_rrset(self, rrset):
+ self.diffs.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ self.diffs.append(('delete', rrset))
+
+ def commit(self):
+ if self.force_fail:
+ raise isc.datasrc.Error('Updater.commit() failed')
+ self.committed_diffs.append(self.diffs)
+ self.diffs = []
class MockXfrin(Xfrin):
# This is a class attribute of a callable object that specifies a non
@@ -69,22 +194,34 @@ class MockXfrin(Xfrin):
check_command_hook = None
def _cc_setup(self):
- self._tsig_key_str = None
+ self._tsig_key = None
+ self._module_cc = MockCC()
pass
def _get_db_file(self):
pass
-
+
def _cc_check_command(self):
self._shutdown_event.set()
if MockXfrin.check_command_hook:
MockXfrin.check_command_hook()
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ tsig_key, request_type, check_soa=True):
+ # store some of the arguments for verification, then call this
+ # method in the superclass
+ self.xfrin_started_master_addr = master_addrinfo[2][0]
+ self.xfrin_started_master_port = master_addrinfo[2][1]
+ self.xfrin_started_request_type = request_type
+ return Xfrin.xfrin_start(self, zone_name, rrclass, None,
+ master_addrinfo, tsig_key,
+ request_type, check_soa)
+
class MockXfrinConnection(XfrinConnection):
- def __init__(self, sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addr):
- super().__init__(sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addr)
+ def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addr, tsig_key=None):
+ super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
+ shutdown_event, master_addr)
self.query_data = b''
self.reply_data = b''
self.force_time_out = False
@@ -105,8 +242,11 @@ class MockXfrinConnection(XfrinConnection):
def recv(self, size):
data = self.reply_data[:size]
self.reply_data = self.reply_data[size:]
+ if len(data) == 0:
+ raise XfrinTestTimeoutException('Emulated timeout')
if len(data) < size:
- raise XfrinTestException('cannot get reply data')
+ raise XfrinTestException('cannot get reply data (' + str(size) +
+ ' bytes)')
return data
def send(self, data):
@@ -131,10 +271,11 @@ class MockXfrinConnection(XfrinConnection):
self.response_generator()
return len(data)
- def create_response_data(self, response = True, bad_qid = False,
- rcode = Rcode.NOERROR(),
- questions = default_questions,
- answers = default_answers):
+ def create_response_data(self, response=True, bad_qid=False,
+ rcode=Rcode.NOERROR(),
+ questions=default_questions,
+ answers=default_answers,
+ tsig_ctx=None):
resp = Message(Message.RENDER)
qid = self.qid
if bad_qid:
@@ -148,35 +289,426 @@ class MockXfrinConnection(XfrinConnection):
[resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
renderer = MessageRenderer()
- resp.to_wire(renderer)
+ if tsig_ctx is not None:
+ resp.to_wire(renderer, tsig_ctx)
+ else:
+ resp.to_wire(renderer)
reply_data = struct.pack('H', socket.htons(renderer.get_length()))
reply_data += renderer.get_data()
return reply_data
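create_response_data() prepends the 2-byte network-order length required for DNS over TCP, so several responses can simply be concatenated into reply_data. For reference, a sketch of the inverse operation (splitting such a byte string back into individual wire-format messages); this is illustrative only and not part of the test file:

    import socket, struct

    def split_tcp_messages(stream):
        '''Yield each wire-format DNS message from length-prefixed data.'''
        while stream:
            (length,) = struct.unpack('H', stream[:2])
            length = socket.ntohs(length)
            yield stream[2:2 + length]
            stream = stream[2 + length:]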
+class TestXfrinState(unittest.TestCase):
+ def setUp(self):
+ self.sock_map = {}
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+ TEST_RRCLASS, None, threading.Event(),
+ TEST_MASTER_IPV4_ADDRINFO)
+ self.conn.init_socket()
+ self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+ 'm. r. 1230 0 0 0 0'))
+ self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ RRTTL(3600))
+ self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
+ 'ns.example.com'))
+ self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+
+ self.conn._datasrc_client = MockDataSourceClient()
+ self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
+
+class TestXfrinStateBase(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+
+ def test_handle_rr_on_base(self):
+ # The base version of handle_rr() isn't supposed to be called
+ # directly (the argument doesn't matter in this test)
+ self.assertRaises(XfrinException, XfrinState().handle_rr, None)
+
+class TestXfrinInitialSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinInitialSOA()
+
+ def test_handle_rr(self):
+ # normal case
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinFirstData()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual(1234, self.conn._end_serial)
+
+ def test_handle_not_soa(self):
+ # The given RR is not an SOA
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinFirstData(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinFirstData()
+ self.conn._request_type = RRType.IXFR()
+ self.conn._request_serial = 1230 # arbitrarily chosen serial < 1234
+ self.conn._diff = None # should be replaced in the AXFR case
+
+ def test_handle_ixfr_begin_soa(self):
+ self.conn._request_type = RRType.IXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_axfr(self):
+ # If the original type is AXFR, other conditions aren't considered,
+ # and AXFR processing will continue
+ self.conn._request_type = RRType.AXFR()
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+
+ def test_handle_ixfr_to_axfr(self):
+ # Detecting an AXFR-compatible IXFR response by seeing a non-SOA RR after
+ # the initial SOA. Should switch to AXFR.
+ self.assertFalse(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ # The Diff for AXFR should be created at this point
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_handle_ixfr_to_axfr_by_different_soa(self):
+ # An unusual case: Response contains two consecutive SOA but the
+ # serial of the second does not match the requested one. See
+ # the documentation for XfrinFirstData.handle_rr().
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+ self.assertNotEqual(None, self.conn._diff)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDeleteSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRDeleteSOA()
+ # In this state a new Diff object is expected to be created. To
+ # confirm it, we nullify it beforehand.
+ self.conn._diff = None
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual([('delete', self.begin_soa)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDelete(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRDelete().set_xfrstate(self.conn, XfrinIXFRDelete())
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_delete_rr(self):
+ # Non-SOA RRs are simply (going to be) deleted in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('delete', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRDelete()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_soa(self):
+ # SOA in this state means the beginning of added RRs. This SOA
+ # should also be added in the next state, so handle_rr() should return
+ # false.
+ self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(1234, self.conn._current_serial)
+ self.assertEqual(type(XfrinIXFRAddSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAddSOA(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFRAddSOA()
+
+ def test_handle_rr(self):
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_non_soa(self):
+ self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAdd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ # We need to record the state in 'conn' to check the case where the
+ # state doesn't change.
+ XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
+ self.conn._current_serial = 1230
+ self.state = self.conn.get_xfrstate()
+
+ def test_handle_add_rr(self):
+ # Non-SOA RRs are simply (going to be) added in this state
+ self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+ self.assertEqual([('add', self.ns_rrset)],
+ self.conn._diff.get_buffer())
+ # The state shouldn't change
+ self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+
+ def test_handle_end_soa(self):
+ self.conn._end_serial = 1234
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ # handle_rr should have caused commit, and the buffer should now be
+ # empty.
+ self.assertEqual([], self.conn._diff.get_buffer())
+
+ def test_handle_new_delete(self):
+ self.conn._end_serial = 1234
+ # An SOA RR whose serial is the current one means a new difference
+ # sequence begins, starting with removing that SOA.
+ self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+ self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+ self.assertEqual([], self.conn._diff.get_buffer())
+ self.assertEqual(type(XfrinIXFRDeleteSOA()),
+ type(self.conn.get_xfrstate()))
+
+ def test_handle_out_of_sync(self):
+ # getting SOA with an inconsistent serial. This is an error.
+ self.conn._end_serial = 1235
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr,
+ self.conn, soa_rrset)
+
+ def test_finish_message(self):
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinIXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.assertFalse(self.state.finish_message(self.conn))
+
+class TestXfrinAXFR(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFR()
+ self.conn._end_serial = 1234
+
+ def test_handle_rr(self):
+ """
+ Test we can put data inside.
+ """
+ # Put some data inside
+ self.assertTrue(self.state.handle_rr(self.conn, self.a_rrset))
+ # This test uses internal Diff structure to check the behaviour of
+ # XfrinAXFR. Maybe there could be a cleaner way, but it would be more
+ # complicated.
+ self.assertEqual([('add', self.a_rrset)], self.conn._diff.get_buffer())
+ # This SOA terminates the transfer
+ self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+ # It should have changed the state
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ # At this point, the data haven't been committed yet
+ self.assertEqual([('add', self.a_rrset), ('add', soa_rrset)],
+ self.conn._diff.get_buffer())
+
+ def test_handle_rr_mismatch_soa(self):
+ """ SOA with inconsistent serial - unexpected, but we accept it.
+
+ """
+ self.assertTrue(self.state.handle_rr(self.conn, begin_soa_rrset))
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+
+ def test_finish_message(self):
+ """
+ Check normal end of message.
+ """
+ # When a message ends, nothing happens usually
+ self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinAXFREnd(TestXfrinState):
+ def setUp(self):
+ super().setUp()
+ self.state = XfrinAXFREnd()
+
+ def test_handle_rr(self):
+ self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+ self.ns_rrset)
+
+ def test_finish_message(self):
+ self.conn._diff.add_data(self.a_rrset)
+ self.conn._diff.add_data(soa_rrset)
+ self.assertFalse(self.state.finish_message(self.conn))
+
+ # The data should have been committed
+ self.assertEqual([], self.conn._diff.get_buffer())
+ check_diffs(self.assertEqual, [[('add', self.a_rrset),
+ ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+ self.assertRaises(ValueError, self.conn._diff.commit)
+
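The state classes tested above share a simple contract: handle_rr() processes one RR, may switch the connection to another state via set_xfrstate(), and returns False when the same RR must be re-handled by the new state; finish_message() returns False once an end state is reached. A rough sketch of a driver loop built on that contract (not the actual xfrin.py code):

    def drive_states(conn, messages):
        '''Feed parsed messages (lists of RRs) through the current state.'''
        for rrs in messages:
            for rr in rrs:
                # a False return means the new state re-handles the same RR
                while not conn.get_xfrstate().handle_rr(conn, rr):
                    pass
            if not conn.get_xfrstate().finish_message(conn):
                return      # XfrinIXFREnd/XfrinAXFREnd: stop reading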
class TestXfrinConnection(unittest.TestCase):
+ '''Convenient parent class for XFR-protocol tests.
+
+ This class provides common setups and helper methods for protocol related
+ tests on AXFR and IXFR.
+
+ '''
+
def setUp(self):
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
self.sock_map = {}
- self.conn = MockXfrinConnection(self.sock_map, 'example.com.',
- TEST_RRCLASS, TEST_DB_FILE,
- threading.Event(),
+ self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+ TEST_RRCLASS, None, threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
- self.axfr_after_soa = False
+ self.conn.init_socket()
self.soa_response_params = {
'questions': [example_soa_question],
'bad_qid': False,
'response': True,
'rcode': Rcode.NOERROR(),
+ 'tsig': False,
'axfr_after_soa': self._create_normal_response_data
}
+ self.axfr_response_params = {
+ 'question_1st': default_questions,
+ 'question_2nd': default_questions,
+ 'answer_1st': [soa_rrset, self._create_ns()],
+ 'answer_2nd': default_answers,
+ 'tsig_1st': None,
+ 'tsig_2nd': None
+ }
def tearDown(self):
self.conn.close()
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
+ def _create_normal_response_data(self):
+ # This helper method creates a simple sequence of DNS messages that
+ # forms a valid AXFR transaction. It consists of two messages: the
+ # first one containing SOA and NS, the second containing the trailing SOA.
+ question_1st = self.axfr_response_params['question_1st']
+ question_2nd = self.axfr_response_params['question_2nd']
+ answer_1st = self.axfr_response_params['answer_1st']
+ answer_2nd = self.axfr_response_params['answer_2nd']
+ tsig_1st = self.axfr_response_params['tsig_1st']
+ tsig_2nd = self.axfr_response_params['tsig_2nd']
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=question_1st, answers=answer_1st,
+ tsig_ctx=tsig_1st)
+ self.conn.reply_data += \
+ self.conn.create_response_data(questions=question_2nd,
+ answers=answer_2nd,
+ tsig_ctx=tsig_2nd)
+
+ def _create_soa_response_data(self):
+ # This helper method creates a DNS message that is supposed to be
+ # used as a valid response to SOA queries prior to XFR.
+ # If tsig is True, it tries to verify the query with a locally
+ # created TSIG context (which may or may not succeed) so that the
+ # response will include a TSIG.
+ # If axfr_after_soa is True, it resets the response_generator so that
+ # valid XFR messages will follow.
+
+ verify_ctx = None
+ if self.soa_response_params['tsig']:
+ # xfrin (currently) always uses TCP. strip off the length field.
+ query_data = self.conn.query_data[2:]
+ query_message = Message(Message.PARSE)
+ query_message.from_wire(query_data)
+ verify_ctx = TSIGContext(TSIG_KEY)
+ verify_ctx.verify(query_message.get_tsig_record(), query_data)
+
+ self.conn.reply_data = self.conn.create_response_data(
+ bad_qid=self.soa_response_params['bad_qid'],
+ response=self.soa_response_params['response'],
+ rcode=self.soa_response_params['rcode'],
+ questions=self.soa_response_params['questions'],
+ tsig_ctx=verify_ctx)
+ if self.soa_response_params['axfr_after_soa'] != None:
+ self.conn.response_generator = \
+ self.soa_response_params['axfr_after_soa']
+
+ def _create_broken_response_data(self):
+ # This helper method creates a bogus "DNS message" that only contains
+ # 4 octets of data. The DNS message parser will raise an exception.
+ bogus_data = b'xxxx'
+ self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
+ self.conn.reply_data += bogus_data
+
+ def _create_a(self, address):
+ rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+ RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+ return rrset
+
+ def _create_soa(self, serial):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ RRTTL(3600))
+ rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
+ rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+ return rrset
+
+ def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+ return rrset
+
+class TestAXFR(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
+ XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
+
+ def __create_mock_tsig(self, key, error):
+ # This helper function creates a MockTSIGContext for a given key
+ # and TSIG error to be used as a result of verify (normally faked
+ # one)
+ mock_ctx = MockTSIGContext(key)
+ mock_ctx.error = error
+ return mock_ctx
+
+ def __match_exception(self, expected_exception, expected_msg, expression):
+ # This helper method is a higher-granularity version of assertRaises().
+ # If it's not sufficient to check the exception class (e.g., when
+ # the same type of exceptions can be thrown from many places), this
+ # method can be used to check it with the exception argument.
+ try:
+ expression()
+ except expected_exception as ex:
+ self.assertEqual(str(ex), expected_msg)
+ else:
+ self.fail('exception is expected, but not raised')
+
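__match_exception() checks both the exception type and its message text. With a sufficiently recent unittest the same check could also be written with assertRaises as a context manager, for example:

    with self.assertRaises(XfrinException) as cm:
        self.conn._handle_xfrin_responses()
    self.assertEqual('TSIG verify fail: BADSIG', str(cm.exception))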
def test_close(self):
# we shouldn't be using the global asyncore map.
self.assertEqual(len(asyncore.socket_map), 0)
@@ -192,98 +724,171 @@ class TestXfrinConnection(unittest.TestCase):
# to confirm an AF_INET6 socket has been created. A naive application
# tends to assume it's IPv4 only and hardcode AF_INET. This test
# uncovers such a bug.
- c = MockXfrinConnection({}, 'example.com.', TEST_RRCLASS, TEST_DB_FILE,
- threading.Event(),
- TEST_MASTER_IPV6_ADDRINFO)
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, TEST_RRCLASS, None,
+ threading.Event(), TEST_MASTER_IPV6_ADDRINFO)
+ c.init_socket()
c.bind(('::', 0))
c.close()
def test_init_chclass(self):
- c = XfrinConnection({}, 'example.com.', RRClass.CH(), TEST_DB_FILE,
- threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(), None,
+ threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+ c.init_socket()
axfrmsg = c._create_query(RRType.AXFR())
self.assertEqual(axfrmsg.get_question()[0].get_class(),
RRClass.CH())
c.close()
+ def test_create_query(self):
+ def check_query(expected_qtype, expected_auth):
+ '''Helper method to repeat the same pattern of tests'''
+ self.assertEqual(Opcode.QUERY(), msg.get_opcode())
+ self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+ self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
+ self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ANSWER))
+ self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
+ if expected_auth is None:
+ self.assertEqual(0,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ else:
+ self.assertEqual(1,
+ msg.get_rr_count(Message.SECTION_AUTHORITY))
+ auth_rr = msg.get_section(Message.SECTION_AUTHORITY)[0]
+ self.assertEqual(expected_auth.get_name(), auth_rr.get_name())
+ self.assertEqual(expected_auth.get_type(), auth_rr.get_type())
+ self.assertEqual(expected_auth.get_class(),
+ auth_rr.get_class())
+ # In our test scenario RDATA must be 1
+ self.assertEqual(1, expected_auth.get_rdata_count())
+ self.assertEqual(1, auth_rr.get_rdata_count())
+ self.assertEqual(expected_auth.get_rdata()[0],
+ auth_rr.get_rdata()[0])
+
+ # Actual tests start here
+ # SOA query
+ msg = self.conn._create_query(RRType.SOA())
+ check_query(RRType.SOA(), None)
+
+ # AXFR query
+ msg = self.conn._create_query(RRType.AXFR())
+ check_query(RRType.AXFR(), None)
+
+ # IXFR query
+ msg = self.conn._create_query(RRType.IXFR())
+ check_query(RRType.IXFR(), begin_soa_rrset)
+ self.assertEqual(1230, self.conn._request_serial)
+
+ def test_create_ixfr_query_fail(self):
+ # In these cases _create_query() will fail to find a valid SOA RR to
+ # insert in the IXFR query, and should raise an exception.
+
+ self.conn._zone_name = Name('no-such-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('partial-match-zone.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('no-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
+ self.conn._zone_name = Name('dup-soa.example')
+ self.assertRaises(XfrinException, self.conn._create_query,
+ RRType.IXFR())
+
def test_send_query(self):
- def create_msg(query_type):
- msg = Message(Message.RENDER)
- query_id = 0x1035
- msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), query_type)
- msg.add_question(query_question)
- return msg
- self.conn._create_query = create_msg
- # soa request
- self.conn._send_query(RRType.SOA())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x06\x00\x01')
- # axfr request
- self.conn._send_query(RRType.AXFR())
- self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ def message_has_tsig(data):
+ # a simple check if the actual data contains a TSIG RR.
+ # At our level this simple check should suffice; other detailed
+ # tests regarding the TSIG protocol are done in pydnspp.
+ msg = Message(Message.PARSE)
+ msg.from_wire(data)
+ return msg.get_tsig_record() is not None
# soa request with tsig
- self.conn._tsig_ctx = TSIGContext(TSIG_KEY)
+ self.conn._tsig_key = TSIG_KEY
self.conn._send_query(RRType.SOA())
- tsig_soa_data = strip_mutable_tsig_data(self.conn.query_data)
- self.assertEqual(tsig_soa_data, b'\x00n\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x07example\x03com\x00\x00\x06\x00\x01\x07example\x03com\x00\x00\xfa\x00\xff\x00\x00\x00\x00\x00:\x08hmac-md5\x07sig-alg\x03reg\x03int\x00\x01,\x00\x10\x105\x00\x00\x00\x00')
+ self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
# axfr request with tsig
self.conn._send_query(RRType.AXFR())
- tsig_axfr_data = strip_mutable_tsig_data(self.conn.query_data)
- self.assertEqual(tsig_axfr_data, b'\x00n\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x07example\x03com\x00\x00\xfc\x00\x01\x07example\x03com\x00\x00\xfa\x00\xff\x00\x00\x00\x00\x00:\x08hmac-md5\x07sig-alg\x03reg\x03int\x00\x01,\x00\x10\x105\x00\x00\x00\x00')
+ self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
def test_response_with_invalid_msg(self):
self.conn.reply_data = b'aaaxxxx'
- self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+ self.assertRaises(XfrinTestException,
+ self.conn._handle_xfrin_responses)
- def test_response_with_tsig(self):
- self.conn._tsig_ctx = TSIGContext(TSIG_KEY)
+ def test_response_with_tsigfail(self):
+ self.conn._tsig_key = TSIG_KEY
# server tsig check fail, return with RCODE 9 (NOTAUTH)
self.conn._send_query(RRType.SOA())
self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_without_end_soa(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data()
- self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+ # This should result in timeout in the asyncore loop. We emulate
+ # that situation in recv() by emptying the reply data buffer.
+ self.assertRaises(XfrinTestTimeoutException,
+ self.conn._handle_xfrin_responses)
def test_response_bad_qid(self):
self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(bad_qid = True)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+
+ def test_response_error_code_bad_sig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ rcode=Rcode.SERVFAIL())
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADSIG",
+ self.conn._handle_xfrin_responses)
+
+ def test_response_bad_qid_bad_key(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_KEY)
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADKEY",
+ self.conn._handle_xfrin_responses)
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(response = False)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.conn.reply_data = self.conn.create_response_data(response=False)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_error_code(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
rcode=Rcode.SERVFAIL())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_multi_question(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(
questions=[example_axfr_question, example_axfr_question])
- self.assertRaises(XfrinException, self._handle_xfrin_response)
-
- def test_response_empty_answer(self):
- self.conn._send_query(RRType.AXFR())
- self.conn.reply_data = self.conn.create_response_data(answers=[])
- # Should an empty answer trigger an exception? Even though it's very
- # unusual it's not necessarily invalid. Need to revisit.
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_non_response(self):
self.conn._send_query(RRType.AXFR())
self.conn.reply_data = self.conn.create_response_data(response = False)
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_soacheck(self):
# we need to defer the creation until we know the QID, which is
@@ -300,6 +905,18 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ def test_soacheck_bad_qid_bad_sig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.soa_response_params['bad_qid'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ # xfrin should check TSIG before other part of incoming message
+ # validate log message for XfrinException
+ self.__match_exception(XfrinException,
+ "TSIG verify fail: BADSIG",
+ self.conn._check_soa_serial)
+
def test_soacheck_non_response(self):
self.soa_response_params['response'] = False
self.conn.response_generator = self._create_soa_response_data
@@ -310,39 +927,295 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinException, self.conn._check_soa_serial)
+ def test_soacheck_with_tsig(self):
+ # Use a mock tsig context emulating a validly signed response
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn._check_soa_serial(), XFRIN_OK)
+ self.assertEqual(self.conn._tsig_ctx.get_error(), TSIGError.NOERROR)
+
+ def test_soacheck_with_tsig_notauth(self):
+ # emulate a valid error response
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.soa_response_params['rcode'] = Rcode.NOTAUTH()
+ self.conn.response_generator = self._create_soa_response_data
+
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_tsig_noerror_badsig(self):
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+
+ # emulate a normal response whose verification fails due to BADSIG.
+ # According to RFC2845, in this case we should ignore it and keep
+ # waiting for a valid response until a timeout. But we immediately
+ # treat this as a final failure (just as BIND 9 does).
+ self.conn.response_generator = self._create_soa_response_data
+
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_tsig_unsigned_response(self):
+ # we can use a real TSIGContext for this. the response doesn't
+ # contain a TSIG while we sent a signed query. RFC2845 states
+ # we should wait for a valid response in this case, but we treat
+ # it as a fatal transaction failure, too.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
+ def test_soacheck_with_unexpected_tsig_response(self):
+ # we reject unexpected TSIG in responses (following BIND 9's
+ # behavior)
+ self.soa_response_params['tsig'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertRaises(XfrinException, self.conn._check_soa_serial)
+
def test_response_shutdown(self):
self.conn.response_generator = self._create_normal_response_data
self.conn._shutdown_event.set()
self.conn._send_query(RRType.AXFR())
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_timeout(self):
self.conn.response_generator = self._create_normal_response_data
self.conn.force_time_out = True
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_remote_close(self):
self.conn.response_generator = self._create_normal_response_data
self.conn.force_close = True
- self.assertRaises(XfrinException, self._handle_xfrin_response)
+ self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_bad_message(self):
self.conn.response_generator = self._create_broken_response_data
self.conn._send_query(RRType.AXFR())
- self.assertRaises(Exception, self._handle_xfrin_response)
+ self.assertRaises(Exception, self.conn._handle_xfrin_responses)
- def test_response(self):
- # normal case.
+ def test_axfr_response(self):
+ # A simple normal case: AXFR consists of SOA, NS, then trailing SOA.
self.conn.response_generator = self._create_normal_response_data
self.conn._send_query(RRType.AXFR())
- # two SOAs, and only these have been transfered. the 2nd SOA is just
- # a marker, so only 1 RR has been provided in the iteration.
- self.assertEqual(self._handle_xfrin_response(), 1)
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_response_empty_answer(self):
+ '''Test with an empty AXFR answer section.
+
+ This is an unusual response, but there is no reason to reject it.
+ The second message is a complete AXFR response, and transfer should
+ succeed just like the normal case.
+
+ '''
+
+ self.axfr_response_params['answer_1st'] = []
+ self.axfr_response_params['answer_2nd'] = [soa_rrset,
+ self._create_ns(),
+ soa_rrset]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_soa_mismatch(self):
+ '''AXFR response whose begin/end SOAs are not the same.
+
+ What we should do here is moot; for now we accept it, as BIND 9 does.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ # begin serial=1230, end serial=1234. end will be used.
+ answers=[begin_soa_rrset, ns_rr, a_rr, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_extra(self):
+ '''Test with an extra RR after the end of AXFR session.
+
+ The session should be rejected, and nothing should be committed.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn._send_query(RRType.AXFR())
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qname_mismatch(self):
+ '''AXFR response with a mismatching question name.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ self.axfr_response_params['question_1st'] = \
+ [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR())]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qclass_mismatch(self):
+ '''AXFR response with a mismatching RR class.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ self.axfr_response_params['question_1st'] = \
+ [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.AXFR())]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_qtype_mismatch(self):
+ '''AXFR response with a mismatching RR type.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ # returning IXFR in question to AXFR query
+ self.axfr_response_params['question_1st'] = \
+ [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.IXFR())]
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_axfr_response_empty_question(self):
+ '''AXFR response with an empty question.
+
+ Our implementation accepts that, as does BIND 9.
+
+ '''
+ self.axfr_response_params['question_1st'] = []
+ self.conn.response_generator = self._create_normal_response_data
+ self.conn._send_query(RRType.AXFR())
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
def test_do_xfrin(self):
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ def test_do_xfrin_with_tsig(self):
+ # use TSIG with a mock context. we fake all verify results to
+ # emulate successful verification.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('add', self._create_ns()), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_do_xfrin_with_tsig_fail(self):
+ # TSIG verify will fail for the first message. xfrin should fail
+ # immediately.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(1, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_tsig_fail_for_second_message(self):
+ # Similar to the previous test, but first verify succeeds. There
+ # should be a second verify attempt, which will fail, which should
+ # make xfrin fail.
+ def fake_tsig_error(ctx):
+ if self.conn._tsig_ctx.verify_called == 1:
+ return TSIGError.NOERROR
+ return TSIGError.BAD_SIG
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, fake_tsig_error)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_missing_tsig(self):
+ # XFR request sent with TSIG, but the response doesn't have TSIG.
+ # xfr should fail.
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, None)
+ self.conn._tsig_ctx = MockTSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(1, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_missing_tsig_for_second_message(self):
+ # Similar to the previous test, but the first one contains TSIG and verify
+ # succeeds (due to fake). The second message lacks TSIG.
+ #
+ # Note: this test case is actually not that trivial: Skipping
+ # intermediate TSIG is allowed. In this case, however, the second
+ # message is the last one, which must contain TSIG anyway, so the
+ # expected result is correct. If/when we support skipping
+ # intermediate TSIGs, we'll need additional test cases.
+ def fake_tsig_error(ctx):
+ if self.conn._tsig_ctx.verify_called == 1:
+ return TSIGError.NOERROR
+ return TSIGError.FORMERR
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, fake_tsig_error)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
+ def test_do_xfrin_with_unexpected_tsig(self):
+ # XFR request wasn't signed, but response includes TSIG. Like BIND 9,
+ # we reject that.
+ self.axfr_response_params['tsig_1st'] = TSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+
+ def test_do_xfrin_with_unexpected_tsig_for_second_message(self):
+ # similar to the previous test, but the first message is normal.
+ # the second one contains an unexpected TSIG. should be rejected.
+ self.axfr_response_params['tsig_2nd'] = TSIGContext(TSIG_KEY)
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+
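The TSIG tests above inject a context factory via _tsig_ctx_creator; the injected MockTSIGContext (defined elsewhere in this test file, not shown in this hunk) only needs to count verify() calls and report a preset, or computed, error. A sketch of the behaviour these tests assume, not the real class:

    class SketchTSIGContext:
        '''Stand-in illustrating the mock TSIG behaviour used above.'''
        def __init__(self, key):
            self.error = None        # a TSIGError, or a callable returning one
            self.verify_called = 0
        def get_error(self):
            return self.error(self) if callable(self.error) else self.error
        def verify(self, tsig_record, data):
            self.verify_called += 1
            return self.get_error()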
def test_do_xfrin_empty_response(self):
# skipping the creation of response data, so the transfer will fail.
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
@@ -351,22 +1224,36 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_broken_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
- def test_do_xfrin_dberror(self):
- # DB file is under a non existent directory, so its creation will fail,
- # which will make the transfer fail.
- self.conn._db_file = "not_existent/" + TEST_DB_FILE
+ def test_do_xfrin_datasrc_error(self):
+ # Emulate failure in the data source client on commit.
+ self.conn._datasrc_client.force_fail = True
+ self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
def test_do_soacheck_and_xfrin(self):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+ def test_do_soacheck_and_xfrin_with_tsig(self):
+ # We are going to have a SOA query/response transaction, followed by
+ # AXFR, all TSIG signed. xfrin should use a new TSIG context for
+ # AXFR. We are not interested in whether verify works correctly in
+ # this test, so we simply fake the results (they need to succeed for
+ # this test)
+ self.conn._tsig_key = TSIG_KEY
+ self.conn._tsig_ctx_creator = \
+ lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
+ self.soa_response_params['tsig'] = True
+ self.conn.response_generator = self._create_soa_response_data
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+ # We should've got 3 response messages: 1 SOA and two AXFR, but
+ # the context should be replaced for AXFR, so verify() should be
+ # called only twice for the latest context.
+ self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+
def test_do_soacheck_broken_response(self):
self.conn.response_generator = self._create_broken_response_data
- # XXX: TODO: this test failed here, should xfr not raise an
- # exception but simply drop and return FAIL?
- #self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- self.assertRaises(MessageTooShort, self.conn.do_xfrin, True)
+ self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
def test_do_soacheck_badqid(self):
# the QID mismatch would internally trigger a XfrinException exception,
@@ -375,41 +1262,396 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
- def _handle_xfrin_response(self):
- # This helper methods iterates over all RRs (excluding the ending SOA)
- # transferred, and simply returns the number of RRs. The return value
- # may be used an assertion value for test cases.
- rrs = 0
- for rr in self.conn._handle_xfrin_response():
- rrs += 1
- return rrs
+class TestIXFRResponse(TestXfrinConnection):
+ def setUp(self):
+ super().setUp()
+ self.conn._query_id = self.conn.qid = 1035
+ self.conn._request_serial = 1230
+ self.conn._request_type = RRType.IXFR()
+ self._zone_name = TEST_ZONE_NAME
+ self.conn._datasrc_client = MockDataSourceClient()
+ XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
- def _create_normal_response_data(self):
- # This helper method creates a simple sequence of DNS messages that
- # forms a valid XFR transaction. It consists of two messages, each
- # containing just a single SOA RR.
- self.conn.reply_data = self.conn.create_response_data()
- self.conn.reply_data += self.conn.create_response_data()
+ def test_ixfr_response(self):
+ '''A simplest form of IXFR response.
- def _create_soa_response_data(self):
- # This helper method creates a DNS message that is supposed to be
- # used a valid response to SOA queries prior to XFR.
- # If axfr_after_soa is True, it resets the response_generator so that
- # a valid XFR messages will follow.
+ It simply updates the zone's SOA one time.
+
+ '''
self.conn.reply_data = self.conn.create_response_data(
- bad_qid=self.soa_response_params['bad_qid'],
- response=self.soa_response_params['response'],
- rcode=self.soa_response_params['rcode'],
- questions=self.soa_response_params['questions'])
- if self.soa_response_params['axfr_after_soa'] != None:
- self.conn.response_generator = self.soa_response_params['axfr_after_soa']
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_sequences(self):
+ '''Similar to the previous case, but with multiple diff seqs.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset,
+ # removing one A in serial 1230
+ begin_soa_rrset, self._create_a('192.0.2.1'),
+ # adding one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.2'),
+ # removing one A in serial 1231
+ self._create_soa('1231'), self._create_a('192.0.2.3'),
+ # adding one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.4'),
+ # removing one A in serial 1232
+ self._create_soa('1232'), self._create_a('192.0.2.5'),
+ # adding one A in serial 1234
+ soa_rrset, self._create_a('192.0.2.6'),
+ soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset),
+ ('delete', self._create_a('192.0.2.1')),
+ ('add', self._create_soa('1231')),
+ ('add', self._create_a('192.0.2.2'))],
+ [('delete', self._create_soa('1231')),
+ ('delete', self._create_a('192.0.2.3')),
+ ('add', self._create_soa('1232')),
+ ('add', self._create_a('192.0.2.4'))],
+ [('delete', self._create_soa('1232')),
+ ('delete', self._create_a('192.0.2.5')),
+ ('add', soa_rrset),
+ ('add', self._create_a('192.0.2.6'))]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_multi_messages(self):
+ '''Similar to the first case, but RRs span over multiple messages.
+
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset])
+ self.conn.reply_data += self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_broken(self):
+ '''Test with a broken response.
+
+ '''
+ # SOA sequence is out-of-sync
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ # no diffs should have been committed
+ check_diffs(self.assertEqual,
+ [], self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_response_extra(self):
+ '''Test with an extra RR after the end of IXFR diff sequences.
+
+ IXFR should be rejected, but complete diff sequences should be
+ committed; it's not clear whether this is compliant with the protocol
+ specification, but it is how BIND 9 works and we do the same.
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
+ self._create_a('192.0.2.1')])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response(self):
+ '''AXFR-style IXFR response.
+
+ The response transfers the whole zone (SOA, NS, A) rather than diffs.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ # The SOA should be added exactly once, and in our implementation
+ # it should be added at the end of the sequence.
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_mismatch_soa(self):
+ '''AXFR-style IXFR response, but the two SOAs are not the same.
+
+ In the current implementation, we accept it and use the second SOA.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
+ self.conn._handle_xfrin_responses()
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.diffs)
+ check_diffs(self.assertEqual,
+ [[('add', ns_rr), ('add', a_rr),
+ ('add', begin_soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ def test_ixfr_to_axfr_response_extra(self):
+ '''Test with an extra RR after the end of AXFR-style IXFR session.
+
+ The session should be rejected, and nothing should be committed.
+
+ '''
+ ns_rr = self._create_ns()
+ a_rr = self._create_a('192.0.2.1')
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+ self.assertRaises(XfrinProtocolError,
+ self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+class TestIXFRSession(TestXfrinConnection):
+ '''Tests for a full IXFR session (query and response).
+
+ Detailed corner cases should have been covered in test_create_query()
+ and TestIXFRResponse, so we'll only check some typical cases to confirm
+ the general logic flow.
+ '''
+ def setUp(self):
+ super().setUp()
- def _create_broken_response_data(self):
- # This helper method creates a bogus "DNS message" that only contains
- # 4 octets of data. The DNS message parser will raise an exception.
- bogus_data = b'xxxx'
- self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
- self.conn.reply_data += bogus_data
+ def test_do_xfrin(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ # Check some details of the IXFR protocol processing
+ self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+ check_diffs(self.assertEqual,
+ [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+ self.conn._datasrc_client.committed_diffs)
+
+ # Check if the query was IXFR.
+ qdata = self.conn.query_data[2:]
+ qmsg = Message(Message.PARSE)
+ qmsg.from_wire(qdata, len(qdata))
+ self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
+ self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
+ self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+
+ def test_do_xfrin_fail(self):
+ '''IXFR fails due to a protocol error.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+ def test_do_xfrin_fail_bad_message(self):
+ '''IXFR fails due to a bogus DNS message.
+
+ '''
+ self._create_broken_response_data()
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+class TestXFRSessionWithSQLite3(TestXfrinConnection):
+ '''Tests for XFR sessions using an SQLite3 DB.
+
+ These are provided mainly to confirm the implementation actually works
+ in an environment closer to actual operation. So we
+ only check a few common cases; other details are tested using mock
+ data sources.
+
+ '''
+ def setUp(self):
+ self.sqlite3db_src = TESTDATA_SRCDIR + '/example.com.sqlite3'
+ self.sqlite3db_obj = TESTDATA_OBJDIR + '/example.com.sqlite3.copy'
+ self.empty_sqlite3db_obj = TESTDATA_OBJDIR + '/empty.sqlite3'
+ self.sqlite3db_cfg = "{ \"database_file\": \"" +\
+ self.sqlite3db_obj + "\"}"
+ super().setUp()
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+ if os.path.exists(self.empty_sqlite3db_obj):
+ os.unlink(self.empty_sqlite3db_obj)
+ shutil.copyfile(self.sqlite3db_src, self.sqlite3db_obj)
+ self.conn._datasrc_client = DataSourceClient("sqlite3",
+ self.sqlite3db_cfg)
+
+ def tearDown(self):
+ if os.path.exists(self.sqlite3db_obj):
+ os.unlink(self.sqlite3db_obj)
+ if os.path.exists(self.empty_sqlite3db_obj):
+ os.unlink(self.empty_sqlite3db_obj)
+
+ def get_zone_serial(self):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ self.assertEqual(ZoneFinder.SUCCESS, result)
+ self.assertEqual(1, soa.get_rdata_count())
+ return get_soa_serial(soa.get_rdata()[0])
+
+ def record_exist(self, name, type):
+ result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+ self.assertEqual(DataSourceClient.SUCCESS, result)
+ result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+ return result == ZoneFinder.SUCCESS
+
+ def test_do_ixfrin_sqlite3(self):
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self.conn.response_generator = create_ixfr_response
+
+ # Confirm xfrin succeeds and SOA is updated
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1234, self.get_zone_serial())
+
+ def test_do_ixfrin_sqlite3_fail(self):
+ '''Similar to the previous test, but xfrin fails due to an error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_ixfr_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.IXFR())],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+ self._create_soa('1235')])
+ self.conn.response_generator = create_ixfr_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(1230, self.get_zone_serial())
+
+ def test_do_ixfrin_nozone_sqlite3(self):
+ self.conn._zone_name = Name('nosuchzone.example')
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ # This should fail even before starting state transition
+ self.assertEqual(None, self.conn.get_xfrstate())
+
+ def axfr_check(self, type):
+ '''Common checks for AXFR and AXFR-style IXFR
+
+ '''
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+ answers=[soa_rrset, self._create_ns(), soa_rrset])
+ self.conn.response_generator = create_response
+
+ # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
+ self.assertEqual(1234, self.get_zone_serial())
+ self.assertFalse(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+
+ def test_do_ixfrin_axfr_sqlite3(self):
+ '''AXFR-style IXFR.
+
+ '''
+ self.axfr_check(RRType.IXFR())
+
+ def test_do_axfrin_sqlite3(self):
+ '''AXFR.
+
+ '''
+ self.axfr_check(RRType.AXFR())
+
+ def axfr_failure_check(self, type):
+ '''Similar to the previous two tests, but xfrin fails due to error.
+
+ Check the DB is not changed.
+
+ '''
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+ answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
+ self.conn.response_generator = create_response
+
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
+ self.assertEqual(1230, self.get_zone_serial())
+ self.assertTrue(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
+
+ def test_do_xfrin_axfr_sqlite3_fail(self):
+ '''Failure case for AXFR-style IXFR.
+
+ '''
+ self.axfr_failure_check(RRType.IXFR())
+
+ def test_do_axfrin_sqlite3_fail(self):
+ '''Failure case for AXFR.
+
+ '''
+ self.axfr_failure_check(RRType.AXFR())
+
+ def test_do_axfrin_nozone_sqlite3(self):
+ '''AXFR test with an empty SQLite3 DB file, thus no target zone there.
+
+ For now, we provide backward compatible behavior: xfrin will create
+ the zone in the DB file (even setting up the entire schema first).
+ Note: a future version of this test will make it fail.
+
+ '''
+ self.conn._db_file = self.empty_sqlite3db_obj
+ self.conn._datasrc_client = DataSourceClient(
+ "sqlite3",
+ "{ \"database_file\": \"" + self.empty_sqlite3db_obj + "\"}")
+ def create_response():
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+ RRType.AXFR())],
+ answers=[soa_rrset, self._create_ns(), soa_rrset])
+ self.conn.response_generator = create_response
+ self.conn._zone_name = Name('example.com')
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
+ self.assertEqual(type(XfrinAXFREnd()),
+ type(self.conn.get_xfrstate()))
+ self.assertEqual(1234, self.get_zone_serial())
+ self.assertFalse(self.record_exist(Name('dns01.example.com'),
+ RRType.A()))
class TestXfrinRecorder(unittest.TestCase):
def setUp(self):
@@ -443,6 +1685,110 @@ class TestXfrinRecorder(unittest.TestCase):
self.recorder.decrement(TEST_ZONE_NAME)
self.assertEqual(self.recorder.xfrin_in_progress(TEST_ZONE_NAME), False)
+class TestXfrinProcess(unittest.TestCase):
+ def setUp(self):
+ self.unlocked = False
+ self.conn_closed = False
+ self.do_raise_on_close = False
+ self.do_raise_on_connect = False
+ self.do_raise_on_publish = False
+ self.master = (socket.AF_INET, socket.SOCK_STREAM,
+ (TEST_MASTER_IPV4_ADDRESS, TEST_MASTER_PORT))
+
+ def tearDown(self):
+ # Whatever happens, the lock acquired in xfrin_recorder.increment
+ # must always be released. We check this condition for all test
+ # cases.
+ self.assertTrue(self.unlocked)
+
+ # Same for the connection
+ self.assertTrue(self.conn_closed)
+
+ def increment(self, zone_name):
+ '''Fake method of xfrin_recorder.increment.
+
+ '''
+ self.unlocked = False
+
+ def decrement(self, zone_name):
+ '''Fake method of xfrin_recorder.decrement.
+
+ '''
+ self.unlocked = True
+
+ def publish_xfrin_news(self, zone_name, rrclass, ret):
+ '''Fake method of server.publish_xfrin_news.
+
+ '''
+ if self.do_raise_on_publish:
+ raise XfrinTestException('Emulated exception in publish')
+
+ def connect_to_master(self, conn):
+ self.sock_fd = conn.fileno()
+ if self.do_raise_on_connect:
+ raise XfrinTestException('Emulated exception in connect')
+ return True
+
+ def conn_close(self, conn):
+ self.conn_closed = True
+ XfrinConnection.close(conn)
+ if self.do_raise_on_close:
+ raise XfrinTestException('Emulated exception in close')
+
+ def create_xfrinconn(self, sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key):
+ conn = MockXfrinConnection(sock_map, zone_name, rrclass,
+ datasrc_client, shutdown_event,
+ master_addrinfo, tsig_key)
+
+ # An awkward check that would specifically identify an old bug
+ # where initialization of XfrinConnection._tsig_ctx_creator caused
+ # a self reference and subsequently led to a reference leak.
+ orig_ref = sys.getrefcount(conn)
+ conn._tsig_ctx_creator = None
+ self.assertEqual(orig_ref, sys.getrefcount(conn))
+
+ # Replace some connection methods with our internal ones for the
+ # convenience of the tests
+ conn.connect_to_master = lambda : self.connect_to_master(conn)
+ conn.do_xfrin = lambda x, y : XFRIN_OK
+ conn.close = lambda : self.conn_close(conn)
+
+ return conn
+
+ def test_process_xfrin_normal(self):
+ # Normal, successful case. We only check that things are cleaned up
+ # at the tearDown time.
+ process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+ self.master, False, None, RRType.AXFR(),
+ self.create_xfrinconn)
+
+ def test_process_xfrin_exception_on_connect(self):
+ # connect_to_master() will raise an exception. Things must still be
+ # cleaned up.
+ self.do_raise_on_connect = True
+ process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+ self.master, False, None, RRType.AXFR(),
+ self.create_xfrinconn)
+
+ def test_process_xfrin_exception_on_close(self):
+ # connect() will result in an exception, and even the cleanup close()
+ # will fail with an exception. This would quite likely be a bug,
+ # but we deal with that case anyway.
+ self.do_raise_on_connect = True
+ self.do_raise_on_close = True
+ process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+ self.master, False, None, RRType.AXFR(),
+ self.create_xfrinconn)
+
+ def test_process_xfrin_exception_on_publish(self):
+ # xfr succeeds, but notifying the zonemgr fails with an exception.
+ # Everything must still be cleaned up.
+ self.do_raise_on_publish = True
+ process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+ self.master, False, None, RRType.AXFR(),
+ self.create_xfrinconn)
+
class TestXfrin(unittest.TestCase):
def setUp(self):
# redirect output
@@ -450,7 +1796,8 @@ class TestXfrin(unittest.TestCase):
sys.stderr = open(os.devnull, 'w')
self.xfr = MockXfrin()
self.args = {}
- self.args['zone_name'] = TEST_ZONE_NAME
+ self.args['zone_name'] = TEST_ZONE_NAME_STR
+ self.args['class'] = TEST_RRCLASS_STR
self.args['port'] = TEST_MASTER_PORT
self.args['master'] = TEST_MASTER_IPV4_ADDRESS
self.args['db_file'] = TEST_DB_FILE
@@ -464,7 +1811,8 @@ class TestXfrin(unittest.TestCase):
return self.xfr._parse_zone_name_and_class(self.args)
def _do_parse_master_port(self):
- return self.xfr._parse_master_and_port(self.args)
+ name, rrclass = self._do_parse_zone_name_class()
+ return self.xfr._parse_master_and_port(self.args, name, rrclass)
def test_parse_cmd_params(self):
name, rrclass = self._do_parse_zone_name_class()
@@ -492,7 +1840,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_bogusclass(self):
self.args['zone_class'] = 'XXX'
- self.assertRaises(XfrinException, self._do_parse_zone_name_class)
+ self.assertRaises(XfrinZoneInfoException, self._do_parse_zone_name_class)
def test_parse_cmd_params_nozone(self):
# zone name is mandatory.
@@ -502,8 +1850,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_nomaster(self):
# master address is mandatory.
del self.args['master']
- master_addrinfo = self._do_parse_master_port()
- self.assertEqual(master_addrinfo[2][0], DEFAULT_MASTER)
+ self.assertRaises(XfrinException, self._do_parse_master_port)
def test_parse_cmd_params_bad_ip4(self):
self.args['master'] = '3.3.3.3.3'
@@ -533,6 +1880,79 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_retransfer(self):
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 0)
+ self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+
+ def test_command_handler_retransfer_short_command1(self):
+ # try it when only specifying the zone name (of unknown zone)
+ # this should fail because master address is not specified.
+ short_args = {}
+ short_args['zone_name'] = TEST_ZONE_NAME_STR
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 1)
+
+ def test_command_handler_retransfer_short_command2(self):
+ # try it when only specifying the zone name (of known zone)
+ short_args = {}
+ short_args['zone_name'] = TEST_ZONE_NAME_STR
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+
+ def test_command_handler_retransfer_short_command3(self):
+ # try it when only specifying the zone name (of known zone)
+ short_args = {}
+ # test it without the trailing root dot
+ short_args['zone_name'] = TEST_ZONE_NAME_STR[:-1]
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+
+ def test_command_handler_retransfer_short_command4(self):
+ # try it when only specifying the zone name (of known zone, with
+ # different case)
+ short_args = {}
+
+ # swap the case of the zone name in our command
+ short_args['zone_name'] = TEST_ZONE_NAME_STR.swapcase()
+
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ self.assertEqual(self.xfr.command_handler("retransfer",
+ short_args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
def test_command_handler_retransfer_badcommand(self):
self.args['master'] = 'invalid'
@@ -540,13 +1960,15 @@ class TestXfrin(unittest.TestCase):
self.args)['result'][0], 1)
def test_command_handler_retransfer_quota(self):
+ self.args['master'] = TEST_MASTER_IPV4_ADDRESS
+
for i in range(self.xfr._max_transfers_in - 1):
- self.xfr.recorder.increment(str(i) + TEST_ZONE_NAME)
+ self.xfr.recorder.increment(Name(str(i) + TEST_ZONE_NAME_STR))
# there can be one more outstanding transfer.
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 0)
+ # make sure the number of xfrs would exceed the quota
- self.xfr.recorder.increment(str(self.xfr._max_transfers_in) + TEST_ZONE_NAME)
+ self.xfr.recorder.increment(Name(str(self.xfr._max_transfers_in) + TEST_ZONE_NAME_STR))
# this one should fail
self.assertEqual(self.xfr.command_handler("retransfer",
self.args)['result'][0], 1)
@@ -570,11 +1992,61 @@ class TestXfrin(unittest.TestCase):
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
self.assertEqual(self.xfr.command_handler("refresh",
self.args)['result'][0], 0)
+ self.assertEqual(TEST_MASTER_IPV6_ADDRESS,
+ self.xfr.xfrin_started_master_addr)
+ self.assertEqual(int(TEST_MASTER_PORT),
+ self.xfr.xfrin_started_master_port)
+ # By default we use AXFR (for now)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
self.args['master'] = TEST_MASTER_IPV6_ADDRESS
- # ...but right now we disable the feature due to security concerns.
+ # ...but the zone is unknown so this would return an error
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+ def test_command_handler_notify_known_zone_bad_addr(self):
+ # try it with a known zone
+ self.args['master'] = TEST_MASTER_IPV6_ADDRESS
+
+ # but use a different address in the actual command
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV4_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ # the command should now fail
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+ # also try a different port in the actual command
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV6_ADDRESS,
+ 'master_port': str(int(TEST_MASTER_PORT) + 1)
+ }
+ ]}
+ self.xfr.config_handler(zones)
+ # the command should now fail
+ self.assertEqual(self.xfr.command_handler("notify",
+ self.args)['result'][0], 1)
+
+
+ def test_command_handler_notify_known_zone(self):
+ # try it with a known zone
+ self.args['master'] = TEST_MASTER_IPV6_ADDRESS
+
+ # with a zone configuration that has a matching master address.
+ zones = { 'zones': [
+ { 'name': TEST_ZONE_NAME_STR,
+ 'master_addr': TEST_MASTER_IPV6_ADDRESS,
+ 'master_port': TEST_MASTER_PORT
+ }
+ ]}
+ self.xfr.config_handler(zones)
self.assertEqual(self.xfr.command_handler("notify",
self.args)['result'][0], 0)
@@ -586,21 +2058,202 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.xfr.config_handler({'transfers_in': 3})['result'][0], 0)
self.assertEqual(self.xfr._max_transfers_in, 3)
- def test_command_handler_masters(self):
- master_info = {'master_addr': '1.1.1.1', 'master_port':53}
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 0)
+ def _check_zones_config(self, config_given):
+ if 'transfers_in' in config_given:
+ self.assertEqual(config_given['transfers_in'],
+ self.xfr._max_transfers_in)
+ for zone_config in config_given['zones']:
+ zone_name = zone_config['name']
+ zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN())
+ self.assertEqual(str(zone_info.master_addr), zone_config['master_addr'])
+ self.assertEqual(zone_info.master_port, zone_config['master_port'])
+ if 'tsig_key' in zone_config:
+ self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
+ else:
+ self.assertIsNone(zone_info.tsig_key)
+ if 'use_ixfr' in zone_config and\
+ zone_config.get('use_ixfr'):
+ self.assertTrue(zone_info.use_ixfr)
+ else:
+ # if not set, should default to False
+ self.assertFalse(zone_info.use_ixfr)
+
+ def test_config_handler_zones(self):
+ # This test passes a number of good and bad configs, and checks whether
+ # the values are reflected in the structure that will dictate the
+ # actual behaviour. It also checks that bad values are correctly
+ # handled.
+ config1 = { 'transfers_in': 3,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53,
+ 'use_ixfr': False
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
+ self._check_zones_config(config1)
+
+ config2 = { 'transfers_in': 4,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.2',
+ 'master_port': 53,
+ 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+ 'use_ixfr': True
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
+ self._check_zones_config(config2)
+
+ # test that configuring the zone multiple times fails
+ zones = { 'transfers_in': 5,
+ 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53
+ },
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.2',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.3',
+ 'master_port': 53,
+ 'class': 'BADCLASS'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'master_addr': '192.0.2.4',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'bad..zone.',
+ 'master_addr': '192.0.2.5',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': '',
+ 'master_addr': '192.0.2.6',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': 'badaddress',
+ 'master_port': 53
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': '192.0.2.7',
+ 'master_port': 'bad_port'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ zones = { 'zones': [
+ { 'name': 'test.example',
+ 'master_addr': '192.0.2.7',
+ 'master_port': 53,
+ # using a bad TSIG key spec
+ 'tsig_key': "bad..example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ # let's also add a zone that is correct too, and make sure
+ # that the new config is not partially taken
+ zones = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.8',
+ 'master_port': 53
+ },
+ { 'name': 'test2.example.',
+ 'master_addr': '192.0.2.9',
+ 'master_port': 53,
+ 'tsig_key': 'badkey'
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(zones)['result'][0], 1)
+ # since this has failed, we should still have the previous config
+ self._check_zones_config(config2)
+
+ def test_config_handler_zones_default(self):
+ # Checking that some default config values apply. Using a separate
+ # test case to get a fresh xfr object.
+ config = { 'zones': [
+ { 'name': 'test.example.',
+ 'master_addr': '192.0.2.1',
+ 'master_port': 53,
+ }
+ ]}
+ self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+ self._check_zones_config(config)
+
+ def common_ixfr_setup(self, xfr_mode, use_ixfr):
+ # This helper method explicitly sets up a zone configuration with
+ # use_ixfr, and invokes either retransfer or refresh.
+ # Shared by some of the following test cases.
+ config = {'zones': [
+ {'name': 'example.com.',
+ 'master_addr': '192.0.2.1',
+ 'use_ixfr': use_ixfr}]}
+ self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+ self.assertEqual(self.xfr.command_handler(xfr_mode,
+ self.args)['result'][0], 0)
- master_info = {'master_addr': '1111.1.1.1', 'master_port':53 }
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 1)
+ def test_command_handler_retransfer_ixfr_enabled(self):
+ # If IXFR is explicitly enabled in config, IXFR will be used
+ self.common_ixfr_setup('retransfer', True)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
- master_info = {'master_addr': '2.2.2.2', 'master_port':530000 }
- self.assertEqual(self.xfr.config_handler(master_info)['result'][0], 1)
+ def test_command_handler_refresh_ixfr_enabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', True)
+ self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
- master_info = {'master_addr': '2.2.2.2', 'master_port':53 }
- self.xfr.config_handler(master_info)
- self.assertEqual(self.xfr._master_addr, '2.2.2.2')
- self.assertEqual(self.xfr._master_port, 53)
+ def test_command_handler_retransfer_ixfr_disabled(self):
+ # Similar to the previous case, but explicitly disabled. AXFR should
+ # be used.
+ self.common_ixfr_setup('retransfer', False)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ def test_command_handler_refresh_ixfr_disabled(self):
+ # Same for refresh
+ self.common_ixfr_setup('refresh', False)
+ self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
def raise_interrupt():
raise KeyboardInterrupt()
@@ -633,8 +2286,187 @@ class TestMain(unittest.TestCase):
MockXfrin.check_command_hook = raise_exception
main(MockXfrin, False)
+class TestXfrinProcess(unittest.TestCase):
+ """
+ Some tests for the process_xfrin function. The test class stands in for
+ the XfrinConnection class itself, so we can emulate whatever behavior we
+ might want.
+
+ Currently only tests for retry if IXFR fails.
+ """
+ def setUp(self):
+ """
+ Backs up the original class implementation so it can be restored
+ and places our own version in place of the constructor.
+
+ Also sets up several internal variables to watch what happens.
+ """
+ # This will hold a "log" of what transfers were attempted.
+ self.__transfers = []
+ # This will "log" if failures or successes happened.
+ self.__published = []
+ # How many connections were created.
+ self.__created_connections = 0
+
+ def __get_connection(self, *args):
+ """
+ Provides a "connection". To mock the connection and see what it is
+ asked to do, we pretend to be the connection.
+ """
+ self.__created_connections += 1
+ return self
+
+ def connect_to_master(self):
+ """
+ Part of pretending to be the connection. It pretends it connected
+ correctly every time.
+ """
+ return True
+
+ def do_xfrin(self, check_soa, request_type):
+ """
+ Part of pretending to be the connection. It looks up what answer should
+ be given now and logs what request happened.
+ """
+ self.__transfers.append(request_type)
+ ret = self.__rets[0]
+ self.__rets = self.__rets[1:]
+ return ret
+
+ def zone_str(self):
+ """
+ Part of pretending to be the connection. It provides the logging name
+ of the zone.
+ """
+ return "example.org/IN"
+
+ def publish_xfrin_news(self, zone_name, rrclass, ret):
+ """
+ Part of pretending to be the server as well. This just logs the
+ success/failure of the previous operation.
+ """
+ self.__published.append(ret)
+
+ def close(self):
+ """
+ Part of pretending to be the connection.
+ """
+ pass
+
+ def init_socket(self):
+ """
+ Part of pretending to be the connection.
+ """
+ pass
+
+ def __do_test(self, rets, transfers, request_type):
+ """
+ Do the actual test. The request type, prepared successes/failures,
+ and the expected sequence of transfers are passed to specify what
+ should happen in the test.
+ """
+ self.__rets = rets
+ published = rets[-1]
+ xfrin.process_xfrin(self, XfrinRecorder(), Name("example.org."),
+ RRClass.IN(), None, None, None, True, None,
+ request_type, self.__get_connection)
+ self.assertEqual([], self.__rets)
+ self.assertEqual(transfers, self.__transfers)
+ # Create a connection for each attempt
+ self.assertEqual(len(transfers), self.__created_connections)
+ self.assertEqual([published], self.__published)
+
+ def test_ixfr_ok(self):
+ """
+ Everything OK the first time, over IXFR.
+ """
+ self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+
+ def test_axfr_ok(self):
+ """
+ Everything OK the first time, over AXFR.
+ """
+ self.__do_test([XFRIN_OK], [RRType.AXFR()], RRType.AXFR())
+
+ def test_axfr_fail(self):
+ """
+ The transfer failed over AXFR. It should not be retried (we don't expect
+ to fail on AXFR but succeed on IXFR, and we didn't use IXFR in the first
+ place for some reason).
+ """
+ self.__do_test([XFRIN_FAIL], [RRType.AXFR()], RRType.AXFR())
+
+ def test_ixfr_fallback(self):
+ """
+ The transfer fails over IXFR, but succeeds over AXFR. It should fall back
+ to AXFR and report that everything is OK.
+ """
+ self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR(), RRType.AXFR()],
+ RRType.IXFR())
+
+ def test_ixfr_fail(self):
+ """
+ The transfer fails both over IXFR and AXFR. It should report failure
+ (only once) and should try both before giving up.
+ """
+ self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
+ [RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+class TestFormatting(unittest.TestCase):
+ # If the formatting functions are moved to a more general library
+ # (ticket #1379), these tests should be moved with them.
+ def test_format_zone_str(self):
+ self.assertEqual("example.com/IN",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("IN")))
+ self.assertEqual("example.com/CH",
+ format_zone_str(isc.dns.Name("example.com"),
+ isc.dns.RRClass("CH")))
+ self.assertEqual("example.org/IN",
+ format_zone_str(isc.dns.Name("example.org"),
+ isc.dns.RRClass("IN")))
+
+ def test_format_addrinfo(self):
+ # This test may need to be updated if the input type is changed,
+ # right now it is a nested tuple:
+ # (family, sockettype, (address, port))
+ # of which sockettype is ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.2:53",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.2", 53))))
+ self.assertEqual("192.0.2.1:54",
+ format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+ ("192.0.2.1", 54))))
+ self.assertEqual("[2001:db8::1]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 53))))
+ self.assertEqual("[2001:db8::2]:53",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::2", 53))))
+ self.assertEqual("[2001:db8::1]:54",
+ format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+ ("2001:db8::1", 54))))
+ self.assertEqual("/some/file",
+ format_addrinfo((socket.AF_UNIX, socket.SOCK_STREAM,
+ "/some/file")))
+ # second element of passed tuple should be ignored
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, None,
+ ("192.0.2.1", 53))))
+ self.assertEqual("192.0.2.1:53",
+ format_addrinfo((socket.AF_INET, "Just some string",
+ ("192.0.2.1", 53))))
+ self.assertRaises(TypeError, format_addrinfo, 1)
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf"))
+ self.assertRaises(TypeError, format_addrinfo,
+ (socket.AF_INET, "asdf", ()))
+
if __name__== "__main__":
try:
+ isc.log.resetUnitTestRootLogger()
unittest.main()
except KeyboardInterrupt as e:
print(e)
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 1bf46c1..911b3b3 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -1,6 +1,6 @@
#!@PYTHON@
-# Copyright (C) 2010 Internet Systems Consortium.
+# Copyright (C) 2009-2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -28,13 +28,20 @@ from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
+from isc.datasrc import DataSourceClient, ZoneFinder
import isc.net.parse
+from isc.xfrin.diff import Diff
+from isc.log_messages.xfrin_messages import *
+
+isc.log.init("b10-xfrin")
+logger = isc.log.Logger("xfrin")
+
try:
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrin process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrin] failed to import DNS module: %s\n' % str(e))
+ logger.error(XFRIN_IMPORT_DNS, str(e))
isc.util.process.rename()
@@ -56,70 +63,546 @@ XFROUT_MODULE_NAME = 'Xfrout'
ZONE_MANAGER_MODULE_NAME = 'Zonemgr'
REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr'
ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
+
+# Constants for debug levels.
+DBG_XFRIN_TRACE = logger.DBGLVL_TRACE_BASIC
+
+# These two default are currently hard-coded. For config this isn't
+# necessary, but we need these defaults for optional command arguments
+# (TODO: have similar support to get default values for command
+# arguments as we do for config options)
+DEFAULT_MASTER_PORT = 53
+DEFAULT_ZONE_CLASS = RRClass.IN()
+
__version__ = 'BIND10'
# define xfrin rcode
XFRIN_OK = 0
XFRIN_FAIL = 1
-DEFAULT_MASTER_PORT = '53'
-DEFAULT_MASTER = '127.0.0.1'
+class XfrinException(Exception):
+ pass
-def log_error(msg):
- sys.stderr.write("[b10-xfrin] %s\n" % str(msg))
+class XfrinProtocolError(Exception):
+ '''An exception raised for errors encountered in xfrin protocol handling.
+ '''
+ pass
-class XfrinException(Exception):
+class XfrinZoneInfoException(Exception):
+ """This exception is raised if there is an error in the given
+ configuration (part), or when a command does not have a required
+ argument or has bad arguments, for instance when the zone's master
+ address is not a valid IP address, when the zone does not
+ have a name, or when multiple settings are given for the same
+ zone."""
pass
+def _check_zone_name(zone_name_str):
+ """Checks if the given zone name is a valid domain name, and returns
+ it as a Name object. Raises an XfrinZoneInfoException if it is not."""
+ try:
+ # In the _zones dict, part of the key is the zone name,
+ # but due to a limitation in the Name class, we cannot
+ # directly use it as a dict key, so we use its to_text()
+ # representation instead.
+ #
+ # Downcase the name here for that reason.
+ return Name(zone_name_str, True)
+ except (EmptyLabel, TooLongLabel, BadLabelType, BadEscape,
+ TooLongName, IncompleteName) as ne:
+ raise XfrinZoneInfoException("bad zone name: " + zone_name_str + " (" + str(ne) + ")")
+
+def _check_zone_class(zone_class_str):
+ """If the given argument is a string: checks if the given class is
+ a valid one, and returns an RRClass object if so.
+ Raises XfrinZoneInfoException if not.
+ If it is None, this function returns the default RRClass.IN()"""
+ if zone_class_str is None:
+ return DEFAULT_ZONE_CLASS
+ try:
+ return RRClass(zone_class_str)
+ except InvalidRRClass as irce:
+ raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
+
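A minimal sketch of how the two helpers above are meant to behave, based on their
docstrings and bodies; the exact textual form of the downcased name assumes the
pydnspp Name API used elsewhere in this patch and is not taken from the diff itself:

    # Illustrative only -- not part of the patch.
    name = _check_zone_name('Example.COM.')              # downcased Name object
    assert name.to_text() == 'example.com.'              # assumed text form
    assert _check_zone_class(None) == DEFAULT_ZONE_CLASS # defaults to RRClass.IN()
    # Bad input raises XfrinZoneInfoException:
    #   _check_zone_name('bad..name')  /  _check_zone_class('BADCLASS')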
+def format_zone_str(zone_name, zone_class):
+ """Helper function to format a zone name and class as a string of
+ the form '<name>/<class>'.
+ Parameters:
+ zone_name (isc.dns.Name) name to format
+ zone_class (isc.dns.RRClass) class to format
+ """
+ return zone_name.to_text(True) + '/' + str(zone_class)
+
+def format_addrinfo(addrinfo):
+ """Helper function to format the addrinfo as a string of the form
+ <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+ sockets, and unknown address families, it returns a basic string
+ conversion of the third element of the passed tuple.
+ Parameters:
+ addrinfo: a 3-tuple consisting of address family, socket type, and,
+ depending on the family, either a 2-tuple with the address
+ and port, or a filename
+ """
+ try:
+ if addrinfo[0] == socket.AF_INET:
+ return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+ elif addrinfo[0] == socket.AF_INET6:
+ return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+ else:
+ return str(addrinfo[2])
+ except IndexError:
+ raise TypeError("addrinfo argument to format_addrinfo() does not "
+ "appear to be consisting of (family, socktype, (addr, port))")
+
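For reference, the expected formatting of this helper, summarized from the
test_format_addrinfo unit tests earlier in this diff (no new behavior is implied):

    # (socket.AF_INET,  socket.SOCK_STREAM, ('192.0.2.1', 53))    -> '192.0.2.1:53'
    # (socket.AF_INET6, socket.SOCK_STREAM, ('2001:db8::1', 53))  -> '[2001:db8::1]:53'
    # (socket.AF_UNIX,  socket.SOCK_STREAM, '/some/file')         -> '/some/file'
    # Anything that is not a (family, socktype, sockaddr) 3-tuple -> TypeError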
+def get_soa_serial(soa_rdata):
+ '''Extract the serial field of an SOA RDATA and return it as an integer.
+
+ We don't have to be very efficient here, so we first dump the entire RDATA
+ as a string and convert the corresponding field. This should be
+ sufficient in practice, but may not always work when the MNAME or RNAME
+ contains an (escaped) space character in its labels. Ideally there
+ should be a more direct and convenient way to get access to the SOA
+ fields.
+ '''
+ return int(soa_rdata.to_text().split()[2])
+
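A short, self-contained illustration of the field extraction described above;
the SOA text below is an arbitrary example, not data taken from the patch:

    # SOA RDATA text fields: MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM
    soa_text = 'ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200'
    assert int(soa_text.split()[2]) == 1234   # the serial is the third field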
+class XfrinState:
+ '''
+ The states of the incoming *XFR state machine.
+
+ We (will) handle both IXFR and AXFR with a single integrated state
+ machine because they cannot be distinguished immediately - an AXFR
+ response to an IXFR request can only be detected when the first two (2)
+ response RRs have already been received.
+
+ The following diagram summarizes the state transition. After sending
+ the query, xfrin starts the process with the InitialSOA state (every
+ IXFR/AXFR response begins with an SOA). When it reaches IXFREnd
+ or AXFREnd, the process successfully completes.
+
+ (AXFR or
+ (recv SOA) AXFR-style IXFR) (SOA, add)
+ InitialSOA------->FirstData------------->AXFR--------->AXFREnd
+ | | ^ (post xfr
+ | | | checks, then
+ | +--+ commit)
+ | (non SOA, add)
+ |
+ | (non SOA, delete)
+ (pure IXFR,| +-------+
+ keep handling)| (Delete SOA) V |
+ + ->IXFRDeleteSOA------>IXFRDelete--+
+ ^ |
+ (see SOA, not end, | (see SOA)|
+ commit, keep handling) | |
+ | V
+ +---------IXFRAdd<----------+IXFRAddSOA
+ (non SOA, add)| ^ | (Add SOA)
+ ----------+ |
+ |(see SOA w/ end serial, commit changes)
+ V
+ IXFREnd
+
+ Note that changes are committed for every "difference sequence"
+ (i.e. changes for one SOA update). This means when an IXFR response
+ contains multiple difference sequences and something goes wrong
+ after several commits, these changes have already been published and are
+ visible to clients even if the IXFR session is subsequently aborted.
+ It is not clear if this is valid in terms of the protocol specification.
+ Section 4 of RFC 1995 states:
+
+ An IXFR client, should only replace an older version with a newer
+ version after all the differences have been successfully processed.
+
+ If this "replacement" is for the changes of one difference sequence
+ and "all the differences" mean the changes for that sequence, this
+ implementation strictly follows what the RFC states. If this is for
+ the entire IXFR response (that may contain multiple sequences),
+ we should implement it with one big transaction and one final commit
+ at the very end.
+
+ For now, we implement it with multiple smaller commits for two
+ reasons. First, this is what BIND 9 does, and we generally port
+ the implementation logic here. BIND 9 has been supporting IXFR
+ for many years, so the fact that it still behaves this way
+ probably means it at least doesn't cause a severe operational
+ problem in practice. Second, especially because BIND 10 would
+ often use a database backend, a larger transaction could cause
+ undesirable effects, e.g. suspending normal lookups for a longer
+ period depending on the characteristics of the database. Even if
+ we find something wrong in a later sequence and abort the
+ session, we can start another incremental update from what has
+ been validated, or we can switch to AXFR to replace the zone
+ completely.
+
+ This implementation uses the state design pattern, where each state
+ is represented as a subclass of the base XfrinState class. Each concrete
+ subclass of XfrinState is assumed to define two methods: handle_rr() and
+ finish_message(). These methods handle specific part of XFR protocols
+ and (if necessary) perform the state transition.
+
+ Conceptually, XfrinState and its subclasses are a "friend" of
+ XfrinConnection and are assumed to be allowed to access its internal
+ information (even though Python does not have a strict access control
+ between different classes).
+
+ The XfrinState and its subclasses are designed to be stateless, and
+ can be used as singleton objects. For now, however, we always instantiate
+ a new object for every state transition, partly because introducing
+ singletons would make the code a bit more complicated, and partly because
+ the overhead of object instantiation wouldn't be significant for xfrin.
+
+ '''
+ def set_xfrstate(self, conn, new_state):
+ '''Set the XfrConnection to a given new state.
+
+ As a "friend" class, this method intentionally gets access to the
+ connection's "private" method.
+
+ '''
+ conn._XfrinConnection__set_xfrstate(new_state)
+
+ def handle_rr(self, conn):
+ '''Handle one RR of an XFR response message.
+
+ Depending on the state, the RR is generally added or deleted in the
+ corresponding data source, or in some special cases indicates
+ a specific transition, such as starting a new IXFR difference
+ sequence or completing the session.
+
+ All subclasses have their own specific behavior for this method, so
+ there is no default definition. If the base class version
+ is called, it's a bug in the caller, and it's reported via
+ an XfrinException exception.
+
+ This method returns a boolean value: True if the given RR was
+ fully handled and the caller should go to the next RR; False
+ if the caller needs to call this method with the (possibly) new
+ state for the same RR again.
+
+ '''
+ raise XfrinException("Internal bug: " +
+ "XfrinState.handle_rr() called directly")
+
+ def finish_message(self, conn):
+ '''Perform any final processing after handling all RRs of a response.
+
+ This method then returns a boolean indicating whether to continue
+ receiving messages. Unless we are at the end of the entire XFR
+ session, we should continue, so this default method simply returns
+ True.
+
+ '''
+ return True
+
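To make the handle_rr()/finish_message() contract described above concrete, here
is a minimal sketch (not part of the patch) of the dispatch loop the connection
is expected to run, mirroring _handle_xfrin_responses() later in this diff:
handle_rr() may ask to see the same RR again after a state transition, and
finish_message() decides whether more response messages should be read.

    # Sketch only; names other than handle_rr/finish_message/get_xfrstate
    # are hypothetical.
    def process_one_message(conn, answer_rrs):
        for rr in answer_rrs:
            rr_handled = False
            while not rr_handled:
                # The current state may transition and request the same RR again.
                rr_handled = conn.get_xfrstate().handle_rr(conn, rr)
        # True means more response messages should be read for this session.
        return conn.get_xfrstate().finish_message(conn)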
+class XfrinInitialSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+ + rr.get_type().to_text() + ' received)')
+ conn._end_serial = get_soa_serial(rr.get_rdata()[0])
+
+ # FIXME: we need to check the serial is actually greater than ours.
+ # To do so, however, we need to implement serial number arithmetic.
+ # Although it wouldn't be a big task, we'll leave it for a separate
+ # task for now. (Always performing xfr could be inefficient, but
+ # shouldn't do any harm otherwise)
+
+ self.set_xfrstate(conn, XfrinFirstData())
+ return True
+
+class XfrinFirstData(XfrinState):
+ def handle_rr(self, conn, rr):
+ '''Handle the first RR after initial SOA in an XFR session.
+
+ This state happens exactly once in an XFR session, where
+ we decide whether it's incremental update ("real" IXFR) or
+ non incremental update (AXFR or AXFR-style IXFR).
+ If we initiated IXFR and the transfer begins with two SOAs
+ (the serial of the second one being equal to our serial),
+ it's incremental; otherwise it's non incremental.
+
+ This method always returns False (unlike many other handle_rr()
+ methods) because this first RR must be examined again in the
+ determined update context.
+
+ Note that in the non incremental case the RR should normally be
+ something other than SOA, but it's still possible it's an SOA with a
+ different serial than ours. The only possible interpretation at
+ this point is that it's a non incremental update that only consists
+ of the SOA RR. It will result in a broken zone (for example, it
+ wouldn't even contain an apex NS) and should be rejected at post
+ XFR processing, but in terms of the XFR session processing we
+ accept it and move forward.
+
+ Note further that, in the half-broken SOA-only transfer case,
+ these two SOAs are supposed to be the same as stated in Section 2.2
+ of RFC 5936. We don't check that condition here, either; we'll
+ leave whether and how to deal with that situation to the end of
+ the processing of non incremental update. See also a related
+ discussion at the IETF dnsext wg:
+ http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+
+ '''
+ if conn._request_type == RRType.IXFR() and \
+ rr.get_type() == RRType.SOA() and \
+ conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
+ conn.zone_str())
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ else:
+ logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_NONINCREMENTAL_RESP,
+ conn.zone_str())
+ # We are now going to add RRs to the new zone. We need to create
+ # a Diff object. It will be used throughout the XFR session.
+ # DISABLE FOR DEBUG
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name, True)
+ self.set_xfrstate(conn, XfrinAXFR())
+ return False
+
+class XfrinIXFRDeleteSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ # this shouldn't happen; should this occur it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRDeleteSOA state')
+ # This is the beginning state of one difference sequence (changes
+ # for one SOA update). We need to create a new Diff object now.
+ conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+ conn._diff.delete_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRDelete())
+ return True
+
+class XfrinIXFRDelete(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ # This is the only place where current_serial is set
+ conn._current_serial = get_soa_serial(rr.get_rdata()[0])
+ self.set_xfrstate(conn, XfrinIXFRAddSOA())
+ return False
+ conn._diff.delete_data(rr)
+ return True
+
+class XfrinIXFRAddSOA(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() != RRType.SOA():
+ # this shouldn't happen; should this occur it means an internal
+ # bug.
+ raise XfrinException(rr.get_type().to_text() +
+ ' RR is given in IXFRAddSOA state')
+ conn._diff.add_data(rr)
+ self.set_xfrstate(conn, XfrinIXFRAdd())
+ return True
+
+class XfrinIXFRAdd(XfrinState):
+ def handle_rr(self, conn, rr):
+ if rr.get_type() == RRType.SOA():
+ soa_serial = get_soa_serial(rr.get_rdata()[0])
+ if soa_serial == conn._end_serial:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFREnd())
+ return True
+ elif soa_serial != conn._current_serial:
+ raise XfrinProtocolError('IXFR out of sync: expected ' +
+ 'serial ' +
+ str(conn._current_serial) +
+ ', got ' + str(soa_serial))
+ else:
+ conn._diff.commit()
+ self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+ return False
+ conn._diff.add_data(rr)
+ return True
+
+class XfrinIXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of IXFR diffs: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ '''Final processing after processing an entire IXFR session.
+
+ There will be more actions here, but for now we simply return False,
+ indicating there will be no more messages to receive.
+
+ '''
+ return False
+
+class XfrinAXFR(XfrinState):
+ def handle_rr(self, conn, rr):
+ """
+ Handle the RR by putting it into the zone.
+ """
+ conn._diff.add_data(rr)
+ if rr.get_type() == RRType.SOA():
+ # SOA means end. Don't commit it yet - we need to perform
+ # post-transfer checks
+
+ soa_serial = get_soa_serial(rr.get_rdata()[0])
+ if conn._end_serial != soa_serial:
+ logger.warn(XFRIN_AXFR_INCONSISTENT_SOA, conn.zone_str(),
+ conn._end_serial, soa_serial)
+
+ self.set_xfrstate(conn, XfrinAXFREnd())
+ # Yes, we've eaten this RR.
+ return True
+
+class XfrinAXFREnd(XfrinState):
+ def handle_rr(self, conn, rr):
+ raise XfrinProtocolError('Extra data after the end of AXFR: ' +
+ rr.to_text())
+
+ def finish_message(self, conn):
+ """
+ Final processing after processing an entire AXFR session.
+
+ In this process all the AXFR changes are committed to the
+ data source.
+
+ There might be more actions here, but for now we simply return False,
+ indicating there will be no more messages to receive.
+
+ """
+ conn._diff.commit()
+ return False
+
class XfrinConnection(asyncore.dispatcher):
'''Do xfrin in this class. '''
def __init__(self,
- sock_map, zone_name, rrclass, db_file, shutdown_event,
- master_addrinfo, tsig_key_str = None, verbose = False,
- idle_timeout = 60):
- ''' idle_timeout: max idle time for read data from socket.
- db_file: specify the data source file.
- check_soa: when it's true, check soa first before sending xfr query
+ sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key=None,
+ idle_timeout=60):
+ '''Constructor of the XfrinConnection class.
+
+ idle_timeout: max idle time for read data from socket.
+ datasrc_client: the data source client object used for the XFR session.
+ This will eventually replace db_file completely.
+
'''
asyncore.dispatcher.__init__(self, map=sock_map)
- self.create_socket(master_addrinfo[0], master_addrinfo[1])
+
+ # The XFR state. Conceptually this is purely private, so we emphasize
+ # the fact by the double underscore. Other classes are assumed to
+ # get access to this via get_xfrstate(), and only XfrinState classes
+ # are assumed to be allowed to modify it via __set_xfrstate().
+ self.__state = None
+
+ # Requested transfer type (RRType.AXFR or RRType.IXFR). The actual
+ # transfer type may differ due to IXFR->AXFR fallback:
+ self._request_type = None
+
+ # Zone parameters
self._zone_name = zone_name
- self._sock_map = sock_map
self._rrclass = rrclass
- self._db_file = db_file
+
+ # Data source handler
+ self._datasrc_client = datasrc_client
+
+ self._sock_map = sock_map
self._soa_rr_count = 0
self._idle_timeout = idle_timeout
- self.setblocking(1)
self._shutdown_event = shutdown_event
- self._verbose = verbose
- self._master_address = master_addrinfo[2]
+ self._master_addrinfo = master_addrinfo
+ self._tsig_key = tsig_key
self._tsig_ctx = None
- if tsig_key_str is not None:
- self._tsig_ctx = TSIGContext(TSIGKey(tsig_key_str))
+ # tsig_ctx_creator is introduced to allow tests to use a mock class for
+ # easier testing (in the normal case we always use the default)
+ self._tsig_ctx_creator = lambda key : TSIGContext(key)
+
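As a usage note for the _tsig_ctx_creator hook set just above: a test can replace
it before a query is sent in order to substitute its own TSIG context. The
MockTSIGContext name below is hypothetical and used only for illustration:

    # Sketch only (hypothetical MockTSIGContext class):
    #   conn._tsig_ctx_creator = lambda key: MockTSIGContext(key)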
+ def init_socket(self):
+ '''Initialize the underlying socket.
+
+ This is essentially a part of __init__() and is expected to be
+ called immediately after the constructor. It's separated from
+ the constructor because otherwise we might not be able to close
+ it if the constructor raises an exception after opening the socket.
+ '''
+ self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
+ self.setblocking(1)
+
+ def __set_xfrstate(self, new_state):
+ self.__state = new_state
+
+ def get_xfrstate(self):
+ return self.__state
+
+ def zone_str(self):
+ '''A convenience function for logging to include zone name and class'''
+ return format_zone_str(self._zone_name, self._rrclass)
def connect_to_master(self):
'''Connect to master in TCP.'''
try:
- self.connect(self._master_address)
+ self.connect(self._master_addrinfo[2])
return True
except socket.error as e:
- self.log_msg('Failed to connect:(%s), %s' % (self._master_address,
- str(e)))
+ logger.error(XFRIN_CONNECT_MASTER, self._master_addrinfo[2],
+ str(e))
return False
+ def _get_zone_soa(self):
+ result, finder = self._datasrc_client.find_zone(self._zone_name)
+ if result != DataSourceClient.SUCCESS:
+ raise XfrinException('Zone not found in the given data ' +
+ 'source: ' + self.zone_str())
+ result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+ None, ZoneFinder.FIND_DEFAULT)
+ if result != ZoneFinder.SUCCESS:
+ raise XfrinException('SOA RR not found in zone: ' +
+ self.zone_str())
+ # Especially for database-based zones, a working zone may be in
+ # a broken state where it has more than one SOA RR. We proactively
+ # check the condition and abort the xfr attempt if we identify it.
+ if soa_rrset.get_rdata_count() != 1:
+ raise XfrinException('Invalid number of SOA RRs for ' +
+ self.zone_str() + ': ' +
+ str(soa_rrset.get_rdata_count()))
+ return soa_rrset
+
def _create_query(self, query_type):
- '''Create dns query message. '''
+ '''Create an XFR-related query message.
+
+ query_type is either SOA, AXFR or IXFR. For type IXFR, it searches
+ the associated data source for the current SOA record to include
+ it in the query. If the corresponding zone or the SOA record
+ cannot be found, it raises an XfrinException. Note that
+ this does not necessarily mean a broken configuration; on the first
+ attempt of transfer the secondary may not have any bootstrap zone
+ information, in which case IXFR simply won't work. The xfrin
+ should then fall back to AXFR. _request_serial is recorded for
+ later use.
+ '''
msg = Message(Message.RENDER)
query_id = random.randint(0, 0xFFFF)
self._query_id = query_id
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name(self._zone_name), self._rrclass, query_type)
- msg.add_question(query_question)
+ msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+ if query_type == RRType.IXFR():
+ # get the zone finder. this must be SUCCESS (not even
+ # PARTIALMATCH) because we are specifying the zone origin name.
+ zone_soa_rr = self._get_zone_soa()
+ msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
+ self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
+ else:
+ # For AXFR, we temporarily provide backward compatible behavior
+ # where xfrin is responsible for creating the zone in the corresponding
+ # DB table. Note that the code below uses the old data source
+ # API and assumes SQLite3 in an ugly manner. We'll have to
+ # develop a better way of managing zones in a generic way and
+ # eliminate code like the one here.
+ try:
+ self._get_zone_soa()
+ except XfrinException:
+ def empty_rr_generator():
+ return []
+ isc.datasrc.sqlite3_ds.load(self._db_file,
+ self._zone_name.to_text(),
+ empty_rr_generator)
return msg
def _send_data(self, data):
@@ -136,7 +619,8 @@ class XfrinConnection(asyncore.dispatcher):
render = MessageRenderer()
# XXX Currently, python wrapper doesn't accept 'None' parameter in this case,
# we should remove the if statement and use a universal interface later.
- if self._tsig_ctx is not None:
+ if self._tsig_key is not None:
+ self._tsig_ctx = self._tsig_ctx_creator(self._tsig_key)
msg.to_wire(render, self._tsig_ctx)
else:
msg.to_wire(render)
@@ -167,6 +651,22 @@ class XfrinConnection(asyncore.dispatcher):
return data
+ def _check_response_tsig(self, msg, response_data):
+ tsig_record = msg.get_tsig_record()
+ if self._tsig_ctx is not None:
+ tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
+ if tsig_error != TSIGError.NOERROR:
+ raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+ elif tsig_record is not None:
+ # If the response includes a TSIG while we didn't sign the query,
+ # we treat it as an error. The RFC doesn't say anything about this
+ # case, but it clearly states the server must not sign a response
+ # to an unsigned request. Although we could be flexible, no sane
+ # implementation would return such a response, and since this is
+ # part of security mechanism, it's probably better to be more
+ # strict.
+ raise XfrinException('Unexpected TSIG in response')
+
def _check_soa_serial(self):
''' Compare the soa serial, if soa serial in master is less than
the soa serial in local, Finish xfrin.
@@ -174,13 +674,16 @@ class XfrinConnection(asyncore.dispatcher):
True: soa serial in master is bigger
'''
- self._send_query(RRType("SOA"))
+ self._send_query(RRType.SOA())
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
soa_response = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
msg.from_wire(soa_response)
+ # TSIG related checks, including an unexpected signed response
+ self._check_response_tsig(msg, soa_response)
+
# perform some minimal level validation. It's an open issue how
# strict we should be (see the comment in _check_response_header())
self._check_response_header(msg)
@@ -190,44 +693,48 @@ class XfrinConnection(asyncore.dispatcher):
# now.
return XFRIN_OK
- def do_xfrin(self, check_soa, ixfr_first = False):
- '''Do xfr by sending xfr request and parsing response. '''
+ def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+ '''Do an xfr session by sending xfr request and parsing responses.'''
try:
ret = XFRIN_OK
+ self._request_type = request_type
+ # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
+ # to hardcode here.
+ request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
if check_soa:
- logstr = 'SOA check for \'%s\' ' % self._zone_name
ret = self._check_soa_serial()
- logstr = 'transfer of \'%s\': AXFR ' % self._zone_name
if ret == XFRIN_OK:
- self.log_msg(logstr + 'started')
- # TODO: .AXFR() RRType.AXFR()
- self._send_query(RRType(252))
- isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
- self._handle_xfrin_response)
-
- self.log_msg(logstr + 'succeeded')
-
- except XfrinException as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
- ret = XFRIN_FAIL
- #TODO, recover data source.
- except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
+ self.zone_str())
+ self._send_query(self._request_type)
+ self.__state = XfrinInitialSOA()
+ self._handle_xfrin_responses()
+ logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
+ self.zone_str())
+
+ except (XfrinException, XfrinProtocolError) as e:
+ logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
- except UserWarning as e:
- # XXX: this is an exception from our C++ library via the
- # Boost.Python binding. It would be better to have more more
- # specific exceptions, but at this moment this is the finest
- # granularity.
- self.log_msg(e)
- self.log_msg(logstr + 'failed')
+ except Exception as e:
+ # Catching all possible exceptions like this is generally not a
+ # good practice, but handling an xfr session could result in
+ # so many types of exceptions, including ones from the DNS library
+ # or from the data source library. Eventually we'd introduce a
+ # hierarchy for exception classes from a base "ISC exception" and
+ # catch it here, but until then we need the broadest coverage so that
+ # we won't miss anything.
+
+ logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+ self.zone_str(), str(e))
ret = XFRIN_FAIL
finally:
- self.close()
+ # Make sure any remaining transaction in the diff is closed
+ # (if not yet - possible in case of xfr-level exception) as soon
+ # as possible
+ self._diff = None
return ret
@@ -247,7 +754,7 @@ class XfrinConnection(asyncore.dispatcher):
raise XfrinException('error response: %s' % msg_rcode.to_text())
if not msg.get_header_flag(Message.HEADERFLAG_QR):
- raise XfrinException('response is not a response ')
+ raise XfrinException('response is not a response')
if msg.get_qid() != self._query_id:
raise XfrinException('bad query id')
@@ -257,55 +764,30 @@ class XfrinConnection(asyncore.dispatcher):
self._check_response_header(msg)
- if msg.get_rr_count(Message.SECTION_ANSWER) == 0:
- raise XfrinException('answer section is empty')
-
if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
raise XfrinException('query section count greater than 1')
- def _handle_answer_section(self, answer_section):
- '''Return a generator for the reponse in one tcp package to a zone transfer.'''
-
- for rrset in answer_section:
- rrset_name = rrset.get_name().to_text()
- rrset_ttl = int(rrset.get_ttl().to_text())
- rrset_class = rrset.get_class().to_text()
- rrset_type = rrset.get_type().to_text()
-
- for rdata in rrset.get_rdata():
- # Count the soa record count
- if rrset.get_type() == RRType("SOA"):
- self._soa_rr_count += 1
-
- # XXX: the current DNS message parser can't preserve the
- # RR order or separete the beginning and ending SOA RRs.
- # As a short term workaround, we simply ignore the second
- # SOA, and ignore the erroneous case where the transfer
- # session doesn't end with an SOA.
- if (self._soa_rr_count == 2):
- # Avoid inserting soa record twice
- break
-
- rdata_text = rdata.to_text()
- yield (rrset_name, rrset_ttl, rrset_class, rrset_type,
- rdata_text)
-
- def _handle_xfrin_response(self):
- '''Return a generator for the response to a zone transfer. '''
- while True:
+ def _handle_xfrin_responses(self):
+ read_next_msg = True
+ while read_next_msg:
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
recvdata = self._get_request_response(msg_len)
msg = Message(Message.PARSE)
- msg.from_wire(recvdata)
+ msg.from_wire(recvdata, Message.PRESERVE_ORDER)
+
+ # TSIG related checks, including an unexpected signed response
+ self._check_response_tsig(msg, recvdata)
+
+ # Perform response status validation
self._check_response_status(msg)
- answer_section = msg.get_section(Message.SECTION_ANSWER)
- for rr in self._handle_answer_section(answer_section):
- yield rr
+ for rr in msg.get_section(Message.SECTION_ANSWER):
+ rr_handled = False
+ while not rr_handled:
+ rr_handled = self.__state.handle_rr(self, rr)
- if self._soa_rr_count == 2:
- break
+ read_next_msg = self.__state.finish_message(self)
if self._shutdown_event.is_set():
raise XfrinException('xfrin is forced to stop')
@@ -322,33 +804,102 @@ class XfrinConnection(asyncore.dispatcher):
return False
- def log_info(self, msg, type='info'):
- # Overwrite the log function, log nothing
- pass
-
- def log_msg(self, msg):
- if self._verbose:
- sys.stdout.write('[b10-xfrin] %s\n' % str(msg))
-
-
-def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo, check_soa, verbose,
- tsig_key_str):
- xfrin_recorder.increment(zone_name)
- sock_map = {}
- conn = XfrinConnection(sock_map, zone_name, rrclass, db_file,
- shutdown_event, master_addrinfo,
- tsig_key_str, verbose)
+def __process_xfrin(server, zone_name, rrclass, db_file,
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type, conn_class):
+ conn = None
+ exception = None
ret = XFRIN_FAIL
- if conn.connect_to_master():
- ret = conn.do_xfrin(check_soa)
+ try:
+ # Create a data source client used in this XFR session. Right now we
+ # still assume an sqlite3-based data source, and use both the old and new
+ # data source APIs. We also need to use a mock client for tests.
+ # As a temporary workaround for these situations, we skip the
+ # creation when the given file is None (the test case). Eventually
+ # this code will be much cleaner.
+ datasrc_client = None
+ if db_file is not None:
+ # temporary hardcoded sqlite initialization. Once we decide on
+ # the config specification, we need to update this (TODO)
+ # this may depend on #1207, or any followup ticket created for #1207
+ datasrc_type = "sqlite3"
+ datasrc_config = "{ \"database_file\": \"" + db_file + "\"}"
+ datasrc_client = DataSourceClient(datasrc_type, datasrc_config)
+
+ # Create a TCP connection for the XFR session and perform the operation.
+ sock_map = {}
+ # In case we were asked to do IXFR and that one fails, we try again with
+ # AXFR. But only if we could actually connect to the server.
+ #
+ # So we start with retry as True, which is set to false on each attempt.
+ # In the case of connected but failed IXFR, we set it to true once again.
+ retry = True
+ while retry:
+ retry = False
+ conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
+ shutdown_event, master_addrinfo, tsig_key)
+ conn.init_socket()
+ # XXX: We still need _db_file for temporary workaround in _create_query().
+ # This should be removed when we eliminate the need for the workaround.
+ conn._db_file = db_file
+ ret = XFRIN_FAIL
+ if conn.connect_to_master():
+ ret = conn.do_xfrin(check_soa, request_type)
+ if ret == XFRIN_FAIL and request_type == RRType.IXFR():
+ # IXFR failed for some reason. It might mean the server can't
+ # handle it, or we don't have the zone or we are out of sync or
+ # whatever else. So we retry with AXFR, as it may succeed
+ # in many such cases.
+ retry = True
+ request_type = RRType.AXFR()
+ logger.warn(XFRIN_XFR_TRANSFER_FALLBACK, conn.zone_str())
+ conn.close()
+ conn = None
+
+ except Exception as ex:
+ # If an exception happens, just remember it here so that we can
+ # re-raise it after cleaning things up. We don't log it here because
+ # we want to eliminate even the smallest possibility of an exception
+ # occurring in logging itself.
+ exception = ex
+
+ # asyncore.dispatcher requires an explicit close() unless its lifetime,
+ # from creation to destruction, is contained within asyncore.loop, which
+ # is not the case for us. We always close() here, whether or not do_xfrin
+ # succeeds, and even when we see an unexpected exception.
+ if conn is not None:
+ conn.close()
# Publish the zone transfer result news, so zonemgr can reset the
# zone timer, and xfrout can notify the zone's slaves if the result
# is success.
server.publish_xfrin_news(zone_name, rrclass, ret)
+
+ if exception is not None:
+ raise exception
+
+def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type, conn_class=XfrinConnection):
+ # Even though it should be rare, the main part of the xfrin session can
+ # raise an exception. To make sure the lock in xfrin_recorder is
+ # released in any case, we delegate the main part to the helper
+ # function inside the try block, catch any exception, then release the lock.
+ xfrin_recorder.increment(zone_name)
+ exception = None
+ try:
+ __process_xfrin(server, zone_name, rrclass, db_file,
+ shutdown_event, master_addrinfo, check_soa, tsig_key,
+ request_type, conn_class)
+ except Exception as ex:
+ # don't log it until we complete decrement().
+ exception = ex
xfrin_recorder.decrement(zone_name)
+ if exception is not None:
+ typestr = "AXFR" if request_type == RRType.AXFR() else "IXFR"
+ logger.error(XFRIN_XFR_PROCESS_FAILURE, typestr, zone_name.to_text(),
+ str(rrclass), str(exception))
class XfrinRecorder:
def __init__(self):
@@ -378,16 +929,115 @@ class XfrinRecorder:
self._lock.release()
return ret
+class ZoneInfo:
+ def __init__(self, config_data, module_cc):
+ """Creates a zone_info with the config data element as
+ specified by the 'zones' list in xfrin.spec. Module_cc is
+ needed to get the defaults from the specification"""
+ self._module_cc = module_cc
+ self.set_name(config_data.get('name'))
+ self.set_master_addr(config_data.get('master_addr'))
+
+ self.set_master_port(config_data.get('master_port'))
+ self.set_zone_class(config_data.get('class'))
+ self.set_tsig_key(config_data.get('tsig_key'))
+ self.set_use_ixfr(config_data.get('use_ixfr'))
+
+ def set_name(self, name_str):
+ """Set the name for this zone given a name string.
+ Raises XfrinZoneInfoException if name_str is None or if it
+ cannot be parsed."""
+ if name_str is None:
+ raise XfrinZoneInfoException("Configuration zones list "
+ "element does not contain "
+ "'name' attribute")
+ else:
+ self.name = _check_zone_name(name_str)
+
+ def set_master_addr(self, master_addr_str):
+ """Set the master address for this zone given an IP address
+ string. Raises XfrinZoneInfoException if master_addr_str is
+ None or if it cannot be parsed."""
+ if master_addr_str is None:
+ raise XfrinZoneInfoException("master address missing from config data")
+ else:
+ try:
+ self.master_addr = isc.net.parse.addr_parse(master_addr_str)
+ except ValueError:
+ logger.error(XFRIN_BAD_MASTER_ADDR_FORMAT, master_addr_str)
+ errmsg = "bad format for zone's master: " + master_addr_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_master_port(self, master_port_str):
+ """Set the master port given a port number string. If
+ master_port_str is None, the default from the specification
+ for this module will be used. Raises XfrinZoneInfoException if
+ the string contains an invalid port number"""
+ if master_port_str is None:
+ self.master_port = self._module_cc.get_default_value("zones/master_port")
+ else:
+ try:
+ self.master_port = isc.net.parse.port_parse(master_port_str)
+ except ValueError:
+ logger.error(XFRIN_BAD_MASTER_PORT_FORMAT, master_port_str)
+ errmsg = "bad format for zone's master port: " + master_port_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_zone_class(self, zone_class_str):
+ """Set the zone class given an RR class str (e.g. "IN"). If
+ zone_class_str is None, it will default to what is specified
+ in the specification file for this module. Raises
+ XfrinZoneInfoException if the string cannot be parsed."""
+ # TODO: remove _str
+ self.class_str = zone_class_str or self._module_cc.get_default_value("zones/class")
+ if zone_class_str == None:
+ #TODO rrclass->zone_class
+ self.rrclass = RRClass(self._module_cc.get_default_value("zones/class"))
+ else:
+ try:
+ self.rrclass = RRClass(zone_class_str)
+ except InvalidRRClass:
+ logger.error(XFRIN_BAD_ZONE_CLASS, zone_class_str)
+ errmsg = "invalid zone class: " + zone_class_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_tsig_key(self, tsig_key_str):
+ """Set the tsig_key for this zone, given a TSIG key string
+ representation. If tsig_key_str is None, no TSIG key will
+ be set. Raises XfrinZoneInfoException if tsig_key_str cannot
+ be parsed."""
+ if tsig_key_str is None:
+ self.tsig_key = None
+ else:
+ try:
+ self.tsig_key = TSIGKey(tsig_key_str)
+ except InvalidParameter as ipe:
+ logger.error(XFRIN_BAD_TSIG_KEY_STRING, tsig_key_str)
+ errmsg = "bad TSIG key string: " + tsig_key_str
+ raise XfrinZoneInfoException(errmsg)
+
+ def set_use_ixfr(self, use_ixfr):
+ """Set use_ixfr. If set to True, it will use
+ IXFR for incoming transfers. If set to False, it will use AXFR.
+ At this moment there is no automatic fallback"""
+ # TODO: http://bind10.isc.org/ticket/1279
+ if use_ixfr is None:
+ self.use_ixfr = \
+ self._module_cc.get_default_value("zones/use_ixfr")
+ else:
+ self.use_ixfr = use_ixfr
+
+ def get_master_addr_info(self):
+ return (self.master_addr.family, socket.SOCK_STREAM,
+ (str(self.master_addr), self.master_port))
+
class Xfrin:
- def __init__(self, verbose = False):
+ def __init__(self):
self._max_transfers_in = 10
- #TODO, this is the temp way to set the zone's master.
- self._master_addr = DEFAULT_MASTER
- self._master_port = DEFAULT_MASTER_PORT
+ self._zones = {}
self._cc_setup()
self.recorder = XfrinRecorder()
self._shutdown_event = threading.Event()
- self._verbose = verbose
def _cc_setup(self):
'''This method is used only as part of initialization, but is
@@ -402,10 +1052,7 @@ class Xfrin:
self.command_handler)
self._module_cc.start()
config_data = self._module_cc.get_full_config()
- self._max_transfers_in = config_data.get("transfers_in")
- self._master_addr = config_data.get('master_addr') or self._master_addr
- self._master_port = config_data.get('master_port') or self._master_port
- self._tsig_key_str = config_data.get('tsig_key') or None
+ self.config_handler(config_data)
def _cc_check_command(self):
'''This is a straightforward wrapper for cc.check_command,
@@ -413,22 +1060,42 @@ class Xfrin:
of unit tests.'''
self._module_cc.check_command(False)
+ def _get_zone_info(self, name, rrclass):
+ """Returns the ZoneInfo object containing the configured data
+ for the given zone name. If the zone name did not have any
+ data, returns None"""
+ return self._zones.get((name.to_text(), rrclass.to_text()))
+
+ def _add_zone_info(self, zone_info):
+ """Add the zone info. Raises a XfrinZoneInfoException if a zone
+ with the same name and class is already configured"""
+ key = (zone_info.name.to_text(), zone_info.class_str)
+ if key in self._zones:
+ raise XfrinZoneInfoException("zone " + str(key) +
+ " configured multiple times")
+ self._zones[key] = zone_info
+
+ def _clear_zone_info(self):
+ self._zones = {}
+
def config_handler(self, new_config):
+ # back up all config data (in case there is a problem with the new
+ # data)
+ old_max_transfers_in = self._max_transfers_in
+ old_zones = self._zones
+
self._max_transfers_in = new_config.get("transfers_in") or self._max_transfers_in
- self._tsig_key_str = new_config.get('tsig_key') or None
- if ('master_addr' in new_config) or ('master_port' in new_config):
- # User should change the port and address together.
- try:
- addr = new_config.get('master_addr') or self._master_addr
- port = new_config.get('master_port') or self._master_port
- isc.net.parse.addr_parse(addr)
- isc.net.parse.port_parse(port)
- self._master_addr = addr
- self._master_port = port
- except ValueError:
- errmsg = "bad format for zone's master: " + str(new_config)
- log_error(errmsg)
- return create_answer(1, errmsg)
+
+ if 'zones' in new_config:
+ self._clear_zone_info()
+ for zone_config in new_config.get('zones'):
+ try:
+ zone_info = ZoneInfo(zone_config, self._module_cc)
+ self._add_zone_info(zone_info)
+ except XfrinZoneInfoException as xce:
+ self._zones = old_zones
+ self._max_transfers_in = old_max_transfers_in
+ return create_answer(1, str(xce))
return create_answer(0)
@@ -453,58 +1120,118 @@ class Xfrin:
# The notify command may carry parameters that specify the notify-from
# address and port. According to RFC 1996, the zone transfer should start
# from the notify-from address first, but for now this is left as a TODO.
+ # (using the value now, while we can only set one master address, would be
+ # a security hole. Once we add the ability to have multiple master addresses,
+ # we should check if it matches one of them, and then use it.)
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
- (master_addr) = build_addr_info(self._master_addr, self._master_port)
- ret = self.xfrin_start(zone_name,
- rrclass,
- self._get_db_file(),
- master_addr,
- self._tsig_key_str,
- True)
- answer = create_answer(ret[0], ret[1])
+ zone_str = format_zone_str(zone_name, rrclass)
+ zone_info = self._get_zone_info(zone_name, rrclass)
+ notify_addr = self._parse_master_and_port(args, zone_name,
+ rrclass)
+ if zone_info is None:
+ # TODO what to do? no info known about zone. defaults?
+ errmsg = "Got notification to retransfer unknown zone " + zone_str
+ logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
+ answer = create_answer(1, errmsg)
+ else:
+ request_type = RRType.AXFR()
+ if zone_info.use_ixfr:
+ request_type = RRType.IXFR()
+ master_addr = zone_info.get_master_addr_info()
+ if notify_addr[0] == master_addr[0] and\
+ notify_addr[2] == master_addr[2]:
+ ret = self.xfrin_start(zone_name,
+ rrclass,
+ self._get_db_file(),
+ master_addr,
+ zone_info.tsig_key, request_type,
+ True)
+ answer = create_answer(ret[0], ret[1])
+ else:
+ notify_addr_str = format_addrinfo(notify_addr)
+ master_addr_str = format_addrinfo(master_addr)
+ errmsg = "Got notification for " + zone_str\
+ + "from unknown address: " + notify_addr_str;
+ logger.info(XFRIN_NOTIFY_UNKNOWN_MASTER, zone_str,
+ notify_addr_str, master_addr_str)
+ answer = create_answer(1, errmsg)
elif command == 'retransfer' or command == 'refresh':
# Xfrin receives the retransfer/refresh from cmdctl(sent by bindctl).
# If the command has specified master address, do transfer from the
# master address, or else do transfer from the configured masters.
(zone_name, rrclass) = self._parse_zone_name_and_class(args)
- master_addr = self._parse_master_and_port(args)
+ master_addr = self._parse_master_and_port(args, zone_name,
+ rrclass)
+ zone_info = self._get_zone_info(zone_name, rrclass)
+ tsig_key = None
+ request_type = RRType.AXFR()
+ if zone_info:
+ tsig_key = zone_info.tsig_key
+ if zone_info.use_ixfr:
+ request_type = RRType.IXFR()
db_file = args.get('db_file') or self._get_db_file()
ret = self.xfrin_start(zone_name,
rrclass,
db_file,
master_addr,
- self._tsig_key_str,
+ tsig_key, request_type,
(False if command == 'retransfer' else True))
answer = create_answer(ret[0], ret[1])
else:
answer = create_answer(1, 'unknown command: ' + command)
except XfrinException as err:
- log_error('error happened for command: %s, %s' % (command, str(err)) )
+ logger.error(XFRIN_COMMAND_ERROR, command, str(err))
answer = create_answer(1, str(err))
return answer
def _parse_zone_name_and_class(self, args):
- zone_name = args.get('zone_name')
- if not zone_name:
+ zone_name_str = args.get('zone_name')
+ if zone_name_str is None:
raise XfrinException('zone name should be provided')
- rrclass = args.get('zone_class')
- if not rrclass:
- rrclass = RRClass.IN()
+ return (_check_zone_name(zone_name_str), _check_zone_class(args.get('zone_class')))
+
+ def _parse_master_and_port(self, args, zone_name, zone_class):
+ """
+ Return tuple (family, socktype, sockaddr) for address and port in given
+ args dict.
+ IPv4 and IPv6 are the only supported addresses now, so sockaddr will be
+ (address, port). The socktype is socket.SOCK_STREAM for now.
+ """
+ # check if we have configured info about this zone, in case
+ # port or master are not specified
+ zone_info = self._get_zone_info(zone_name, zone_class)
+
+ addr_str = args.get('master')
+ if addr_str is None:
+ if zone_info is not None:
+ addr = zone_info.master_addr
+ else:
+ raise XfrinException("Master address not given or "
+ "configured for " + zone_name.to_text())
else:
try:
- rrclass = RRClass(rrclass)
- except InvalidRRClass as e:
- raise XfrinException('invalid RRClass: ' + rrclass)
-
- return zone_name, rrclass
+ addr = isc.net.parse.addr_parse(addr_str)
+ except ValueError as err:
+ raise XfrinException("failed to resolve master address %s: %s" %
+ (addr_str, str(err)))
+
+ port_str = args.get('port')
+ if port_str is None:
+ if zone_info is not None:
+ port = zone_info.master_port
+ else:
+ port = DEFAULT_MASTER_PORT
+ else:
+ try:
+ port = isc.net.parse.port_parse(port_str)
+ except ValueError as err:
+ raise XfrinException("failed to parse port=%s: %s" %
+ (port_str, str(err)))
- def _parse_master_and_port(self, args):
- port = args.get('port') or self._master_port
- master = args.get('master') or self._master_addr
- return build_addr_info(master, port)
+ return (addr.family, socket.SOCK_STREAM, (str(addr), port))
def _get_db_file(self):
#TODO, the db file path should be got in auth server's configuration
@@ -528,7 +1255,8 @@ class Xfrin:
news(command: zone_new_data_ready) to zone manager and xfrout.
if xfrin failed, just tell the bad news to zone manager, so that
it can reset the refresh timer for that zone. '''
- param = {'zone_name': zone_name, 'zone_class': zone_class.to_text()}
+ param = {'zone_name': zone_name.to_text(),
+ 'zone_class': zone_class.to_text()}
if xfr_result == XFRIN_OK:
msg = create_command(notify_out.ZONE_NEW_DATA_READY_CMD, param)
# catch the exception, in case msgq has been killed.
@@ -547,8 +1275,7 @@ class Xfrin:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error as err:
- log_error("Fail to send message to %s and %s, msgq may has been killed"
- % (XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME))
+ logger.error(XFRIN_MSGQ_SEND_ERROR, XFROUT_MODULE_NAME, ZONE_MANAGER_MODULE_NAME)
else:
msg = create_command(ZONE_XFRIN_FAILED, param)
# catch the exception, in case msgq has been killed.
@@ -560,15 +1287,14 @@ class Xfrin:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error as err:
- log_error("Fail to send message to %s, msgq may has been killed"
- % ZONE_MANAGER_MODULE_NAME)
+ logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
def startup(self):
while not self._shutdown_event.is_set():
self._cc_check_command()
- def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo, tsig_key_str,
- check_soa = True):
+ def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+ tsig_key, request_type, check_soa=True):
if "pydnspp" not in sys.modules:
return (1, "xfrin failed, can't load dns message python library: 'pydnspp'")
@@ -582,12 +1308,12 @@ class Xfrin:
xfrin_thread = threading.Thread(target = process_xfrin,
args = (self,
self.recorder,
- zone_name, rrclass,
+ zone_name,
+ rrclass,
db_file,
self._shutdown_event,
master_addrinfo, check_soa,
- self._verbose,
- tsig_key_str))
+ tsig_key, request_type))
xfrin_thread.start()
return (0, 'zone xfrin is started')
@@ -604,25 +1330,11 @@ def set_signal_handler():
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
-def build_addr_info(addrstr, portstr):
- """
- Return tuple (family, socktype, sockaddr) for given address and port.
- IPv4 and IPv6 are the only supported addresses now, so sockaddr will be
- (address, port). The socktype is socket.SOCK_STREAM for now.
- """
- try:
- port = isc.net.parse.port_parse(portstr)
- addr = isc.net.parse.addr_parse(addrstr)
- return (addr.family, socket.SOCK_STREAM, (addrstr, port))
- except ValueError as err:
- raise XfrinException("failed to resolve master address/port=%s/%s: %s" %
- (addrstr, portstr, str(err)))
-
def set_cmd_options(parser):
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ help="This option is obsolete and has no effect.")
-def main(xfrin_class, use_signal = True):
+def main(xfrin_class, use_signal=True):
"""The main loop of the Xfrin daemon.
@param xfrin_class: A class of the Xfrin object. This is normally Xfrin,
@@ -639,15 +1351,14 @@ def main(xfrin_class, use_signal = True):
if use_signal:
set_signal_handler()
- xfrind = xfrin_class(verbose = options.verbose)
+ xfrind = xfrin_class()
xfrind.startup()
except KeyboardInterrupt:
- log_error("exit b10-xfrin")
+ logger.info(XFRIN_STOPPED_BY_KEYBOARD)
except isc.cc.session.SessionError as e:
- log_error(str(e))
- log_error('Error happened! is the command channel daemon running?')
+ logger.error(XFRIN_CC_SESSION_ERROR, str(e))
except Exception as e:
- log_error(str(e))
+ logger.error(XFRIN_UNKNOWN_ERROR, str(e))
if xfrind:
xfrind.shutdown()
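For orientation, the retransfer/refresh handler above reads its parameters from an args dict; the keys below correspond one-to-one to the args.get() calls in the diff, while the values are made up for illustration:

# Illustrative only: the fields command_handler() reads for 'retransfer'
# and 'refresh'. Values are invented; keys mirror the args.get() calls above.
retransfer_args = {
    'zone_name': 'example.com.',   # required; checked by _parse_zone_name_and_class()
    'zone_class': 'IN',            # optional; defaults to IN
    'master': '192.0.2.1',         # optional; falls back to the configured master
    'port': '53',                  # optional; parsed with isc.net.parse.port_parse()
    'db_file': None,               # optional; falls back to _get_db_file()
}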
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index 46bad69..c1ba61e 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -9,21 +9,48 @@
"item_optional": false,
"item_default": 10
},
- {
- "item_name": "master_addr",
- "item_type": "string",
- "item_optional": false,
- "item_default": ""
- },
- { "item_name": "master_port",
- "item_type": "integer",
+ { "item_name": "zones",
+ "item_type": "list",
"item_optional": false,
- "item_default": 53
- },
- { "item_name": "tsig_key",
- "item_type": "string",
- "item_optional": true,
- "item_default": ""
+ "item_default": [],
+ "list_item_spec":
+ { "item_type": "map",
+ "item_name": "zone_info",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "name",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "master_addr",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ { "item_name": "master_port",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 53
+ },
+ { "item_name": "tsig_key",
+ "item_type": "string",
+ "item_optional": true
+ },
+ { "item_name": "use_ixfr",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
new file mode 100644
index 0000000..86cdec3
--- /dev/null
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -0,0 +1,148 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module. Possible reasons are a broken DNS message or a failure
+in the database connection. The error is shown in the log message.
+
+% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message. Note: due to the code structure
+this can only happen for AXFR.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+
+% XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
+The IXFR transfer of the given zone failed. This can happen in many cases:
+the remote server may not support IXFR, we may not have the SOA record
+(or the zone at all), we may be out of sync, etc. In many of these situations
+AXFR could still work, so we fall back to it in case it helps.
+
+% XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4
+An XFR session failed outside the main protocol handling. This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure. Details of the error are shown in
+the log message. In general, these errors are not really expected
+ones, and indicate an installation error or a program bug. The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete. So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears
+frequently.
+
+% XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+
+% XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded
+The XFR transfer of the given zone was successfully completed.
+
+% XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
+The given master address is not a valid IP address.
+
+% XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1
+The master port as read from the configuration is not a valid port number.
+
+% XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFRIN_BAD_ZONE_CLASS Invalid zone class: %1
+The zone class as read from the configuration is not a valid DNS class.
+
+% XFRIN_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFRIN_COMMAND_ERROR error while executing command '%1': %2
+There was an error while the given command was being processed. The
+error is given in the log message.
+
+% XFRIN_CONNECT_MASTER error connecting to master at %1: %2
+There was an error opening a connection to the master. The error is
+shown in the log message.
+
+% XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+
+% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+
+% XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+
+% XFRIN_IMPORT_DNS error importing python DNS module: %1
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+
+% XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+
+% XFRIN_STARTING starting xfrin with command line '%1'
+An informational message; this is output when the xfrin daemon starts up.
+
+% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+
+% XFRIN_UNKNOWN_ERROR unknown error: %1
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+
+% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found. This means the xfrin connection tried IXFR
+and actually got a response carrying incremental updates.
+
+% XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1
+A non-incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA. A non-incremental transfer is
+either AXFR or AXFR-style IXFR. In the latter case, it means that
+in a response to an IXFR query the first data is not an SOA, or its SOA
+serial is not equal to the requested SOA serial.
+
+% XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same. According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold. There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected. On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs. For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found. If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
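The comparison XFRIN_AXFR_INCONSISTENT_SOA documents is implemented elsewhere in this branch; a hedged sketch of the serial-only check it describes (function and parameter names are invented):

# Hypothetical sketch of the serial-only check described above: compare the
# first and last SOA serials of an AXFR and only warn on a mismatch.
def check_axfr_soa_consistency(first_serial, last_serial, zone_str):
    if first_serial != last_serial:
        # The real code would log XFRIN_AXFR_INCONSISTENT_SOA via the logger.
        print("AXFR SOAs are inconsistent for %s: %s expected, %s received"
              % (zone_str, first_serial, last_serial))
        return False
    return True

check_axfr_soa_consistency(1234, 1235, "example.com/IN")   # warns, returns False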
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index d4f021e..6100e64 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-xfrout
b10_xfroutdir = $(pkgdatadir)
b10_xfrout_DATA = xfrout.spec
-CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.pyc
man_MANS = b10-xfrout.8
-EXTRA_DIST = $(man_MANS) b10-xfrout.xml
+EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
if ENABLE_MAN
@@ -19,12 +24,21 @@ b10-xfrout.8: b10-xfrout.xml
endif
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py : xfrout_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrout_messages.mes
xfrout.spec: xfrout.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py
+b10-xfrout: xfrout.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 11916af..ace8fc9 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -1,15 +1,17 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
check-local:
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -18,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
- $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 472ef3c..0a9fd3c 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -18,10 +18,16 @@
import unittest
import os
+from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
+import isc.config
from pydnspp import *
from xfrout import *
import xfrout
+import isc.log
+import isc.acl.dns
+
+TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
# our fake socket, where we can read and insert messages
class MySocket():
@@ -85,17 +91,204 @@ class TestXfroutSession(unittest.TestCase):
msg.from_wire(self.mdata)
return msg
+ def create_mock_tsig_ctx(self, error):
+ # This helper function creates a MockTSIGContext for a given key
+ # and TSIG error to be used as a result of verify (normally faked
+ # one)
+ mock_ctx = MockTSIGContext(TSIG_KEY)
+ mock_ctx.error = error
+ return mock_ctx
+
+ def message_has_tsig(self, msg):
+ return msg.get_tsig_record() is not None
+
+ def create_request_data(self, with_tsig=False):
+ msg = Message(Message.RENDER)
+ query_id = 0x1035
+ msg.set_qid(query_id)
+ msg.set_opcode(Opcode.QUERY())
+ msg.set_rcode(Rcode.NOERROR())
+ query_question = Question(Name("example.com"), RRClass.IN(),
+ RRType.AXFR())
+ msg.add_question(query_question)
+
+ renderer = MessageRenderer()
+ if with_tsig:
+ tsig_ctx = MockTSIGContext(TSIG_KEY)
+ msg.to_wire(renderer, tsig_ctx)
+ else:
+ msg.to_wire(renderer)
+ request_data = renderer.get_data()
+ return request_data
+
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
- self.log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
- self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), self.log)
- self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
+ TSIGKeyRing(), ('127.0.0.1', 12345),
+ # When not testing ACLs, simply accept
+ isc.acl.dns.REQUEST_LOADER.load(
+ [{"action": "ACCEPT"}]),
+ {})
+ self.mdata = self.create_request_data(False)
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
def test_parse_query_message(self):
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(get_rcode.to_text(), "NOERROR")
+ # tsig signed query message
+ request_data = self.create_request_data(True)
+ # BADKEY
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOTAUTH")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ # NOERROR
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+
+ def check_transfer_acl(self, acl_setter):
+ # ACL checks, put some ACL inside
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {
+ "from": "127.0.0.1",
+ "action": "ACCEPT"
+ },
+ {
+ "from": "192.0.2.1",
+ "action": "DROP"
+ }
+ ]))
+ # Localhost (the default in this test) is accepted
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # This should be dropped completely, therefore returning None
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(None, rcode)
+ # This should be refused, therefore REFUSED
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # TSIG signed request
+ request_data = self.create_request_data(True)
+
+ # If the TSIG check fails, it should not check ACL
+ # (If it checked ACL as well, it would just drop the request)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._tsig_key_ring = TSIGKeyRing()
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOTAUTH")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+
+ # ACL using TSIG: successful case
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
+ # ACL using TSIG: key name doesn't match; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # ACL using TSIG: no TSIG; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ #
+ # ACL using IP + TSIG: both should match
+ #
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
+ "action": "ACCEPT"},
+ {"action": "REJECT"}
+ ]))
+ # both matches
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # TSIG matches, but address doesn't
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Address matches, but TSIG doesn't (not included)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Neither address nor TSIG matches
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ def test_transfer_acl(self):
+ # ACL checks only with the default ACL
+ def acl_setter(acl):
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl(self):
+ # ACL check with a per zone ACL + default ACL. The per zone ACL
+ # should match the queried zone, so it should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.com.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl_nomatch(self):
+ # similar to the previous one, but the per zone ACL doesn't match the
+ # query. The default should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.org.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+ isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_get_transfer_acl(self):
+ # set the default ACL. If there's no specific zone ACL, this one
+ # should be used.
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "ACCEPT"}])
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ self.assertEqual(acl, self.xfrsess._acl)
+
+ # install a per zone config with transfer ACL for example.com. Then
+ # that ACL will be used for example.com; for others the default ACL
+ # will still be used.
+ com_acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "REJECT"}])
+ self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+ self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+ com_acl
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('example.com'),
+ RRClass.IN()))
+ self.assertEqual(self.xfrsess._acl,
+ self.xfrsess._get_transfer_acl(Name('example.org'),
+ RRClass.IN()))
+
+ # Name matching should be case insensitive.
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+ RRClass.IN()))
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
@@ -111,6 +304,14 @@ class TestXfroutSession(unittest.TestCase):
get_msg = self.sock.read_msg()
self.assertEqual(get_msg.get_rcode().to_text(), "NXDOMAIN")
+ # tsig signed message
+ msg = self.getmsg()
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ self.xfrsess._reply_query_with_error_rcode(msg, self.sock, Rcode(3))
+ get_msg = self.sock.read_msg()
+ self.assertEqual(get_msg.get_rcode().to_text(), "NXDOMAIN")
+ self.assertTrue(self.message_has_tsig(get_msg))
+
def test_send_message(self):
msg = self.getmsg()
msg.make_response()
@@ -146,12 +347,6 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(msg.get_rcode(), rcode)
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
- def test_reply_query_with_format_error(self):
- msg = self.getmsg()
- self.xfrsess._reply_query_with_format_error(msg, self.sock)
- get_msg = self.sock.read_msg()
- self.assertEqual(get_msg.get_rcode().to_text(), "FORMERR")
-
def test_create_rrset_from_db_record(self):
rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
self.assertEqual(rrset.get_name().to_text(), "example.com.")
@@ -162,11 +357,16 @@ class TestXfroutSession(unittest.TestCase):
def test_send_message_with_last_soa(self):
rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-
msg = self.getmsg()
msg.make_response()
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, 0)
+
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, packet_neet_not_sign)
get_msg = self.sock.read_msg()
+ # no tsig context exists, so the message should not be signed
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
@@ -180,6 +380,42 @@ class TestXfroutSession(unittest.TestCase):
rdata = answer.get_rdata()
self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+ # msg is the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ # no tsig context exists, so the message should not be signed
+ self.assertFalse(self.message_has_tsig(get_msg))
+
+ def test_send_message_with_last_soa_with_tsig(self):
+ # create tsig context
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+
+ rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
+ msg = self.getmsg()
+ msg.make_response()
+
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+ # msg is not the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, packet_neet_not_sign)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
+
+ # msg is the TSIG_SIGN_EVERY_NTH one
+ # sending the message with last soa together
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
+ 0, TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+
def test_trigger_send_message_with_last_soa(self):
rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
@@ -187,15 +423,21 @@ class TestXfroutSession(unittest.TestCase):
msg = self.getmsg()
msg.make_response()
-
msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
- # give the function a value that is larger than MAX-len(rrset)
- self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, 65520)
+ # length larger than MAX-len(rrset)
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+
+ # give the function a value that is larger than MAX-len(rrset)
# this should have triggered the sending of two messages
# (1 with the rrset we added manually, and 1 that triggered
# the sending in _with_last_soa)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ packet_neet_not_sign)
get_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
@@ -208,6 +450,7 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rdata[0].to_text(), "192.0.2.1")
get_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(get_msg))
self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 0)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
@@ -223,6 +466,45 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
+ def test_trigger_send_message_with_last_soa_with_tsig(self):
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
+ msg = self.getmsg()
+ msg.make_response()
+ msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+
+ # length larger than MAX-len(rrset)
+ length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+ # packet number less than TSIG_SIGN_EVERY_NTH
+ packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+
+ # give the function a value that is larger than MAX-len(rrset)
+ # this should have triggered the sending of two messages
+ # (1 with the rrset we added manually, and 1 that triggered
+ # the sending in _with_last_soa)
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ packet_neet_not_sign)
+ get_msg = self.sock.read_msg()
+ # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
+ self.assertFalse(self.message_has_tsig(get_msg))
+ # the last packet should be tsig signed
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
+
+ # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
+ self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+ xfrout.TSIG_SIGN_EVERY_NTH)
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # the last packet should be tsig signed
+ get_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(get_msg))
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
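TSIG_SIGN_EVERY_NTH itself is defined in xfrout.py and not shown here. The cadence the tests above assert — with a TSIG context, the first packet, every Nth packet, and the final packet of a transfer are signed, while everything in between is not — can be summarized as follows (the constant's value below is a placeholder):

TSIG_SIGN_EVERY_NTH = 96      # placeholder value; the real one is in xfrout.py

def need_tsig_sign(have_tsig_ctx, packet_count, is_first, is_last):
    # Hedged restatement of the signing policy the tests assert; the real
    # decision is made inside the xfrout send path.
    if not have_tsig_ctx:
        return False
    return is_first or is_last or packet_count == TSIG_SIGN_EVERY_NTH

print(need_tsig_sign(True, TSIG_SIGN_EVERY_NTH - 1, False, False))  # False
print(need_tsig_sign(True, TSIG_SIGN_EVERY_NTH, False, False))      # True
print(need_tsig_sign(True, 3, False, True))                         # True (last packet)
print(need_tsig_sign(False, TSIG_SIGN_EVERY_NTH, True, True))       # False (no TSIG ctx)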
def test_get_rrset_len(self):
rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
self.assertEqual(82, get_rrset_len(rrset_soa))
@@ -313,9 +595,56 @@ class TestXfroutSession(unittest.TestCase):
reply_msg = self.sock.read_msg()
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
-class MyCCSession():
+ def test_reply_xfrout_query_noerror_with_tsig(self):
+ rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
+ global sqlite3_ds
+ global xfrout
+ def get_zone_soa(zonename, file):
+ return self.soa_record
+
+ def get_zone_datas(zone, file):
+ zone_rrsets = []
+ for i in range(0, 100):
+ zone_rrsets.insert(i, rrset_data)
+ return zone_rrsets
+
+ def get_rrset_len(rrset):
+ return 65520
+
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ sqlite3_ds.get_zone_datas = get_zone_datas
+ xfrout.get_rrset_len = get_rrset_len
+
+ self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
+ self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+
+ # tsig signed first packet
+ reply_msg = self.sock.read_msg()
+ self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+ self.assertTrue(self.message_has_tsig(reply_msg))
+ # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
+ for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
+ reply_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(reply_msg))
+ # TSIG_SIGN_EVERY_NTH packet has tsig
+ reply_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(reply_msg))
+
+ for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
+ reply_msg = self.sock.read_msg()
+ self.assertFalse(self.message_has_tsig(reply_msg))
+ # tsig signed last packet
+ reply_msg = self.sock.read_msg()
+ self.assertTrue(self.message_has_tsig(reply_msg))
+
+ # and it should not have sent anything else
+ self.assertEqual(0, len(self.sock.sendqueue))
+
+class MyCCSession(isc.config.ConfigData):
def __init__(self):
- pass
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
@@ -326,18 +655,42 @@ class MyCCSession():
class MyUnixSockServer(UnixSockServer):
def __init__(self):
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = threading.Event()
- self._max_transfers_out = 10
+ self._common_init()
self._cc = MyCCSession()
- self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
+ self.update_config_data(self._cc.get_full_config())
class TestUnixSockServer(unittest.TestCase):
def setUp(self):
self.write_sock, self.read_sock = socket.socketpair()
self.unix = MyUnixSockServer()
+ def test_guess_remote(self):
+ """Test we can guess the remote endpoint when we have only the
+ file descriptor. This is needed, because we get only that one
+ from auth."""
+ # We test with UDP, as it can be "connected" without other
+ # endpoint
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ if socket.has_ipv6:
+ # Don't check IPv6 address on hosts not supporting them
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ sock.connect(('::1', 12345))
+ self.assertEqual(('::1', 12345, 0, 0),
+ self.unix._guess_remote(sock.fileno()))
+ # Try when pretending there's no IPv6 support
+ # (No need to pretend when there's really no IPv6)
+ xfrout.socket.has_ipv6 = False
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ # Return it back
+ xfrout.socket.has_ipv6 = True
+
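test_guess_remote above only hands over a file descriptor, matching what xfrout receives from b10-auth. The _guess_remote() implementation is not part of this excerpt; the underlying idea is roughly to rebuild a socket object from the descriptor and ask the kernel for its peer:

import socket

# Hedged sketch of guessing the remote endpoint from a bare file descriptor;
# the real _guess_remote() in xfrout.py may differ in details.
def guess_remote(fd):
    family = socket.AF_INET6 if socket.has_ipv6 else socket.AF_INET
    sock = socket.fromfd(fd, family, socket.SOCK_STREAM)   # dup of the fd
    try:
        # getpeername() reports the peer of the underlying descriptor,
        # whatever its real address family is.
        return sock.getpeername()
    finally:
        sock.close()

# Usage mirroring the test: a connected UDP socket has a peer without any
# traffic, so the peer can be read back from a duplicate of its descriptor.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('127.0.0.1', 12345))
print(guess_remote(s.fileno()))      # ('127.0.0.1', 12345)
s.close()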
def test_receive_query_message(self):
send_msg = b"\xd6=\x00\x00\x00\x01\x00"
msg_len = struct.pack('H', socket.htons(len(send_msg)))
@@ -346,9 +699,121 @@ class TestUnixSockServer(unittest.TestCase):
recv_msg = self.unix._receive_query_message(self.read_sock)
self.assertEqual(recv_msg, send_msg)
- def test_updata_config_data(self):
+ def check_default_ACL(self):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+
+ def check_loaded_ACL(self, acl):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
+
+ def test_update_config_data(self):
+ self.check_default_ACL()
+ tsig_key_str = 'example.com:SFuWd/q99SzF8Yzd1QbB9g=='
+ tsig_key_list = [tsig_key_str]
+ bad_key_list = ['bad..example.com:SFuWd/q99SzF8Yzd1QbB9g==']
self.unix.update_config_data({'transfers_out':10 })
self.assertEqual(self.unix._max_transfers_out, 10)
+ self.assertTrue(self.unix.tsig_key_ring is not None)
+ self.check_default_ACL()
+
+ self.unix.update_config_data({'transfers_out':9,
+ 'tsig_key_ring':tsig_key_list})
+ self.assertEqual(self.unix._max_transfers_out, 9)
+ self.assertEqual(self.unix.tsig_key_ring.size(), 1)
+ self.unix.tsig_key_ring.remove(Name("example.com."))
+ self.assertEqual(self.unix.tsig_key_ring.size(), 0)
+
+ # bad tsig key
+ config_data = {'transfers_out':9, 'tsig_key_ring': bad_key_list}
+ self.assertRaises(None, self.unix.update_config_data(config_data))
+ self.assertEqual(self.unix.tsig_key_ring.size(), 0)
+
+ # Load the ACL
+ self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]})
+ self.check_loaded_ACL(self.unix._acl)
+ # Pass a wrong data there and check it does not replace the old one
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'transfer_acl': ['Something bad']})
+ self.check_loaded_ACL(self.unix._acl)
+
+ def test_zone_config_data(self):
+ # By default, there's no specific zone config
+ self.assertEqual({}, self.unix._zone_config)
+
+ # Adding config for a specific zone. The config is empty unless
+ # explicitly specified.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'class': 'IN'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class can be omitted
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class and name are stored in "normalized" form: class
+ # strings are upper-cased, names are lower-cased.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'EXAMPLE.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # invalid zone class, name will result in exceptions
+ self.assertRaises(EmptyLabel,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'bad..example'}]})
+ self.assertRaises(InvalidRRClass,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'class': 'badclass'}]})
+
+ # Configuring a couple more zones
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'},
+ {'origin': 'example.com',
+ 'class': 'CH'},
+ {'origin': 'example.org'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+ # Duplicate data: should be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com'},
+ {'origin': 'example.org'},
+ {'origin': 'example.com'}]})
+
+ def test_zone_config_data_with_acl(self):
+ # Similar to the previous test, but with transfer_acl config
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]}]})
+ acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+ self.check_loaded_ACL(acl)
+
+ # invalid ACL syntax will be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'action': 'BADACTION'}]}]})
def test_get_db_file(self):
self.assertEqual(self.unix.get_db_file(), "initdb.file")
@@ -457,7 +922,7 @@ class TestInitialization(unittest.TestCase):
self.setEnv("BIND10_XFROUT_SOCKET_FILE", None)
xfrout.init_paths()
self.assertEqual(xfrout.UNIX_SOCKET_FILE,
- "@@LOCALSTATEDIR@@/auth_xfrout_conn")
+ "@@LOCALSTATEDIR@@/@PACKAGE_NAME@/auth_xfrout_conn")
def testProvidedSocket(self):
self.setEnv("B10_FROM_BUILD", None)
@@ -466,4 +931,5 @@ class TestInitialization(unittest.TestCase):
self.assertEqual(xfrout.UNIX_SOCKET_FILE, "The/Socket/File")
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
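As an aside on the normalization the new zone_config tests above rely on, here is a minimal sketch (not part of this commit; the helper name is made up) of how a class/origin pair becomes a key of _zone_config, assuming the pydnspp bindings are importable as in the test environment:

    from pydnspp import Name, RRClass

    def zone_config_key(origin, zclass='IN'):
        # Name(..., True) downcases the labels; to_text() returns the
        # absolute form with a trailing dot, so 'EXAMPLE.com' becomes
        # 'example.com.'. RRClass.to_text() gives the canonical class text.
        return (RRClass(zclass).to_text(), Name(origin, True).to_text())

    zone_config_key('EXAMPLE.com')         # ('IN', 'example.com.')
    zone_config_key('example.com', 'CH')   # ('CH', 'example.com.')
    zone_config_key('bad..example')        # raises EmptyLabel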
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 17ca3eb..cf3b04f 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -26,7 +26,6 @@ from isc.datasrc import sqlite3_ds
from socketserver import *
import os
from isc.config.ccsession import *
-from isc.log.log import *
from isc.cc import SessionError, SessionTimeout
from isc.notify import notify_out
import isc.util.process
@@ -36,16 +35,36 @@ import errno
from optparse import OptionParser, OptionValueError
from isc.util import socketserver_mixin
+from isc.log_messages.xfrout_messages import *
+
+isc.log.init("b10-xfrout")
+logger = isc.log.Logger("xfrout")
+
try:
from libutil_io_python import *
from pydnspp import *
except ImportError as e:
# C++ loadable module may not be installed; even so the xfrout process
# must keep running, so we warn about it and move forward.
- sys.stderr.write('[b10-xfrout] failed to import DNS or isc.util.io module: %s\n' % str(e))
+ logger.error(XFROUT_IMPORT, str(e))
+
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
+from isc.acl.dns import REQUEST_LOADER
isc.util.process.rename()
+class XfroutConfigError(Exception):
+ """An exception indicating an error in updating xfrout configuration.
+
+ This exception is raised when the xfrout process encounters an error in
+ handling configuration updates. Not all syntax errors can be caught
+ at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+ validate the given configuration data itself. When it finds an error
+ it raises this exception (either directly or by converting an exception
+ from other modules) as a unified error in configuration.
+ """
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -66,15 +85,15 @@ def init_paths():
if "BIND10_XFROUT_SOCKET_FILE" in os.environ:
UNIX_SOCKET_FILE = os.environ["BIND10_XFROUT_SOCKET_FILE"]
else:
- UNIX_SOCKET_FILE = "@@LOCALSTATEDIR@@/auth_xfrout_conn"
+ UNIX_SOCKET_FILE = "@@LOCALSTATEDIR@@/@PACKAGE_NAME@/auth_xfrout_conn"
init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
VERBOSE_MODE = False
-
+# tsig sign every N axfr packets.
+TSIG_SIGN_EVERY_NTH = 96
XFROUT_MAX_MESSAGE_SIZE = 65535
@@ -86,41 +105,103 @@ def get_rrset_len(rrset):
class XfroutSession():
- def __init__(self, sock_fd, request_data, server, log):
- # The initializer for the superclass may call functions
- # that need _log to be set, so we set it first
+ def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
+ default_acl, zone_config):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
- self._log = log
+ self._tsig_key_ring = tsig_key_ring
+ self._tsig_ctx = None
+ self._tsig_len = 0
+ self._remote = remote
+ self._acl = default_acl
+ self._zone_config = zone_config
self.handle()
+ def create_tsig_ctx(self, tsig_record, tsig_key_ring):
+ return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
+ tsig_key_ring)
+
def handle(self):
''' Handle a xfrout query, send xfrout response '''
try:
self.dns_xfrout_start(self._sock_fd, self._request_data)
#TODO, avoid catching all exceptions
except Exception as e:
- self._log.log_message("error", str(e))
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
+ pass
os.close(self._sock_fd)
+ def _check_request_tsig(self, msg, request_data):
+ ''' If request has a tsig record, perform tsig related checks '''
+ tsig_record = msg.get_tsig_record()
+ if tsig_record is not None:
+ self._tsig_len = tsig_record.get_length()
+ self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+ tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
+ if tsig_error != TSIGError.NOERROR:
+ return Rcode.NOTAUTH()
+
+ return Rcode.NOERROR()
+
def _parse_query_message(self, mdata):
''' parse query message to [socket,message]'''
#TODO, need to add parseHeader() in case the message header is invalid
try:
msg = Message(Message.PARSE)
Message.from_wire(msg, mdata)
- except Exception as err:
- self._log.log_message("error", str(err))
+ except Exception as err: # Exception is too broad
+ logger.error(XFROUT_PARSE_QUERY_ERROR, err)
return Rcode.FORMERR(), None
- return Rcode.NOERROR(), msg
+ # TSIG related checks
+ rcode = self._check_request_tsig(msg, mdata)
+
+ if rcode == Rcode.NOERROR():
+ # ACL checks
+ zone_name = msg.get_question()[0].get_name()
+ zone_class = msg.get_question()[0].get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return None, None
+ elif acl_result == REJECT:
+ logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return Rcode.REFUSED(), msg
+
+ return rcode, msg
+
+ def _get_transfer_acl(self, zone_name, zone_class):
+ '''Return the ACL that should be applied for a given zone.
+
+ The zone is identified by a tuple of name and RR class.
+ If a per zone configuration for the zone exists and contains
+ transfer_acl, that ACL will be used; otherwise, the default
+ ACL will be used.
+
+ '''
+ # Internally zone names are managed in lower cased label characters,
+ # so we first need to convert the name.
+ zone_name_lower = Name(zone_name.to_text(), True)
+ config_key = (zone_class.to_text(), zone_name_lower.to_text())
+ if config_key in self._zone_config and \
+ 'transfer_acl' in self._zone_config[config_key]:
+ return self._zone_config[config_key]['transfer_acl']
+ return self._acl
def _get_query_zone_name(self, msg):
question = msg.get_question()[0]
return question.get_name().to_text()
+ def _get_query_zone_class(self, msg):
+ question = msg.get_question()[0]
+ return question.get_class().to_text()
def _send_data(self, sock_fd, data):
size = len(data)
@@ -130,32 +211,32 @@ class XfroutSession():
total_count += count
- def _send_message(self, sock_fd, msg):
+ def _send_message(self, sock_fd, msg, tsig_ctx=None):
render = MessageRenderer()
# As defined in RFC5936 section3.4, perform case-preserving name
# compression for AXFR message.
render.set_compress_mode(MessageRenderer.CASE_SENSITIVE)
render.set_length_limit(XFROUT_MAX_MESSAGE_SIZE)
- msg.to_wire(render)
+
+ # XXX Currently, python wrapper doesn't accept 'None' parameter in this case,
+ # we should remove the if statement and use a universal interface later.
+ if tsig_ctx is not None:
+ msg.to_wire(render, tsig_ctx)
+ else:
+ msg.to_wire(render)
+
header_len = struct.pack('H', socket.htons(render.get_length()))
self._send_data(sock_fd, header_len)
self._send_data(sock_fd, render.get_data())
def _reply_query_with_error_rcode(self, msg, sock_fd, rcode_):
- msg.make_response()
- msg.set_rcode(rcode_)
- self._send_message(sock_fd, msg)
-
-
- def _reply_query_with_format_error(self, msg, sock_fd):
- '''query message format isn't legal.'''
if not msg:
return # query message is invalid. send nothing back.
msg.make_response()
- msg.set_rcode(Rcode.FORMERR())
- self._send_message(sock_fd, msg)
+ msg.set_rcode(rcode_)
+ self._send_message(sock_fd, msg, self._tsig_ctx)
def _zone_has_soa(self, zone):
'''Judge if the zone has an SOA record.'''
@@ -204,22 +285,32 @@ class XfroutSession():
def dns_xfrout_start(self, sock_fd, msg_query):
rcode_, msg = self._parse_query_message(msg_query)
#TODO. create query message and parse header
- if rcode_ != Rcode.NOERROR():
- return self._reply_query_with_format_error(msg, sock_fd)
+ if rcode_ is None: # Dropped by ACL
+ return
+ elif rcode_ == Rcode.NOTAUTH() or rcode_ == Rcode.REFUSED():
+ return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
+ elif rcode_ != Rcode.NOERROR():
+ return self._reply_query_with_error_rcode(msg, sock_fd,
+ Rcode.FORMERR())
zone_name = self._get_query_zone_name(msg)
+ zone_class_str = self._get_query_zone_class(msg)
+ # TODO: should we not also include class in the check?
rcode_ = self._check_xfrout_available(zone_name)
+
if rcode_ != Rcode.NOERROR():
- self._log.log_message("info", "transfer of '%s/IN' failed: %s",
- zone_name, rcode_.to_text())
- return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
+ logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
+ zone_class_str, rcode_.to_text())
+ return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
- self._log.log_message("info", "transfer of '%s/IN': AXFR started" % zone_name)
+ logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
self._reply_xfrout_query(msg, sock_fd, zone_name)
- self._log.log_message("info", "transfer of '%s/IN': AXFR end" % zone_name)
except Exception as err:
- self._log.log_message("error", str(err))
+ logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
+ zone_class_str, str(err))
+ pass
+ logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
self._server.decrease_transfers_counter()
return
@@ -248,37 +339,43 @@ class XfroutSession():
rrset_.add_rdata(rdata_)
return rrset_
- def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len):
+ def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
+ count_since_last_tsig_sign):
'''Add the SOA record to the end of message. If it can't be
added, a new message should be created to send out the last soa .
'''
rrset_len = get_rrset_len(rrset_soa)
- if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- else:
+ if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
+ message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
+ # If a TSIG context exists, sign this packet; it is the TSIG_SIGN_EVERY_NTH message since the last signature
+ self._send_message(sock_fd, msg, self._tsig_ctx)
+ msg = self._clear_message(msg)
+ elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
+ message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
self._send_message(sock_fd, msg)
msg = self._clear_message(msg)
- msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- self._send_message(sock_fd, msg)
+ # If a TSIG context exists, sign the last packet
+ msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+ self._send_message(sock_fd, msg, self._tsig_ctx)
def _reply_xfrout_query(self, msg, sock_fd, zone_name):
#TODO, there should be a better way to insert rrset.
+ count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
msg.make_response()
msg.set_header_flag(Message.HEADERFLAG_AA)
soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
rrset_soa = self._create_rrset_from_db_record(soa_record)
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
- message_upper_len = get_rrset_len(rrset_soa)
+ message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
if self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
- self._log.log_message("info", "xfrout process is being shutdown")
+ logger.info(XFROUT_STOPPING)
return
-
# TODO: RRType.SOA() ?
if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
continue
@@ -294,28 +391,48 @@ class XfroutSession():
message_upper_len += rrset_len
continue
- self._send_message(sock_fd, msg)
+ # If a TSIG context exists, sign every Nth packet
+ if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
+ count_since_last_tsig_sign = 0
+ self._send_message(sock_fd, msg, self._tsig_ctx)
+ else:
+ self._send_message(sock_fd, msg)
+
+ count_since_last_tsig_sign += 1
msg = self._clear_message(msg)
msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
- message_upper_len = rrset_len
- self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len)
+ # Reserve tsig space for signed packet
+ if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
+ message_upper_len = rrset_len + self._tsig_len
+ else:
+ message_upper_len = rrset_len
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+ self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
+ count_since_last_tsig_sign)
+
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+ ThreadingUnixStreamServer):
'''The unix domain socket server which accept xfr query sent from auth server.'''
- def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc, log):
+ def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+ cc):
self._remove_unused_sock_file(sock_file)
self._sock_file = sock_file
socketserver_mixin.NoPollMixIn.__init__(self)
ThreadingUnixStreamServer.__init__(self, sock_file, handle_class)
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = shutdown_event
self._write_sock, self._read_sock = socket.socketpair()
- self._log = log
- self.update_config_data(config_data)
+ self._common_init()
self._cc = cc
+ self.update_config_data(config_data)
+
+ def _common_init(self):
+ '''Initialization shared with the mock server class used for tests'''
+ self._lock = threading.Lock()
+ self._transfers_counter = 0
+ self._zone_config = {}
+ self._acl = None # this will be initialized in update_config_data()
def _receive_query_message(self, sock):
''' receive request message from sock'''
@@ -341,7 +458,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
request, client_address = self.get_request()
except socket.error:
- self._log.log_message("error", "Failed to fetch request")
+ logger.error(XFROUT_FETCH_REQUEST_ERROR)
return
# Check self._shutdown_event to ensure the real shutdown comes.
@@ -355,7 +472,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
(rlist, wlist, xlist) = ([], [], [])
continue
else:
- self._log.log_message("error", "Error with select(): %s" %e)
+ logger.error(XFROUT_SOCKET_SELECT_ERROR, str(e))
break
# self.server._shutdown_event will be set by now, if it is not a false
@@ -365,9 +482,8 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
self.process_request(request)
- except:
- self._log.log_message("error", "Exception happened during processing of %s"
- % str(client_address))
+ except Exception as pre:
+ logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
break
def _handle_request_noblock(self):
@@ -386,7 +502,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
# xfrout unix socket server, to check whether there is another
# xfrout running.
if sock_fd == FD_COMM_ERROR:
- self._log.log_message("error", "Failed to receive the file descriptor for XFR connection")
+ logger.error(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
return
# receive request msg
@@ -394,16 +510,41 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
if not request_data:
return
- t = threading.Thread(target = self.finish_request,
+ t = threading.Thread(target=self.finish_request,
args = (sock_fd, request_data))
if self.daemon_threads:
t.daemon = True
t.start()
+ def _guess_remote(self, sock_fd):
+ """
+ Guess the remote address and port of the socket. sock_fd must be the
+ file descriptor of a connected socket
+ """
+ # This uses a trick: even if the socket is really IPv4 and we pretend
+ # it is IPv6, getpeername() still returns the IPv4 address. The
+ # SOCK_STREAM parameter does not seem to matter here either (the socket
+ # really is a stream socket, except in tests)
+ if socket.has_ipv6:
+ sock = socket.fromfd(sock_fd, socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ # To make it work even on hosts without IPv6 support
+ # (Any idea how to simulate this in test?)
+ sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
+ return sock.getpeername()
def finish_request(self, sock_fd, request_data):
- '''Finish one request by instantiating RequestHandlerClass.'''
- self.RequestHandlerClass(sock_fd, request_data, self, self._log)
+ '''Finish one request by instantiating RequestHandlerClass.
+
+ This method creates a XfroutSession object.
+ '''
+ self._lock.acquire()
+ acl = self._acl
+ zone_config = self._zone_config
+ self._lock.release()
+ self.RequestHandlerClass(sock_fd, request_data, self,
+ self.tsig_key_ring,
+ self._guess_remote(sock_fd), acl, zone_config)
def _remove_unused_sock_file(self, sock_file):
'''Try to remove the socket file. If the file is being used
@@ -411,8 +552,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
If it's not a socket file or nobody is listening
, it will be removed. If it can't be removed, exit from python. '''
if self._sock_file_in_use(sock_file):
- self._log.log_message("error", "Fail to start xfrout process, unix socket file '%s'"
- " is being used by another xfrout process\n" % sock_file)
+ logger.error(XFROUT_UNIX_SOCKET_FILE_IN_USE, sock_file)
sys.exit(0)
else:
if not os.path.exists(sock_file):
@@ -421,7 +561,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(sock_file)
except OSError as err:
- self._log.log_message("error", "[b10-xfrout] Fail to remove file %s: %s\n" % (sock_file, err))
+ logger.error(XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR, sock_file, str(err))
sys.exit(0)
def _sock_file_in_use(self, sock_file):
@@ -442,16 +582,83 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
try:
os.unlink(self._sock_file)
except Exception as e:
- self._log.log_message('error', str(e))
+ logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
+ pass
def update_config_data(self, new_config):
- '''Apply the new config setting of xfrout module. '''
- self._log.log_message('info', 'update config data start.')
+ '''Apply the new config setting of xfrout module.
+
+ '''
self._lock.acquire()
- self._max_transfers_out = new_config.get('transfers_out')
- self._log.log_message('info', 'max transfer out : %d', self._max_transfers_out)
+ try:
+ logger.info(XFROUT_NEW_CONFIG)
+ new_acl = self._acl
+ if 'transfer_acl' in new_config:
+ try:
+ new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl: ' +
+ str(e))
+
+ new_zone_config = self._zone_config
+ zconfig_data = new_config.get('zone_config')
+ if zconfig_data is not None:
+ new_zone_config = self.__create_zone_config(zconfig_data)
+
+ self._acl = new_acl
+ self._zone_config = new_zone_config
+ self._max_transfers_out = new_config.get('transfers_out')
+ self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ except Exception as e:
+ self._lock.release()
+ raise e
self._lock.release()
- self._log.log_message('info', 'update config data complete.')
+ logger.info(XFROUT_NEW_CONFIG_DONE)
+
+ def __create_zone_config(self, zone_config_list):
+ new_config = {}
+ for zconf in zone_config_list:
+ # convert the class, origin (name) pair. First build pydnspp
+ # object to reject invalid input.
+ zclass_str = zconf.get('class')
+ if zclass_str is None:
+ #zclass_str = 'IN' # temporary
+ zclass_str = self._cc.get_default_value('zone_config/class')
+ zclass = RRClass(zclass_str)
+ zorigin = Name(zconf['origin'], True)
+ config_key = (zclass.to_text(), zorigin.to_text())
+
+ # reject duplicate config
+ if config_key in new_config:
+ raise XfroutConfigError('Duplicate zone_config for ' +
+ str(zorigin) + '/' + str(zclass))
+
+ # create a new config entry, build any given (and known) config
+ new_config[config_key] = {}
+ if 'transfer_acl' in zconf:
+ try:
+ new_config[config_key]['transfer_acl'] = \
+ REQUEST_LOADER.load(zconf['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl ' +
+ 'for ' + zorigin.to_text() + '/' +
+ zclass_str + ': ' + str(e))
+ return new_config
+
+ def set_tsig_key_ring(self, key_list):
+ """Set the tsig_key_ring , given a TSIG key string list representation. """
+
+ # XXX add values to configure zones/tsig options
+ self.tsig_key_ring = TSIGKeyRing()
+ # If the key string list is empty, leave the key ring empty
+ if not key_list:
+ return
+
+ for key_item in key_list:
+ try:
+ self.tsig_key_ring.add(TSIGKey(key_item))
+ except InvalidParameter as ipe:
+ logger.error(XFROUT_BAD_TSIG_KEY_STRING, str(key_item))
def get_db_file(self):
file, is_default = self._cc.get_remote_config_value("Auth", "database_file")
@@ -483,30 +690,28 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
class XfroutServer:
def __init__(self):
self._unix_socket_server = None
- self._log = None
self._listen_sock_file = UNIX_SOCKET_FILE
self._shutdown_event = threading.Event()
self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
self._config_data = self._cc.get_full_config()
self._cc.start()
self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
- self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
- self._config_data.get('log_severity'), self._config_data.get('log_versions'),
- self._config_data.get('log_max_bytes'), True)
self._start_xfr_query_listener()
self._start_notifier()
def _start_xfr_query_listener(self):
'''Start a new thread to accept xfr query. '''
- self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
- self._shutdown_event, self._config_data,
- self._cc, self._log);
+ self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+ XfroutSession,
+ self._shutdown_event,
+ self._config_data,
+ self._cc)
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
listener.start()
def _start_notifier(self):
datasrc = self._unix_socket_server.get_db_file()
- self._notifier = notify_out.NotifyOut(datasrc, self._log)
+ self._notifier = notify_out.NotifyOut(datasrc)
self._notifier.dispatcher()
def send_notify(self, zone_name, zone_class):
@@ -521,11 +726,13 @@ class XfroutServer:
continue
self._config_data[key] = new_config[key]
- if self._log:
- self._log.update_config(new_config)
-
if self._unix_socket_server:
- self._unix_socket_server.update_config_data(self._config_data)
+ try:
+ self._unix_socket_server.update_config_data(self._config_data)
+ except Exception as e:
+ answer = create_answer(1,
+ "Failed to handle new configuration: " +
+ str(e))
return answer
@@ -551,7 +758,7 @@ class XfroutServer:
def command_handler(self, cmd, args):
if cmd == "shutdown":
- self._log.log_message("info", "Received shutdown command.")
+ logger.info(XFROUT_RECEIVED_SHUTDOWN_COMMAND)
self.shutdown()
answer = create_answer(0)
@@ -559,8 +766,7 @@ class XfroutServer:
zone_name = args.get('zone_name')
zone_class = args.get('zone_class')
if zone_name and zone_class:
- self._log.log_message("info", "zone '%s/%s': receive notify others command" \
- % (zone_name, zone_class))
+ logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
self.send_notify(zone_name, zone_class)
answer = create_answer(0)
else:
@@ -603,15 +809,15 @@ if '__main__' == __name__:
xfrout_server = XfroutServer()
xfrout_server.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-xfrout] exit xfrout process\n")
+ logger.info(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the command channel daemon running?\n")
- except SessionTimeout as e:
- sys.stderr.write("[b10-xfrout] Error creating xfrout, "
- "is the configuration manager running?\n")
+ logger.error(XFROUT_CC_SESSION_ERROR, str(e))
except ModuleCCSessionError as e:
- sys.stderr.write("[b10-xfrout] exit xfrout process:%s\n" % str(e))
+ logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+ except XfroutConfigError as e:
+ logger.error(XFROUT_CONFIG_ERROR, str(e))
+ except SessionTimeout as e:
+ logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
if xfrout_server:
xfrout_server.shutdown()
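A rough sketch of the sign-every-Nth counting used in _reply_xfrout_query above, with a hypothetical send(msg, sign) callback standing in for _send_message: when a TSIG context is active, the first message, every TSIG_SIGN_EVERY_NTH message after a signature, and the final message carrying the closing SOA are signed, and TSIG space is reserved only in messages that will be signed.

    TSIG_SIGN_EVERY_NTH = 96

    def send_axfr_messages(messages, send):
        # Start at the threshold so the very first message gets signed.
        count_since_last_sign = TSIG_SIGN_EVERY_NTH
        for i, msg in enumerate(messages):
            is_last = (i == len(messages) - 1)
            if count_since_last_sign == TSIG_SIGN_EVERY_NTH or is_last:
                count_since_last_sign = 0
                send(msg, sign=True)    # reserve room for the TSIG RR here
            else:
                send(msg, sign=False)
            count_since_last_sign += 1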
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 941db72..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -16,27 +16,90 @@
},
{
"item_name": "log_file",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
"item_default": "@@LOCALSTATEDIR@@/@PACKAGE@/log/Xfrout.log"
},
{
"item_name": "log_severity",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
- "item_default": "debug"
+ "item_default": "debug"
},
{
"item_name": "log_versions",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 5
+ "item_default": 5
},
{
"item_name": "log_max_bytes",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 1048576
+ "item_default": 1048576
+ },
+ {
+ "item_name": "tsig_key_ring",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec" :
+ {
+ "item_name": "tsig_key",
+ "item_type": "string",
+ "item_optional": true
+ }
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ },
+ {
+ "item_name": "zone_config",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec":
+ {
+ "item_name": "zone_config_element",
+ "item_type": "map",
+ "item_optional": true,
+ "item_default": { "origin": "" },
+ "map_item_spec": [
+ {
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
new file mode 100644
index 0000000..b2e432c
--- /dev/null
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -0,0 +1,162 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrout messages python module.
+
+% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
+A transfer out of the given zone has started.
+
+% XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+
+% XFROUT_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+
+% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+
+% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+
+% XFROUT_HANDLE_QUERY_ERROR error while handling query: %1
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and it points
+to an exception that should have been caught at a more specific place.
+However, to ensure the daemon keeps running, it is caught and reported here.
+
+% XFROUT_IMPORT error importing python module: %1
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+
+% XFROUT_NEW_CONFIG Update xfrout configuration
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+
+% XFROUT_NEW_CONFIG_DONE Update xfrout configuration done
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+
+% XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+
+% XFROUT_PARSE_QUERY_ERROR error parsing query: %1
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+
+% XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %1
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+
+% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
+The xfrout process silently dropped a request to transfer the zone to the
+given host, as required by the configured ACLs. The %1 and %2 represent the
+zone name and class, the %3 and %4 the IP address and port of the peer
+requesting the transfer.
+
+% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+The xfrout process rejected (with the REFUSED rcode) a request to transfer
+the zone to the given host because of the configured ACLs. The %1 and %2
+represent the zone name and class, the %3 and %4 the IP address and port of
+the peer requesting the transfer.
+
+% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+
+% XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+
+% XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+
+% XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+
+% XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+
+% XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+
+% XFROUT_STOPPING the xfrout daemon is shutting down
+The current transfer is aborted, as the xfrout daemon is shutting down.
+
+% XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+
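For reference, the %1, %2, ... placeholders in the definitions above are filled from the positional arguments passed to the logger; roughly, assuming the compiled isc.log_messages module is installed:

    import isc.log
    from isc.log_messages.xfrout_messages import XFROUT_AXFR_TRANSFER_STARTED

    isc.log.init("b10-xfrout")
    logger = isc.log.Logger("xfrout")
    # Logs "transfer of zone example.com./IN has started"
    logger.info(XFROUT_AXFR_TRANSFER_STARTED, "example.com.", "IN")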
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 410279a..aa427fd 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-zonemgr
b10_zonemgrdir = $(pkgdatadir)
b10_zonemgr_DATA = zonemgr.spec
-CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.pyc
man_MANS = b10-zonemgr.8
-EXTRA_DIST = $(man_MANS) b10-zonemgr.xml
+EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
if ENABLE_MAN
@@ -19,10 +24,20 @@ b10-zonemgr.8: b10-zonemgr.xml
endif
+# Build logging source file from message files
+$(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py : zonemgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/zonemgr_messages.mes
+
zonemgr.spec: zonemgr.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
-b10-zonemgr: zonemgr.py
+b10-zonemgr: zonemgr.py $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/zonemgr/b10-zonemgr.8 b/src/bin/zonemgr/b10-zonemgr.8
index fbd0602..bfc0a7b 100644
--- a/src/bin/zonemgr/b10-zonemgr.8
+++ b/src/bin/zonemgr/b10-zonemgr.8
@@ -2,12 +2,12 @@
.\" Title: b10-zonemgr
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: October 18, 2010
+.\" Date: May 19, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-ZONEMGR" "8" "October 18, 2010" "BIND10" "BIND10"
+.TH "B10\-ZONEMGR" "8" "May 19, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -46,11 +46,6 @@ receives its configurations from
The configurable settings are:
.PP
-\fIjitter_scope\fR
-defines the random jitter range subtracted from the refresh and retry timers to avoid many zones from refreshing at the same time\&. The refresh or retry time actually used is a random time between the defined refresh or retry time and it multiplied by the
-\fIjitter_scope\fR\&. This is re\-evaluated after each refresh or retry\&. This value is a real number and the maximum is 0\&.5 (half of the refresh or retry time)\&. The default is 0\&.25\&. Set to 0 to disable the jitter\&.
-.PP
-
\fIlowerbound_refresh\fR
defines the minimum SOA REFRESH time in seconds\&. The default is 10\&.
.PP
@@ -59,10 +54,36 @@ defines the minimum SOA REFRESH time in seconds\&. The default is 10\&.
defines the minimum SOA RETRY time in seconds\&. The default is 5\&.
.PP
+\fIrefresh_jitter\fR
+This value is a real number\&. The maximum amount is 0\&.5\&. The default is 0\&.25\&.
+.PP
+
+\fIreload_jitter\fR
+This value is a real number\&. The default is 0\&.75\&.
+.PP
+
\fImax_transfer_timeout\fR
defines the maximum amount of time in seconds for a transfer\&.
The default is 14400 (4 hours)\&.
.PP
+
+\fIsecondary_zones\fR
+is a list of slave zones that the
+\fBb10\-zonemgr\fR
+should keep timers for\&. The list items include the
+\fIname\fR
+(which defines the zone name) and the
+\fIclass\fR
+(which defaults to
+\(lqIN\(rq)\&.
+.PP
+(A deprecated configuration is
+\fIjitter_scope\fR
+which is superceded by
+\fIrefresh_jitter\fR
+and
+\fIreload_jitter\fR\&.)
+.PP
The configuration commands are:
.PP
@@ -107,5 +128,5 @@ The
daemon was designed in July 2010 by CNNIC for the ISC BIND 10 project\&.
.SH "COPYRIGHT"
.br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2010-2011 Internet Systems Consortium, Inc. ("ISC")
.br
diff --git a/src/bin/zonemgr/b10-zonemgr.xml b/src/bin/zonemgr/b10-zonemgr.xml
index 4d796ee..00f5d04 100644
--- a/src/bin/zonemgr/b10-zonemgr.xml
+++ b/src/bin/zonemgr/b10-zonemgr.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>October 18, 2010</date>
+ <date>May 19, 2011</date>
</refentryinfo>
<refmeta>
@@ -36,7 +36,7 @@
<docinfo>
<copyright>
- <year>2010</year>
+ <year>2010-2011</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -92,6 +92,39 @@
<para>
The configurable settings are:
</para>
+
+ <para>
+ <varname>lowerbound_refresh</varname>
+ defines the minimum SOA REFRESH time in seconds.
+ The default is 10.
+ </para>
+
+ <para>
+ <varname>lowerbound_retry</varname>
+ defines the minimum SOA RETRY time in seconds.
+ The default is 5.
+ </para>
+
+ <para>
+ <varname>refresh_jitter</varname>
+ This value is a real number.
+ The maximum amount is 0.5.
+ The default is 0.25.
+ </para>
+<!-- TODO: needs to be documented -->
+<!-- TODO: Set to 0 to disable the jitter. -->
+
+ <para>
+ <varname>reload_jitter</varname>
+ This value is a real number.
+ The default is 0.75.
+ </para>
+<!-- TODO: needs to be documented -->
+<!-- TODO: Set to 0 to disable the jitter. -->
+<!-- what does 0 do? -->
+<!-- TODO: no max? -->
+
+<!-- TODO: remove this. This is old removed config
<para>
<varname>jitter_scope</varname>
defines the random jitter range subtracted from the refresh
@@ -106,16 +139,8 @@
The default is 0.25.
Set to 0 to disable the jitter.
</para>
- <para>
- <varname>lowerbound_refresh</varname>
- defines the minimum SOA REFRESH time in seconds.
- The default is 10.
- </para>
- <para>
- <varname>lowerbound_retry</varname>
- defines the minimum SOA RETRY time in seconds.
- The default is 5.
- </para>
+-->
+
<para>
<varname>max_transfer_timeout</varname>
defines the maximum amount of time in seconds for a transfer.
@@ -123,6 +148,21 @@
The default is 14400 (4 hours).
</para>
+<!-- TODO: this duplicates list in Xfrin too -->
+ <para>
+ <varname>secondary_zones</varname> is a list of slave zones
+ that the <command>b10-zonemgr</command> should keep timers for.
+ The list items include the <varname>name</varname> (which
+ defines the zone name) and the <varname>class</varname>
+ (which defaults to <quote>IN</quote>).
+ </para>
+
+ <para>
+ (A deprecated configuration is <varname>jitter_scope</varname>
+ which is superseded by <varname>refresh_jitter</varname>
+ and <varname>reload_jitter</varname>.)
+ </para>
+
<!-- TODO: formating -->
<para>
The configuration commands are:
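A small sketch (names made up) of the jitter behaviour described above and asserted in the zonemgr tests: the timer fires at a random point between base * (1 - jitter) and base seconds from now, so 0 disables the jitter and 0.5 allows at most half of the interval to be subtracted.

    import random, time

    def next_timeout(base, jitter):
        return time.time() + base - random.uniform(0, jitter * base)

    # With an SOA REFRESH of 3600s and refresh_jitter of 0.25, the next
    # refresh is scheduled between 2700 and 3600 seconds from now.
    print(next_timeout(3600, 0.25) - time.time())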
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 496c1a4..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -3,6 +3,13 @@ PYTESTS = zonemgr_test.py
EXTRA_DIST = $(PYTESTS)
CLEANFILES = initdb.file
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -12,6 +19,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index 479ca68..80e41b3 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -152,6 +152,16 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue((time1 + 3600 * (1 - self.zone_refresh._refresh_jitter)) <= zone_timeout)
self.assertTrue(zone_timeout <= time2 + 3600)
+ # No soa rdata
+ self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_soa_rdata"] = None
+ time3 = time.time()
+ self.zone_refresh._set_zone_retry_timer(ZONE_NAME_CLASS1_IN)
+ zone_timeout = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["next_refresh_time"]
+ time4 = time.time()
+ self.assertTrue((time3 + self.zone_refresh._lowerbound_retry * (1 - self.zone_refresh._refresh_jitter))
+ <= zone_timeout)
+ self.assertTrue(zone_timeout <= time4 + self.zone_refresh._lowerbound_retry)
+
def test_zone_not_exist(self):
self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_IN))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_CH))
@@ -304,8 +314,8 @@ class TestZonemgrRefresh(unittest.TestCase):
def get_zone_soa2(zone_name, db_file):
return None
sqlite3_ds.get_zone_soa = get_zone_soa2
- self.assertRaises(ZonemgrException, self.zone_refresh.zonemgr_add_zone, \
- ZONE_NAME_CLASS1_IN)
+ self.zone_refresh.zonemgr_add_zone(ZONE_NAME_CLASS2_IN)
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS2_IN]["zone_soa_rdata"] is None)
sqlite3_ds.get_zone_soa = old_get_zone_soa
def test_zone_handle_notify(self):
@@ -362,6 +372,15 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_CH)
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_IN)
+ old_get_zone_soa = sqlite3_ds.get_zone_soa
+ def get_zone_soa(zone_name, db_file):
+ return None
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
+ self.assertEqual(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"],
+ ZONE_EXPIRED)
+ sqlite3_ds.get_zone_soa = old_get_zone_soa
+
def test_find_need_do_refresh_zone(self):
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
@@ -434,6 +453,16 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue(zone_state == ZONE_REFRESHING)
def test_update_config_data(self):
+ # make sure it doesn't fail if we only provide secondary zones
+ config_data = {
+ "secondary_zones": [ { "name": "example.net.",
+ "class": "IN" } ]
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh._zonemgr_refresh_info)
+
+ # update all values
config_data = {
"lowerbound_refresh" : 60,
"lowerbound_retry" : 30,
@@ -449,6 +478,55 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
self.assertEqual(0.75, self.zone_refresh._reload_jitter)
+ # make sure they are not reset when we only update one
+ config_data = {
+ "reload_jitter" : 0.35,
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(30, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.35, self.zone_refresh._reload_jitter)
+
+ # a zone whose SOA cannot be loaded is no longer an error: its
+ # SOA is recorded as unknown and the rest of the config still applies
+ config_data = {
+ "lowerbound_refresh" : 61,
+ "lowerbound_retry" : 31,
+ "max_transfer_timeout" : 19801,
+ "refresh_jitter" : 0.21,
+ "reload_jitter" : 0.71,
+ "secondary_zones": [ { "name": "doesnotexist",
+ "class": "IN" } ]
+ }
+ self.zone_refresh.update_config_data(config_data)
+ name_class = ("doesnotexist.", "IN")
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ # The other configs should be updated successfully
+ self.assertEqual(61, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(31, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19801, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.21, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.71, self.zone_refresh._reload_jitter)
+
+ # Make sure we accept 0 as a value
+ config_data = {
+ "lowerbound_refresh" : 60,
+ "lowerbound_retry" : 30,
+ "max_transfer_timeout" : 19800,
+ "refresh_jitter" : 0,
+ "reload_jitter" : 0.75,
+ "secondary_zones": []
+ }
+ self.zone_refresh.update_config_data(config_data)
+ self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(30, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.75, self.zone_refresh._reload_jitter)
+
def test_shutdown(self):
self.zone_refresh._check_sock = self.zone_refresh._master_socket
listener = self.zone_refresh.run_timer()
@@ -471,10 +549,11 @@ class TestZonemgrRefresh(unittest.TestCase):
self.zone_refresh._zonemgr_refresh_info)
# This one does not exist
config.set_zone_list_from_name_classes(["example.net", "CH"])
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data, config)
- # So it should not affect the old ones
- self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh.update_config_data(config)
+ self.assertFalse(("example.net.", "CH") in
+ self.zone_refresh._zonemgr_refresh_info)
+ # Loading the SOA for that zone is simply skipped; the other configs are still updated successfully
+ self.assertFalse(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
# Make sure it works even when we "accidentally" forget the final dot
config.set_zone_list_from_name_classes([("example.net", "IN")])
@@ -541,15 +620,18 @@ class TestZonemgr(unittest.TestCase):
config_data3 = {"refresh_jitter" : 0.7}
self.zonemgr.config_handler(config_data3)
self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
- # The zone doesn't exist in database, it should be rejected
+ # The zone doesn't exist in the database; simply skip loading its SOA and log a warning
self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
config_data1)
config_data1["secondary_zones"] = [{"name": "nonexistent.example",
"class": "IN"}]
- self.assertNotEqual(self.zonemgr.config_handler(config_data1),
- {"result": [0]})
- # As it is rejected, the old value should be kept
- self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
+ self.assertEqual(self.zonemgr.config_handler(config_data1),
+ {"result": [0]})
+ # other configs should be updated successfully
+ name_class = ("nonexistent.example.", "IN")
+ self.assertTrue(self.zonemgr._zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
def test_get_db_file(self):
self.assertEqual("initdb.file", self.zonemgr.get_db_file())
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index cc6d7b9..5bdb765 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,6 +37,16 @@ from isc.datasrc import sqlite3_ds
from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
import isc.util.process
+from isc.log_messages.zonemgr_messages import *
+
+# Initialize logging for called modules.
+isc.log.init("b10-zonemgr")
+logger = isc.log.Logger("zonemgr")
+
+# Constants for debug levels.
+DBG_START_SHUT = logger.DBGLVL_START_SHUT
+DBG_ZONEMGR_COMMAND = logger.DBGLVL_COMMAND
+DBG_ZONEMGR_BASIC = logger.DBGLVL_TRACE_BASIC
isc.util.process.rename()
@@ -77,13 +87,6 @@ REFRESH_OFFSET = 3
RETRY_OFFSET = 4
EXPIRED_OFFSET = 5
-# verbose mode
-VERBOSE_MODE = False
-
-def log_msg(msg):
- if VERBOSE_MODE:
- sys.stdout.write("[b10-zonemgr] %s\n" % str(msg))
-
class ZonemgrException(Exception):
pass
@@ -93,7 +96,6 @@ class ZonemgrRefresh:
do zone refresh.
Zone timers can be started by calling run_timer(), and it
can be stopped by calling shutdown() in another thread.
-
"""
def __init__(self, cc, db_file, slave_socket, config_data):
@@ -101,6 +103,11 @@ class ZonemgrRefresh:
self._check_sock = slave_socket
self._db_file = db_file
self._zonemgr_refresh_info = {}
+ self._lowerbound_refresh = None
+ self._lowerbound_retry = None
+ self._max_transfer_timeout = None
+ self._refresh_jitter = None
+ self._reload_jitter = None
self.update_config_data(config_data)
self._running = False
@@ -135,7 +142,10 @@ class ZonemgrRefresh:
"""Set zone next refresh time after zone refresh fail.
now + retry - retry_jitter <= next_refresh_time <= now + retry
"""
- zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ if (self._get_zone_soa_rdata(zone_name_class) is not None):
+ zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ else:
+ zone_retry_time = 0.0
zone_retry_time = max(self._lowerbound_retry, zone_retry_time)
self._set_zone_timer(zone_name_class, zone_retry_time, self._refresh_jitter * zone_retry_time)
@@ -152,6 +162,7 @@ class ZonemgrRefresh:
def zone_refresh_success(self, zone_name_class):
"""Update zone info after zone refresh success"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_SUCCESS, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
self.zonemgr_reload_zone(zone_name_class)
@@ -162,10 +173,12 @@ class ZonemgrRefresh:
def zone_refresh_fail(self, zone_name_class):
"""Update zone info after zone refresh fail"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_FAIL, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
# Is zone expired?
- if (self._zone_is_expired(zone_name_class)):
+ if ((self._get_zone_soa_rdata(zone_name_class) is None) or
+ self._zone_is_expired(zone_name_class)):
self._set_zone_state(zone_name_class, ZONE_EXPIRED)
else:
self._set_zone_state(zone_name_class, ZONE_OK)
@@ -174,6 +187,7 @@ class ZonemgrRefresh:
def zone_handle_notify(self, zone_name_class, master):
"""Handle zone notify"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_NOTIFIED, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Notified zone (%s, %s) "
"doesn't belong to zonemgr" % zone_name_class)
self._set_zone_notifier_master(zone_name_class, master)
@@ -186,19 +200,23 @@ class ZonemgrRefresh:
def zonemgr_add_zone(self, zone_name_class):
""" Add a zone into zone manager."""
- log_msg("Loading zone (%s, %s)" % zone_name_class)
+
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
zone_info = {}
zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
- if not zone_soa:
- raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
- zone_info["zone_soa_rdata"] = zone_soa[7]
+ if zone_soa is None:
+ logger.warn(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
+ zone_info["zone_soa_rdata"] = None
+ zone_reload_time = 0.0
+ else:
+ zone_info["zone_soa_rdata"] = zone_soa[7]
+ zone_reload_time = float(zone_soa[7].split(" ")[RETRY_OFFSET])
zone_info["zone_state"] = ZONE_OK
zone_info["last_refresh_time"] = self._get_current_time()
self._zonemgr_refresh_info[zone_name_class] = zone_info
# Imposes some random jitters to avoid many zones need to do refresh at the same time.
- zone_reload_jitter = float(zone_soa[7].split(" ")[RETRY_OFFSET])
- zone_reload_jitter = max(self._lowerbound_retry, zone_reload_jitter)
- self._set_zone_timer(zone_name_class, zone_reload_jitter, self._reload_jitter * zone_reload_jitter)
+ zone_reload_time = max(self._lowerbound_retry, zone_reload_time)
+ self._set_zone_timer(zone_name_class, zone_reload_time, self._reload_jitter * zone_reload_time)
def _zone_is_expired(self, zone_name_class):
"""Judge whether a zone is expired or not."""
@@ -260,7 +278,7 @@ class ZonemgrRefresh:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error:
- sys.stderr.write("[b10-zonemgr] Failed to send to module %s, the session has been closed." % module_name)
+ logger.error(ZONEMGR_SEND_FAIL, module_name)
def _find_need_do_refresh_zone(self):
"""Find the first zone need do refresh, if no zone need
@@ -269,7 +287,8 @@ class ZonemgrRefresh:
zone_need_refresh = None
for zone_name_class in self._zonemgr_refresh_info.keys():
zone_state = self._get_zone_state(zone_name_class)
- # If hasn't received refresh response but are within refresh timeout, skip the zone
+ # If a refresh response hasn't been received but we are still within
+ # the refresh timeout, skip the zone
if (ZONE_REFRESHING == zone_state and
(self._get_zone_refresh_timeout(zone_name_class) > self._get_current_time())):
continue
@@ -289,7 +308,7 @@ class ZonemgrRefresh:
def _do_refresh(self, zone_name_class):
"""Do zone refresh."""
- log_msg("Do refresh for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_REFRESH_ZONE, zone_name_class[0], zone_name_class[1])
self._set_zone_state(zone_name_class, ZONE_REFRESHING)
self._set_zone_refresh_timeout(zone_name_class, self._get_current_time() + self._max_transfer_timeout)
notify_master = self._get_zone_notifier_master(zone_name_class)
@@ -346,7 +365,7 @@ class ZonemgrRefresh:
if e.args[0] == errno.EINTR:
(rlist, wlist, xlist) = ([], [], [])
else:
- sys.stderr.write("[b10-zonemgr] Error with select(); %s\n" % e)
+ logger.error(ZONEMGR_SELECT_ERROR, e);
break
for fd in rlist:
@@ -360,12 +379,14 @@ class ZonemgrRefresh:
def run_timer(self, daemon=False):
"""
- Keep track of zone timers. Spawns and starts a thread. The thread object is returned.
+ Keep track of zone timers. Spawns and starts a thread. The thread object
+ is returned.
You can stop it by calling shutdown().
"""
# Small sanity check
if self._running:
+ logger.error(ZONEMGR_TIMER_THREAD_RUNNING)
raise RuntimeError("Trying to run the timers twice at the same time")
# Prepare the launch
@@ -390,6 +411,7 @@ class ZonemgrRefresh:
called from a different thread.
"""
if not self._running:
+ logger.error(ZONEMGR_NO_TIMER_THREAD)
raise RuntimeError("Trying to shutdown, but not running")
# Ask the thread to stop
@@ -404,37 +426,56 @@ class ZonemgrRefresh:
def update_config_data(self, new_config):
""" update ZonemgrRefresh config """
- backup = self._zonemgr_refresh_info.copy()
+ # Get a new value, but only if it is defined (commonly used below)
+ # We don't use "value or default", because if value would be
+ # 0, we would take default
+ def val_or_default(value, default):
+ if value is not None:
+ return value
+ else:
+ return default
+
+ self._lowerbound_refresh = val_or_default(
+ new_config.get('lowerbound_refresh'), self._lowerbound_refresh)
+
+ self._lowerbound_retry = val_or_default(
+ new_config.get('lowerbound_retry'), self._lowerbound_retry)
+
+ self._max_transfer_timeout = val_or_default(
+ new_config.get('max_transfer_timeout'), self._max_transfer_timeout)
+
+ self._refresh_jitter = val_or_default(
+ new_config.get('refresh_jitter'), self._refresh_jitter)
+
+ self._reload_jitter = val_or_default(
+ new_config.get('reload_jitter'), self._reload_jitter)
+
try:
required = {}
- # Add new zones
- for secondary_zone in new_config.get('secondary_zones'):
- name = secondary_zone['name']
- # Be tolerant to sclerotic users who forget the final dot
- if name[-1] != '.':
- name = name + '.'
- name_class = (name, secondary_zone['class'])
- required[name_class] = True
- # Add it only if it isn't there already
- if not name_class in self._zonemgr_refresh_info:
- self.zonemgr_add_zone(name_class)
- # Drop the zones that are no longer there
- # Do it in two phases, python doesn't like deleting while iterating
- to_drop = []
- for old_zone in self._zonemgr_refresh_info:
- if not old_zone in required:
- to_drop.append(old_zone)
- for drop in to_drop:
- del self._zonemgr_refresh_info[drop]
- # If we are not able to find it in database, restore the original
+ secondary_zones = new_config.get('secondary_zones')
+ if secondary_zones is not None:
+ # Add new zones
+ for secondary_zone in new_config.get('secondary_zones'):
+ name = secondary_zone['name']
+ # Be tolerant to sclerotic users who forget the final dot
+ if name[-1] != '.':
+ name = name + '.'
+ name_class = (name, secondary_zone['class'])
+ required[name_class] = True
+ # Add it only if it isn't there already
+ if not name_class in self._zonemgr_refresh_info:
+ # If we are not able to find it in the database, log a warning
+ self.zonemgr_add_zone(name_class)
+ # Drop the zones that are no longer there
+ # Do it in two phases, python doesn't like deleting while iterating
+ to_drop = []
+ for old_zone in self._zonemgr_refresh_info:
+ if not old_zone in required:
+ to_drop.append(old_zone)
+ for drop in to_drop:
+ del self._zonemgr_refresh_info[drop]
except:
- self._zonemgr_refresh_info = backup
raise
- self._lowerbound_refresh = new_config.get('lowerbound_refresh')
- self._lowerbound_retry = new_config.get('lowerbound_retry')
- self._max_transfer_timeout = new_config.get('max_transfer_timeout')
- self._refresh_jitter = new_config.get('refresh_jitter')
- self._reload_jitter = new_config.get('reload_jitter')
class Zonemgr:
"""Zone manager class."""
@@ -474,8 +515,8 @@ class Zonemgr:
return db_file
def shutdown(self):
- """Shutdown the zonemgr process. the thread which is keeping track of zone
- timers should be terminated.
+ """Shutdown the zonemgr process. The thread which is keeping track of
+ zone timers should be terminated.
"""
self._zone_refresh.shutdown()
@@ -515,17 +556,17 @@ class Zonemgr:
# jitter should not be bigger than half of the original value
if config_data.get('refresh_jitter') > 0.5:
config_data['refresh_jitter'] = 0.5
- log_msg("[b10-zonemgr] refresh_jitter is too big, its value will "
- "be set to 0.5")
-
+ logger.warn(ZONEMGR_JITTER_TOO_BIG)
def _parse_cmd_params(self, args, command):
zone_name = args.get("zone_name")
if not zone_name:
+ logger.error(ZONEMGR_NO_ZONE_NAME)
raise ZonemgrException("zone name should be provided")
zone_class = args.get("zone_class")
if not zone_class:
+ logger.error(ZONEMGR_NO_ZONE_CLASS)
raise ZonemgrException("zone class should be provided")
if (command != ZONE_NOTIFY_COMMAND):
@@ -533,6 +574,7 @@ class Zonemgr:
master_str = args.get("master")
if not master_str:
+ logger.error(ZONEMGR_NO_MASTER_ADDRESS)
raise ZonemgrException("master address should be provided")
return ((zone_name, zone_class), master_str)
@@ -540,15 +582,16 @@ class Zonemgr:
def command_handler(self, command, args):
"""Handle command receivd from command channel.
- ZONE_NOTIFY_COMMAND is issued by Auth process; ZONE_XFRIN_SUCCESS_COMMAND
- and ZONE_XFRIN_FAILED_COMMAND are issued by Xfrin process; shutdown is issued
- by a user or Boss process. """
+ ZONE_NOTIFY_COMMAND is issued by Auth process;
+ ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by
+ Xfrin process;
+ shutdown is issued by a user or Boss process. """
answer = create_answer(0)
if command == ZONE_NOTIFY_COMMAND:
""" Handle Auth notify command"""
# master is the source sender of the notify message.
zone_name_class, master = self._parse_cmd_params(args, command)
- log_msg("Received notify command for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_handle_notify(zone_name_class, master)
# Send notification to zonemgr timer thread
@@ -557,6 +600,7 @@ class Zonemgr:
elif command == ZONE_XFRIN_SUCCESS_COMMAND:
""" Handle xfrin success command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_success(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readable
@@ -564,14 +608,17 @@ class Zonemgr:
elif command == ZONE_XFRIN_FAILED_COMMAND:
""" Handle xfrin fail command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_fail(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readable
elif command == "shutdown":
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_SHUTDOWN)
self.shutdown()
else:
+ logger.warn(ZONEMGR_RECEIVE_UNKNOWN, str(command))
answer = create_answer(1, "Unknown command:" + str(command))
return answer
@@ -598,25 +645,29 @@ def set_cmd_options(parser):
if '__main__' == __name__:
try:
+ logger.debug(DBG_START_SHUT, ZONEMGR_STARTING)
parser = OptionParser()
set_cmd_options(parser)
(options, args) = parser.parse_args()
- VERBOSE_MODE = options.verbose
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
set_signal_handler()
zonemgrd = Zonemgr()
zonemgrd.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process\n")
+ logger.info(ZONEMGR_KEYBOARD_INTERRUPT)
+
except isc.cc.session.SessionError as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the command channel daemon running?\n")
+ logger.error(ZONEMGR_SESSION_ERROR)
+
except isc.cc.session.SessionTimeout as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the configuration manager running?\n")
+ logger.error(ZONEMGR_SESSION_TIMEOUT)
+
except isc.config.ModuleCCSessionError as e:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process: %s\n" % str(e))
+ logger.error(ZONEMGR_CCSESSION_ERROR, str(e))
if zonemgrd and zonemgrd.running:
zonemgrd.shutdown()
+ logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
new file mode 100644
index 0000000..8abec5d
--- /dev/null
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -0,0 +1,145 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the zonemgr messages python module.
+
+% ZONEMGR_CCSESSION_ERROR command channel session error: %1
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+
+% ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5
+The value specified in the configuration for the refresh jitter is too large,
+so it has been set to the maximum of 0.5.
+
+% ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+
+% ZONEMGR_LOAD_ZONE loading zone %1 (class %2)
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+
+% ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+
+% ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+
+% ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+
+% ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+
+% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+
+% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+
+% ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+
+% ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+
+% ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+
+% ZONEMGR_SELECT_ERROR error with select(): %1
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+
+% ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+
+% ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SHUTDOWN zone manager has shut down
+A debug message, output when the zone manager has shut down completely.
+
+% ZONEMGR_STARTING zone manager starting
+A debug message output when the zone manager starts up.
+
+% ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index 4659dc4..1020ffe 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -3,13 +3,9 @@
debug
missingInclude
// This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:60
-// These three trigger warnings due to the incomplete implementation. This is
-// our problem, but we need to suppress the warnings for now.
-functionConst:src/lib/cache/resolver_cache.h
-functionConst:src/lib/cache/message_cache.h
-functionConst:src/lib/cache/rrset_cache.h
+unreadVariable:src/lib/dns/rdata/template.cc:61
// Intentional self assignment tests. Suppress warning about them.
selfAssignment:src/lib/dns/tests/name_unittest.cc:293
selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
-selfAssignment:src/lib/dns/tests/tsigkey_unittest.cc:125
+selfAssignment:src/lib/dns/tests/tsigkey_unittest.cc:137
+selfAssignment:src/lib/dns/tests/rdata_txt_like_unittest.cc:222
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index 81ddd18..a569ea7 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config python xfr \
- bench asiolink asiodns nsas cache resolve testutils datasrc \
- server_common
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+ asiolink asiodns nsas cache resolve testutils datasrc \
+ server_common python dhcp
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
new file mode 100644
index 0000000..92b7869
--- /dev/null
+++ b/src/lib/acl/Makefile.am
@@ -0,0 +1,27 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+# The core library
+lib_LTLIBRARIES = libacl.la
+libacl_la_SOURCES = acl.h
+libacl_la_SOURCES += check.h
+libacl_la_SOURCES += ip_check.h ip_check.cc
+libacl_la_SOURCES += logic_check.h
+libacl_la_SOURCES += loader.h loader.cc
+
+libacl_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
+
+# DNS specialized one
+lib_LTLIBRARIES += libdnsacl.la
+
+libdnsacl_la_SOURCES = dns.h dns.cc dnsname_check.h
+
+libdnsacl_la_LIBADD = libacl.la
+libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+
+CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/acl/acl.h b/src/lib/acl/acl.h
new file mode 100644
index 0000000..76039c9
--- /dev/null
+++ b/src/lib/acl/acl.h
@@ -0,0 +1,143 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_ACL_H
+#define ACL_ACL_H
+
+#include "check.h"
+#include <vector>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/noncopyable.hpp>
+
+namespace isc {
+namespace acl {
+
+/**
+ * \brief Default actions an ACL could perform.
+ *
+ * This is the default for the ACL class. It is possible to specify any other
+ * data type, as the ACL class does nothing about them, but these look
+ * reasonable, so they are provided for convenience. It is not specified what
+ * exactly these mean and it's up to whoever uses them.
+ */
+enum BasicAction {
+ ACCEPT,
+ REJECT,
+ DROP
+};
+
+/**
+ * \brief The ACL itself.
+ *
+ * It holds a bunch of ordered entries, each one consisting of a check (of
+ * any kind, it may even be compound) and an action that is returned
+ * whenever the check matches. The entries are tested in order and the
+ * first match counts.
+ *
+ * This is non-copyable. It seems that there's no need to copy them (even
+ * when it would be technically possible), so we forbid it just to prevent
+ * copying it by accident. If there really is legitimate use, this restriction
+ * can be removed.
+ *
+ * The class is a template. It is possible to specify on which context the
+ * checks match and which action type it returns. The action type must be
+ * copyable for this to work and is expected to be something small, usually
+ * an enum (but other objects are also possible).
+ *
+ * \note There are protected functions. In fact, you should consider them
+ * private, they are protected so tests can get inside. This class
+ * is not expected to be subclassed in real applications.
+ */
+template<typename Context, typename Action = BasicAction> class ACL :
+ public boost::noncopyable {
+public:
+ /**
+ * \brief Constructor.
+ *
+ * \param default_action The action that is returned when the checked
+ * thing "falls off" the end of the list (when no rule matches).
+ */
+ ACL(const Action& default_action) : default_action_(default_action)
+ {}
+
+ /**
+ * \brief Pointer to the check.
+ *
+ * We use the shared pointer, because we are not able to copy the checks.
+ * However, we might need to copy the entries (when we concatenate ACLs
+ * together in future).
+ */
+ typedef boost::shared_ptr<const Check<Context> > ConstCheckPtr;
+
+ /**
+ * \brief The actual main function that decides.
+ *
+ * This is the function that takes the entries one by one, checks
+ * the context against conditions and if it matches, returns the
+ * action that belongs to the first matched entry or default action
+ * if nothing matches.
+ *
+ * \param context The thing that should be checked. It is directly
+ * passed to the checks.
+ *
+ * \return The action for the ACL entry that first matches the context.
+ */
+ const Action& execute(const Context& context) const {
+ const typename Entries::const_iterator end(entries_.end());
+ for (typename Entries::const_iterator i(entries_.begin()); i != end;
+ ++i) {
+ if (i->first->matches(context)) {
+ return (i->second);
+ }
+ }
+ return (default_action_);
+ }
+
+ /**
+ * \brief Add new entry at the end of the list.
+ *
+ * \note We currently allow only adding at the end. This is enough for now,
+ * but we may need more when we start implementing some kind of optimisations,
+ * including replacements, reorderings and removals.
+ *
+ * \param check The check to test if the thing matches.
+ * \param action The action to return when the thing matches this check.
+ */
+ void append(ConstCheckPtr check, const Action& action) {
+ entries_.push_back(Entry(check, action));
+ }
+private:
+ // Just type abbreviations.
+ typedef std::pair<ConstCheckPtr, Action> Entry;
+ typedef std::vector<Entry> Entries;
+ /// \brief The default action, when nothing matches.
+ const Action default_action_;
+ /// \brief The entries we have.
+ Entries entries_;
+protected:
+ /**
+ * \brief Get the default action.
+ *
+ * This is for testing purposes only.
+ */
+ const Action& getDefaultAction() const {
+ return (default_action_);
+ }
+};
+
+}
+}
+
+#endif
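
To illustrate the ACL template added above, here is a minimal usage sketch (not
part of the patch). The PortContext context and the EvenPort check are
hypothetical; only ACL, Check and BasicAction come from this header:

#include <stdint.h>
#include <iostream>
#include <boost/shared_ptr.hpp>
#include <acl/acl.h>

// Hypothetical context: just the destination port of a packet.
struct PortContext {
    uint16_t port;
};

// Hypothetical leaf check: matches packets sent to an even port.
class EvenPort : public isc::acl::Check<PortContext> {
public:
    virtual bool matches(const PortContext& context) const {
        return ((context.port % 2) == 0);
    }
};

int main() {
    using namespace isc::acl;

    // The default action applies when no entry matches ("falls off the end").
    ACL<PortContext> acl(REJECT);
    acl.append(boost::shared_ptr<EvenPort>(new EvenPort()), ACCEPT);

    const PortContext context = { 53 };
    std::cout << (acl.execute(context) == ACCEPT ? "accept" : "reject")
              << std::endl;   // prints "reject": 53 is odd, so the default applies
    return (0);
}
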
diff --git a/src/lib/acl/check.h b/src/lib/acl/check.h
new file mode 100644
index 0000000..3297d4b
--- /dev/null
+++ b/src/lib/acl/check.h
@@ -0,0 +1,195 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_CHECK_H
+#define ACL_CHECK_H
+
+#include <vector>
+#include <typeinfo>
+#include <sstream>
+
+namespace isc {
+namespace acl {
+
+/**
+ * \brief ACL check base class.
+ *
+ * It is intended that all ACL checks are inherited (maybe indirectly) from
+ * this base class. This will allow us to define new types of checks without
+ * changing any of the code that uses them and, with the correct
+ * implementation, even without changing the code that parses the
+ * configuration and creates instances of the checks.
+ *
+ * It is implemented as a template. This allows easy reuse of the code for
+ * checking different types of things (packets of different protocols, etc).
+ * We'll implement the loader and compound checks as templates as well (and
+ * just make sure they are instantiated for each type of thing we want to
+ * check). While most concrete checks will be specific to one protocol (or
+ * whatever the entity we check is), it makes sense to implement some of
+ * them as templates as well (for example the IP address check, for any
+ * context that contains a member called ip and has the right methods).
+ *
+ * The Context carries whatever information might be checked for that protocol
+ * (eg. the packet, information where it came from, to what port, ...).
+ */
+template<typename Context> class Check {
+protected:
+ /// \brief Constructor.
+ ///
+ /// Just to make sure this thing is not directly instantiated.
+ Check() { }
+public:
+ /**
+ * \brief The check itself.
+ *
+ * The actual check will be performed here. Every concrete child class
+ * will reimplement it and decide based on the context passed if it
+ * matches.
+ *
+ * The caller should expect that this method can throw. The list of
+ * exceptions isn't restricted, as we don't know what kinds of checks will
+ * be needed. An exception should be taken to mean that it was impossible
+ * to check the condition; it should lead to either blackholing the packet
+ * or returning some 500-like error (ServFail).
+ *
+ * \param context The thing we are trying to match against this check.
+ * \return true if the context satisfies the check, false otherwise.
+ */
+ virtual bool matches(const Context& context) const = 0;
+
+ /**
+ * \brief Cost for unknown cost estimate.
+ *
+ * This indicates that no estimate of the cost is provided. It is an
+ * arbitrarily large value, meaning "a somewhat long time". To be on the
+ * safe side, we guess high and are just happily surprised if it turns
+ * out to run faster.
+ */
+ static const unsigned UNKNOWN_COST;
+
+ /**
+ * \brief The expected cost of single match.
+ *
+ * This is here to provide some kind of cost information to optimising
+ * routines. It is in units without any real meaning; a bigger number just
+ * means the check takes longer. It is expected to be a linear scale. It
+ * doesn't need to be exact, but better accuracy might lead to better
+ * optimisations. As of this writing, no optimisations exist yet, but they
+ * are expected to in future.
+ *
+ * The default is UNKNOWN_COST.
+ */
+ virtual unsigned cost() const {
+ return (UNKNOWN_COST);
+ }
+
+ /// \brief Virtual destructor, as we're virtual
+ virtual ~ Check() { }
+
+ /**
+ * \brief Conversion to text.
+ *
+ * This is meant for debugging purposes, it doesn't have to
+ * serialise the whole information stored in this Check.
+ *
+ * If the check is compound, it should not include the subexpressions
+ * (while we're able to build whatever tree-like representation using
+ * CompoundCheck::getSubexpressions, we're not able to separate them out
+ * of this text automatically, as it may be any kind of free-form string).
+ */
+ virtual std::string toText() const {
+ std::stringstream output;
+ output << typeid(*this).name() << "@" << this;
+ return (output.rdbuf()->str());
+ }
+};
+
+// This seems to be the proper way for static template members
+template<typename Context> const unsigned Check<Context>::UNKNOWN_COST = 10000;
+
+/**
+ * \brief Base class for compound checks.
+ *
+ * While some checks will be a match against some property of the information
+ * passed (eg. the sender's IP address must be in some range), others will
+ * combine results of more checks together to get their own. This is base class
+ * for the second type, allowing listing of the subexpressions (mostly for
+ * debugging purposes to print the whole tree of matches and possible future
+ * optimisations which would like to crawl the expression tree).
+ */
+template<typename Context> class CompoundCheck : public Check<Context> {
+public:
+ /// \brief Abbreviated name for list of subexpressions
+ typedef std::vector<const Check<Context>*> Checks;
+
+ /**
+ * \brief Get the list of subexpressions.
+ *
+ * The result contains pointers to all the subexpressions this check holds
+ * (and therefore might call during its own matches() function).
+ *
+ * Using shared pointers would be overkill here. All the checks must be
+ * alive for the whole life of this one and this check will hold their
+ * ownership. Therefore the only thing the caller needs to do is to make
+ * sure this check is not deleted while it's still using the ones from the
+ * result.
+ *
+ * This method must not throw except for the standard allocation exceptions
+ * to allocate the result.
+ */
+ virtual Checks getSubexpressions() const = 0;
+
+ /**
+ * \brief If the result depends only on results of subexpressions.
+ *
+ * Some optimisations might use the fact that a compound expression is
+ * a function of results of its subexpressions (subchecks) only. But
+ * some compound checks might want to look into the provided context in
+ * their matches() as well as looking at the results of the subexpressions.
+ *
+ * This function informs the optimisation routines if it is safe to use
+ * these optimisations.
+ *
+ * \return true if the check depends only on the results of its
+ * subexpressions, false if it examines the context itself as well.
+ * \note The default implementation returns true, as this is expected to
+ * be true in the majority of cases.
+ */
+ virtual bool pure() const { return (true); }
+
+ /**
+ * \brief Default compound cost function.
+ *
+ * It is simply the sum of the costs of all subexpressions, as an expected
+ * upper bound on the cost. This assumes that the combining itself is cheap
+ * relative to the checks performed by the subexpressions. In most cases, this
+ * should be good enough, but it can be reimplemented in situations
+ * where most of the subexpressions will be avoided in usual situations.
+ * Replacing the default of 10000 from Check.
+ */
+ virtual unsigned cost() const {
+ Checks checks(getSubexpressions());
+ unsigned result(0);
+ for (typename Checks::const_iterator i(checks.begin());
+ i != checks.end(); ++ i) {
+ result += (*i)->cost();
+ }
+ return (result);
+ }
+};
+
+}
+}
+
+#endif
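
As a rough illustration of the Check/CompoundCheck interfaces added above, the
following sketch (not part of the patch) defines a leaf check and a NOT-style
compound check. FlagContext, FlagIsSet and NotCheck are hypothetical; the real
compound checks live in logic_check.h:

#include <boost/shared_ptr.hpp>
#include <acl/check.h>

// Hypothetical context: a single boolean flag.
struct FlagContext {
    bool flag;
};

// Hypothetical leaf check with an explicit (cheap) cost.
class FlagIsSet : public isc::acl::Check<FlagContext> {
public:
    virtual bool matches(const FlagContext& context) const {
        return (context.flag);
    }
    virtual unsigned cost() const { return (1); }
};

// Hypothetical compound check: negates the result of one subexpression.
class NotCheck : public isc::acl::CompoundCheck<FlagContext> {
public:
    NotCheck(const boost::shared_ptr<const isc::acl::Check<FlagContext> >&
             subexpression) :
        subexpression_(subexpression)
    {}
    virtual bool matches(const FlagContext& context) const {
        return (!subexpression_->matches(context));
    }
    // Expose the single subexpression; the inherited cost() sums over it.
    virtual Checks getSubexpressions() const {
        return (Checks(1, subexpression_.get()));
    }
private:
    const boost::shared_ptr<const isc::acl::Check<FlagContext> > subexpression_;
};

// Usage:
//   boost::shared_ptr<FlagIsSet> leaf(new FlagIsSet());
//   NotCheck not_set(leaf);
//   FlagContext context = { false };
//   not_set.matches(context);   // true
//   not_set.cost();             // 1 (summed from the single subexpression)
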
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
new file mode 100644
index 0000000..b9cf91f
--- /dev/null
+++ b/src/lib/acl/dns.cc
@@ -0,0 +1,140 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/tsigrecord.h>
+
+#include <cc/data.h>
+
+#include <acl/dns.h>
+#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
+#include <acl/loader.h>
+#include <acl/logic_check.h>
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns;
+using namespace isc::data;
+
+namespace isc {
+namespace acl {
+
+/// The specialization of \c IPCheck for access control with \c RequestContext.
+///
+/// It returns \c true if the remote (source) IP address of the request
+/// matches the expression encapsulated in the \c IPCheck, and returns
+/// \c false if not.
+template <>
+bool
+IPCheck<dns::RequestContext>::matches(
+ const dns::RequestContext& request) const
+{
+ return (compare(request.remote_address.getData(),
+ request.remote_address.getFamily()));
+}
+
+namespace dns {
+
+/// The specialization of \c NameCheck for access control with
+/// \c RequestContext.
+///
+/// It returns \c true if the request contains a TSIG record and its key
+/// (owner) name is equal to the name stored in the check; otherwise
+/// it returns \c false.
+template<>
+bool
+NameCheck<RequestContext>::matches(const RequestContext& request) const {
+ return (request.tsig != NULL && request.tsig->getName() == name_);
+}
+
+vector<string>
+internal::RequestCheckCreator::names() const {
+ // Probably we should eventually build this vector in a more
+ // sophisticated way. For now, it's simple enough to hardcode
+ // everything.
+ vector<string> supported_names;
+ supported_names.push_back("from");
+ supported_names.push_back("key");
+ return (supported_names);
+}
+
+shared_ptr<RequestCheck>
+internal::RequestCheckCreator::create(const string& name,
+ ConstElementPtr definition,
+ // unused:
+ const acl::Loader<RequestContext>&)
+{
+ if (!definition) {
+ isc_throw(LoaderError,
+ "NULL pointer is passed to RequestCheckCreator");
+ }
+
+ if (name == "from") {
+ return (shared_ptr<internal::RequestIPCheck>(
+ new internal::RequestIPCheck(definition->stringValue())));
+ } else if (name == "key") {
+ return (shared_ptr<internal::RequestKeyCheck>(
+ new internal::RequestKeyCheck(
+ Name(definition->stringValue()))));
+ } else {
+ // This case shouldn't happen (normally) as it should have been
+ // rejected at the loader level. But we explicitly catch the case
+ // and throw an exception for that.
+ isc_throw(LoaderError, "Invalid check name for RequestCheck: " <<
+ name);
+ }
+}
+
+RequestLoader&
+getRequestLoader() {
+ static RequestLoader* loader(NULL);
+ if (loader == NULL) {
+ // Creator registration may throw, so we first store the new loader
+ // in an auto pointer in order to provide the strong exception
+ // guarantee.
+ auto_ptr<RequestLoader> loader_ptr =
+ auto_ptr<RequestLoader>(new RequestLoader(REJECT));
+
+ // Register default check creator(s)
+ loader_ptr->registerCreator(shared_ptr<internal::RequestCheckCreator>(
+ new internal::RequestCheckCreator()));
+ loader_ptr->registerCreator(
+ shared_ptr<NotCreator<RequestContext> >(
+ new NotCreator<RequestContext>("NOT")));
+ loader_ptr->registerCreator(
+ shared_ptr<LogicCreator<AnyOfSpec, RequestContext> >(
+ new LogicCreator<AnyOfSpec, RequestContext>("ANY")));
+ loader_ptr->registerCreator(
+ shared_ptr<LogicCreator<AllOfSpec, RequestContext> >(
+ new LogicCreator<AllOfSpec, RequestContext>("ALL")));
+
+ // From this point there shouldn't be any exception thrown
+ loader = loader_ptr.release();
+ }
+
+ return (*loader);
+}
+
+}
+}
+}
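
A small sketch (not part of the patch) of how the matches() specializations
above are exercised: it builds a RequestContext from a plain sockaddr_in and
tests it against a RequestIPCheck. The function name is illustrative and error
handling is omitted:

#include <cstring>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <acl/dns.h>

bool requestCheckSketch() {
    using namespace isc::acl;
    using namespace isc::acl::dns;

    // A client address as the socket API would hand it to us.
    struct sockaddr_in sin;
    std::memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);

    // IPAddress and RequestContext only keep references, so 'sin' must stay
    // valid while they are in use.
    const IPAddress address(*reinterpret_cast<const struct sockaddr*>(&sin));
    const RequestContext request(address, NULL);  // the request carries no TSIG

    // A "from" check, evaluated through the specialization defined above.
    const internal::RequestIPCheck check("192.0.2.0/24");
    return (check.matches(request));              // true: 192.0.2.1 is in the /24
}
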
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
new file mode 100644
index 0000000..d08fcf3
--- /dev/null
+++ b/src/lib/acl/dns.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_DNS_H
+#define ACL_DNS_H 1
+
+#include <string>
+#include <vector>
+
+#include <boost/shared_ptr.hpp>
+
+#include <cc/data.h>
+
+#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
+#include <acl/loader.h>
+
+namespace isc {
+namespace dns {
+class TSIGRecord;
+}
+namespace acl {
+namespace dns {
+
+/**
+ * \brief DNS request to be checked.
+ *
+ * This plays the role of Context of the generic template ACLs (in namespace
+ * isc::acl).
+ *
+ * It is a simple structure holding just a bunch of information. Therefore
+ * the member names don't end with an underscore; there are no methods, so
+ * they can't be confused with local variables.
+ *
+ * This structure is generally expected to be ephemeral and read-only: It
+ * would be constructed immediately before a particular ACL is checked
+ * and used only for the ACL match purposes. Due to this nature, and since
+ * ACL processing is often performance sensitive (typically it's performed
+ * against all incoming packets), the construction is designed to be
+ * lightweight: it tries to avoid expensive data copies or dynamic memory
+ * allocation as much as possible. Specifically, the constructor can
+ * take a pointer or reference to an object and keeps it as a reference
+ * (not making a local copy). This also means the caller is responsible for
+ * keeping the passed parameters valid while this structure is used.
+ * This should generally be reasonable as this structure is expected to be
+ * used only for a very short period as stated above.
+ *
+ * Based on the minimalist philosophy, the initial implementation only
+ * maintains the remote (source) IP address of the request and (optionally)
+ * the TSIG record included in the request. We may add more parameters of
+ * the request as we see the need for them. Possible additional parameters
+ * are the local (destination) IP address, the remote and local port numbers,
+ * various fields of the DNS request (e.g. a particular header flag value).
+ */
+struct RequestContext {
+ /// The constructor
+ ///
+ /// This is a trivial constructor that performs straightforward
+ /// initialization of the member variables from the given parameters.
+ ///
+ /// \exception None
+ ///
+ /// \param remote_address_param The remote IP address
+ /// \param tsig_param A valid pointer to the TSIG record included in
+ /// the request or NULL if the request doesn't contain a TSIG.
+ RequestContext(const IPAddress& remote_address_param,
+ const isc::dns::TSIGRecord* tsig_param) :
+ remote_address(remote_address_param),
+ tsig(tsig_param)
+ {}
+
+ ///
+ /// \name Parameter variables
+ ///
+ /// These member variables must be immutable so that the integrity of
+ /// the structure is kept throughout its lifetime. The easiest way is
+ /// to declare the variable as const. If it's not possible for a
+ /// particular variable, it must be defined as private and accessible
+ /// only via an accessor method.
+ //@{
+ /// \brief The remote IP address (eg. the client's IP address).
+ const IPAddress& remote_address;
+
+ /// \brief The TSIG record included in the request message, if any.
+ ///
+ /// If the request doesn't include a TSIG, this member will be NULL.
+ const isc::dns::TSIGRecord* const tsig;
+ //@}
+};
+
+/// \brief DNS based check.
+typedef acl::Check<RequestContext> RequestCheck;
+/// \brief DNS based compound check.
+typedef acl::CompoundCheck<RequestContext> CompoundCheck;
+/// \brief DNS based ACL.
+typedef acl::ACL<RequestContext> RequestACL;
+/// \brief DNS based ACL loader.
+typedef acl::Loader<RequestContext> RequestLoader;
+
+/**
+ * \brief Loader singleton access function.
+ *
+ * This function returns a loader of ACLs. It is expected that applications
+ * will use this function instead of creating their own loaders: one loader
+ * is enough, this one has the default checks registered, and it is the
+ * well-known one, so any plugins can register additional checks with it.
+ */
+RequestLoader& getRequestLoader();
+
+// The following is essentially private to the implementation and could
+// be hidden in the implementation file. But it's visible via this header
+// file for testing purposes. They are not supposed to be used by normal
+// applications directly, and to signal the intent, they are given inside
+// a separate namespace.
+namespace internal {
+
+// Shortcut typedef
+typedef isc::acl::IPCheck<RequestContext> RequestIPCheck;
+typedef isc::acl::dns::NameCheck<RequestContext> RequestKeyCheck;
+
+class RequestCheckCreator : public acl::Loader<RequestContext>::CheckCreator {
+public:
+ virtual std::vector<std::string> names() const;
+
+ virtual boost::shared_ptr<RequestCheck>
+ create(const std::string& name, isc::data::ConstElementPtr definition,
+ const acl::Loader<RequestContext>& loader);
+
+ /// Until we are sure how the various rules work for this case, we won't
+ /// allow unexpected special interpretation for list definitions.
+ virtual bool allowListAbbreviation() const { return (false); }
+};
+} // end of namespace "internal"
+
+} // end of namespace "dns"
+} // end of namespace "acl"
+} // end of namespace "isc"
+
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
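
For reference, a sketch (not part of the patch) of how the typedefs above fit
together. The hand-built ACL uses only what this header declares; the commented
loader call assumes a Loader::load() taking a JSON description, which lives in
loader.h and is not shown in this hunk:

#include <boost/shared_ptr.hpp>
#include <acl/dns.h>

void buildAclSketch() {
    using namespace isc::acl;
    using namespace isc::acl::dns;

    // Built by hand: reject by default, accept requests from 192.0.2.0/24.
    RequestACL acl(REJECT);
    acl.append(boost::shared_ptr<internal::RequestIPCheck>(
                   new internal::RequestIPCheck("192.0.2.0/24")), ACCEPT);

    // In practice the ACL would usually come from configuration through the
    // shared loader; the exact call below is an assumption about the load()
    // interface declared in loader.h:
    //   boost::shared_ptr<RequestACL> loaded = getRequestLoader().load(
    //       isc::data::Element::fromJSON(
    //           "[{\"action\": \"ACCEPT\", \"from\": \"192.0.2.0/24\"}]"));
}
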
diff --git a/src/lib/acl/dnsname_check.h b/src/lib/acl/dnsname_check.h
new file mode 100644
index 0000000..7498d99
--- /dev/null
+++ b/src/lib/acl/dnsname_check.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DNSNAME_CHECK_H
+#define __DNSNAME_CHECK_H 1
+
+#include <dns/name.h>
+
+#include <acl/check.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/// ACL check for DNS names
+///
+/// This class is intended to perform a match between a domain name
+/// specified in an ACL and a given name. The primary usage of this class
+/// is an ACL match for TSIG keys, where an ACL would contain a list of
+/// acceptable key names and the \c match() method would compare the owner
+/// name of a TSIG record against the specified names.
+///
+/// This class could be used for other kinds of names such as the query name
+/// of normal DNS queries.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+template <typename Context>
+class NameCheck : public Check<Context> {
+public:
+ /// The constructor
+ ///
+ /// \exception std::bad_alloc Resource allocation fails in copying the
+ /// name
+ ///
+ /// \param name The domain name to be matched in \c matches().
+ NameCheck(const isc::dns::Name& name) : name_(name) {}
+
+ /// Destructor
+ virtual ~NameCheck() {}
+
+ /// The check method
+ ///
+ /// Matches the passed argument to the condition stored here. Different
+ /// specializations must be provided for different argument types, and the
+ /// program will fail to compile if a required specialisation is not
+ /// provided.
+ ///
+ /// \param context Information to be matched
+ virtual bool matches(const Context& context) const;
+
+ /// Returns the name specified on construction.
+ ///
+ /// This is mainly for testing purposes.
+ ///
+ /// \exception None
+ const isc::dns::Name& getName() const { return (name_); }
+
+private:
+ const isc::dns::Name name_;
+};
+
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __DNSNAME_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
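
A brief sketch (not part of the patch) of what adopting NameCheck for a new
context looks like: only a matches() specialization needs to be supplied.
KeyContext is hypothetical; the real specialization for RequestContext is in
dns.cc above:

#include <dns/name.h>
#include <acl/dnsname_check.h>

// Hypothetical context carrying a key name to be matched.
struct KeyContext {
    isc::dns::Name key_name;
};

namespace isc {
namespace acl {
namespace dns {

// The required specialization: matches when the context's key name equals
// the name stored in the check.
template<>
bool
NameCheck<KeyContext>::matches(const KeyContext& context) const {
    return (context.key_name == name_);
}

}
}
}

// Usage:
//   NameCheck<KeyContext> check(isc::dns::Name("key.example."));
//   KeyContext context = { isc::dns::Name("key.example.") };
//   check.matches(context);   // true
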
diff --git a/src/lib/acl/ip_check.cc b/src/lib/acl/ip_check.cc
new file mode 100644
index 0000000..76aacca
--- /dev/null
+++ b/src/lib/acl/ip_check.cc
@@ -0,0 +1,141 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/socket.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/lexical_cast.hpp>
+
+#include <acl/ip_check.h>
+
+using namespace std;
+using namespace isc;
+
+namespace isc {
+namespace acl {
+namespace internal {
+
+uint8_t
+createMask(size_t prefixlen) {
+
+ if (prefixlen == 0) {
+ return (0);
+
+ } else if (prefixlen <= 8) {
+
+ // In the following discussion:
+ //
+ // w is the width of the data type in bits.
+ // m is the value of prefixlen, the number of most significant bits we
+ // want to set.
+ // ** is exponentiation (i.e. 2**n is 2 raised to the power of n).
+ //
+ // We note that the value of 2**m - 1 gives a value with the least
+ // significant m bits set. For a data type of width w, this means that
+ // the most significant (w-m) bits are clear.
+ //
+ // Hence the value 2**(w-m) - 1 gives a result with the least significant
+ // (w-m) bits set and the most significant m bits clear. The 1's
+ // complement of this value is the result we want.
+ //
+ // Final note: at this point in the logic, m is non-zero, so w-m < w.
+ // This means 1<<(w-m) will fit into a variable of width w bits. In
+ // other words, in the expression below, no term will cause an integer
+ // overflow.
+ return (~((1 << (8 - prefixlen)) - 1));
+ }
+
+ // Mask size is too large. (Note that prefixlen is unsigned, so can't be
+ // negative.)
+ isc_throw(isc::OutOfRange, "prefixlen argument must be between 0 and 8");
+}
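
Spelling out the arithmetic above for the uint8_t case (w == 8), a worked
example (not part of the patch; the function name is illustrative):

#include <cassert>
#include <acl/ip_check.h>

void createMaskExamples() {
    using isc::acl::internal::createMask;
    assert(createMask(0) == 0x00);   // no bits set
    assert(createMask(3) == 0xE0);   // ~((1 << 5) - 1) == binary 11100000
    assert(createMask(8) == 0xFF);   // ~((1 << 0) - 1) == binary 11111111
    // createMask(9) would throw isc::OutOfRange.
}
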
+
+pair<string, int>
+splitIPAddress(const string& ipprefix) {
+
+ // Split string into its components - an address and a prefix length.
+ // We initialize by assuming that there is no slash in the string given.
+ string address = ipprefix;
+ string prefixlen = "";
+
+ const size_t slashpos = ipprefix.find('/');
+ if ((ipprefix.size() == 0) || (slashpos == 0) ||
+ (slashpos == (ipprefix.size() - 1))) {
+ // Nothing in prefix, or it starts with or ends with a slash.
+ isc_throw(isc::InvalidParameter, "address prefix of " << ipprefix <<
+ " is not valid");
+
+ } else if (slashpos != string::npos) {
+ // There is a slash somewhere in the string, split the string on it.
+ // Don't worry about multiple slashes - if there are some, they will
+ // appear in the prefixlen segment and will be detected when an attempt
+ // is made to convert it to a number.
+ address = ipprefix.substr(0, slashpos);
+ prefixlen = ipprefix.substr(slashpos + 1);
+ }
+
+ // Set the default value for the prefix length. As the type of the address
+ // is not known at the point this function is called, the maximum
+ // allowable value is also not known. The value of 0 is reserved for
+ // a "match any address" match.
+ int prefix_size = -1;
+
+ // If there is a prefixlength, attempt to convert it.
+ if (!prefixlen.empty()) {
+ try {
+ prefix_size = boost::lexical_cast<int>(prefixlen);
+ if (prefix_size < 0) {
+ isc_throw(isc::InvalidParameter, "address prefix of " <<
+ ipprefix << " is not valid");
+ }
+ } catch (boost::bad_lexical_cast&) {
+ isc_throw(isc::InvalidParameter, "prefix length of '" <<
+ prefixlen << "' is not valid");
+ }
+ }
+
+ return (make_pair(address, prefix_size));
+}
+} // namespace internal
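
Following the code above, some examples (not part of the patch) of the values
splitIPAddress() produces for typical inputs:

#include <cassert>
#include <string>
#include <utility>
#include <acl/ip_check.h>

void splitExamples() {
    using isc::acl::internal::splitIPAddress;
    assert(splitIPAddress("192.0.2.0/24") ==
           std::make_pair(std::string("192.0.2.0"), 24));
    assert(splitIPAddress("192.0.2.1") ==
           std::make_pair(std::string("192.0.2.1"), -1));   // no prefix given
    assert(splitIPAddress("2001:db8::/32") ==
           std::make_pair(std::string("2001:db8::"), 32));
    // "/24", "192.0.2.0/" and "192.0.2.0/-1" all throw isc::InvalidParameter.
}
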
+
+namespace {
+const uint8_t*
+getSockAddrData(const struct sockaddr& sa) {
+ const void* sa_ptr = &sa;
+ const void* data_ptr;
+ if (sa.sa_family == AF_INET) {
+ const struct sockaddr_in* sin =
+ static_cast<const struct sockaddr_in*>(sa_ptr);
+ data_ptr = &sin->sin_addr;
+ } else if (sa.sa_family == AF_INET6) {
+ const struct sockaddr_in6* sin6 =
+ static_cast<const struct sockaddr_in6*>(sa_ptr);
+ data_ptr = &sin6->sin6_addr;
+ } else {
+ isc_throw(BadValue, "Unsupported address family for IPAddress: " <<
+ static_cast<int>(sa.sa_family));
+ }
+ return (static_cast<const uint8_t*>(data_ptr));
+}
+}
+
+IPAddress::IPAddress(const struct sockaddr& sa) :
+ family(sa.sa_family),
+ data(getSockAddrData(sa)),
+ length(family == AF_INET ?
+ sizeof(struct in_addr) : sizeof(struct in6_addr))
+{}
+} // namespace acl
+} // namespace isc
diff --git a/src/lib/acl/ip_check.h b/src/lib/acl/ip_check.h
new file mode 100644
index 0000000..794b943
--- /dev/null
+++ b/src/lib/acl/ip_check.h
@@ -0,0 +1,417 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __IP_CHECK_H
+#define __IP_CHECK_H
+
+#include <sys/socket.h>
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <vector>
+
+#include <boost/static_assert.hpp>
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sys/socket.h> // for AF_INET/AF_INET6
+#include <netinet/in.h>
+
+#include <acl/check.h>
+#include <exceptions/exceptions.h>
+#include <util/strutil.h>
+
+namespace isc {
+namespace acl {
+
+// Free functions. These are not supposed to be used outside this module,
+// but are declared public for testing. To try to conceal them, they are
+// put in an "internal" namespace.
+
+namespace internal {
+
+/// \brief Convert prefix length to mask
+///
+/// Given a prefix length and a data type, return a value of that data type
+/// with the most significant "prefix length" bits set. For example, if the
+/// data type is an uint8_t and the prefix length is 3, the function would
+/// return a uint8_t holding the binary value 11100000. This value is used as
+/// a mask in the address checks.
+///
+/// \param prefixlen number of bits to be set in the mask. This must be
+/// between 0 and 8.
+///
+/// \return uint8_t with the most significant "prefixlen" bits set.
+///
+/// \exception OutOfRange prefixlen is too large for the data type.
+
+uint8_t createMask(size_t prefixlen);
+
+/// \brief Split IP Address Prefix
+///
+/// Splits an IP address prefix (given in the form "xxxxxx/n" or "xxxxx") into
+/// a string representing the IP address and a number giving the length of the
+/// prefix. (In the latter case, the prefix is taken to be equal to the width
+/// in bits of the data type holding the address.) An exception will be
+/// thrown if the string format is invalid or if the prefix length is invalid.
+///
+/// N.B. This function does NOT check that the address component is a valid IP
+/// address; this is done elsewhere in the address parsing process.
+///
+/// \param ipprefix Address or address prefix. The string should be passed
+/// without leading or trailing spaces.
+///
+/// \return Pair of (string, int) holding the address string and the prefix
+/// length. The second element is -1 if no prefix was given.
+///
+/// \exception InvalidParameter Address prefix not of the expected syntax
+
+std::pair<std::string, int>
+splitIPAddress(const std::string& ipprefix);
+
+} // namespace internal
+
+/// \brief A simple representation of IP address.
+///
+/// This structure provides address family independent interfaces of an
+/// IP(v4 or v6) address, so that the application can perform
+/// \c IPCheck::matches without knowing which version of address it is
+/// handling. (For example, consider the standard socket API: it uses
+/// the generic \c sockaddr structure to represent endpoints).
+///
+/// An object of this class could be constructed from various types of
+/// sources, but in the initial implementation there's only one constructor,
+/// which takes a \c sockaddr structure. For efficiency the \c IPAddress
+/// object only retains a reference to the necessary part of \c sockaddr.
+/// Therefore the corresponding \c sockaddr instance must be valid while the
+/// \c IPAddress object is used.
+///
+/// This class is copyable so that a fixed object can be easily reused for
+/// different addresses. To ensure internal integrity, specific member
+/// variables are kept private and only accessible via read-only accessor
+/// methods. Due to this, it is ensured, for example, that if \c getFamily()
+/// returns \c AF_INET6, \c getLength() always returns 16.
+///
+/// All accessor methods are straightforward and exception free.
+///
+/// In future, we may introduce the default constructor to further improve
+/// reusability.
+struct IPAddress {
+ /// The constructor from socket address structure.
+ ///
+ /// This constructor sets up the internal data based on the actual type
+ /// of \c sa. For example, if \c sa.sa_family is \c AF_INET, it assumes
+ /// \c sa actually refers to a \c sockaddr_in structure.
+ /// The behavior when this assumption isn't held is undefined.
+ ///
+ /// \param sa A reference to the socket address structure from which the
+ /// \c IPAddress is to be constructed.
+ explicit IPAddress(const struct sockaddr& sa);
+
+ /// Return the address family of the address
+ ///
+ /// It's AF_INET for IPv4 and AF_INET6 for IPv6.
+ int getFamily() const { return (family); }
+
+ /// Return the binary representation of the address in network byte order.
+ ///
+ /// Only the \c getLength() bytes from the returned pointer are ensured
+ /// to be valid. In addition, if the \c sockaddr structure given on
+ /// construction was dynamically allocated, the data is valid only until
+ /// the \c sockaddr is invalidated.
+ const uint8_t* getData() const { return (data); }
+
+ /// Return the length of the address.
+ size_t getLength() const { return (length); }
+private:
+ int family;
+ const uint8_t* data;
+ size_t length;
+};
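+
+// A minimal construction sketch for IPAddress; it assumes <netinet/in.h>,
+// <arpa/inet.h> and <cstring> are available, and the sockaddr_in variable
+// must outlive the IPAddress object, as described above.
+//
+//   struct sockaddr_in sin;
+//   std::memset(&sin, 0, sizeof(sin));
+//   sin.sin_family = AF_INET;
+//   inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
+//   const IPAddress addr(*reinterpret_cast<const struct sockaddr*>(&sin));
+//   // addr.getFamily() == AF_INET, addr.getLength() == 4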
+
+/// \brief IP Check
+///
+/// This class performs a match between an IP address prefix specified in an ACL
+/// and a given IP address. The check works for both IPv4 and IPv6 addresses.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+
+template <typename Context>
+class IPCheck : public Check<Context> {
+private:
+ // Size of uint8_t array needed to hold different address types
+ static const size_t IPV6_SIZE = sizeof(struct in6_addr);
+ static const size_t IPV4_SIZE = sizeof(struct in_addr);
+
+ // Confirm our assumption of relative sizes - this allows us to assume that
+ // an array sized for an IPv6 address can hold an IPv4 address.
+ BOOST_STATIC_ASSERT(sizeof(struct in6_addr) > sizeof(struct in_addr));
+
+public:
+ /// \brief String Constructor
+ ///
+ /// Constructs an IP Check object from an address or address prefix in the
+ /// form "<ip-address>/n".
+ ///
+ /// Also allowed are the special keywords "any4" and "any6", which match
+ /// any IPv4 or IPv6 address. These must be specified in lowercase.
+ ///
+ /// \param ipprefix IP address prefix in the form "<ip-address>/n"
+ /// (where the "/n" part is optional and should be valid for the
+ /// address). If "n" is specified as zero, the match is for any
+ /// address in that address family. The address can also be
+ /// given as "any4" or "any6".
+ IPCheck(const std::string& ipprefix) : family_(0) {
+
+ // Ensure array elements are correctly initialized with zeroes.
+ std::fill(address_, address_ + IPV6_SIZE, 0);
+ std::fill(mask_, mask_ + IPV6_SIZE, 0);
+
+ // Only deal with the string after we've removed leading and trailing
+ // spaces.
+ const std::string mod_prefix = isc::util::str::trim(ipprefix);
+
+ // Check for special cases first.
+ if (mod_prefix == "any4") {
+ family_ = AF_INET;
+
+ } else if (mod_prefix == "any6") {
+ family_ = AF_INET6;
+
+ } else {
+
+ // General address prefix. Split into address part and prefix
+ // length.
+ const std::pair<std::string, int> result =
+ internal::splitIPAddress(mod_prefix);
+
+ // Try to convert the address. If successful, the result is in
+ // network-byte order (most significant components at lower
+ // addresses).
+ int status = inet_pton(AF_INET6, result.first.c_str(), address_);
+ if (status == 1) {
+ // It was an IPv6 address.
+ family_ = AF_INET6;
+ } else {
+ // IPv6 interpretation failed, try IPv4.
+ status = inet_pton(AF_INET, result.first.c_str(), address_);
+ if (status == 1) {
+ family_ = AF_INET;
+ }
+ }
+
+ // Handle errors.
+ if (status == 0) {
+ isc_throw(isc::InvalidParameter, "address prefix of " <<
+ ipprefix << " is not valid");
+ } else if (status < 0) {
+ isc_throw(isc::Unexpected, "address conversion of " <<
+ ipprefix << " failed due to a system error");
+ }
+
+ // All done, so set the mask used in the address comparison.
+ setMask(result.second);
+ }
+ }
+
+ /// \brief Destructor
+ virtual ~IPCheck() {}
+
+ /// \brief The check itself
+ ///
+ /// Matches the passed argument to the condition stored here. Different
+ /// specialisations must be provided for different argument types, and the
+ /// program will fail to compile if a required specialisation is not
+ /// provided.
+ ///
+ /// It is expected that matches() will extract the address information from
+ /// the Context structure, and use compare() to actually perform the
+ /// comparison.
+ ///
+ /// \param context Information to be matched
+ virtual bool matches(const Context& context) const;
+
+ /// \brief Estimated cost
+ ///
+ /// Assume that the cost of the match is linear and depends on the
+ /// maximum number of comparison operations.
+ ///
+ /// \return Estimated cost of the comparison
+ virtual unsigned cost() const {
+ return ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+ }
+
+ ///@{
+ /// Access methods - mainly for testing
+
+ /// \return Stored IP address
+ std::vector<uint8_t> getAddress() const {
+ const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+ return (std::vector<uint8_t>(address_, address_ + vector_len));
+ }
+
+ /// \return Network mask applied to match
+ std::vector<uint8_t> getMask() const {
+ const size_t vector_len = (family_ == AF_INET ? IPV4_SIZE : IPV6_SIZE);
+ return (std::vector<uint8_t>(mask_, mask_ + vector_len));
+ }
+
+ /// \return Prefix length of the match
+ size_t getPrefixlen() const {
+ // Work this out by counting bits in the mask.
+ size_t count = 0;
+ for (size_t i = 0; i < IPV6_SIZE; ++i) {
+ if (mask_[i] == 0xff) {
+ // All bits set in this byte
+ count += 8;
+ continue;
+
+ } else if (mask_[i] != 0) {
+ // Only some bits set in this byte. Count them.
+ uint8_t byte = mask_[i];
+ for (int j = 0; j < 8; ++j) {
+ count += byte & 0x01; // Add one if the bit is set
+ byte >>= 1; // Go for next bit
+ }
+ }
+ break;
+ }
+ return (count);
+ }
+
+ /// \return Address family
+ int getFamily() const {
+ return (family_);
+ }
+ ///@}
+
+protected:
+ /// \brief Comparison
+ ///
+ /// This is the actual comparison function that checks the IP address passed
+ /// to it against the matching information held in the class itself. It is
+ /// expected to be called from matches().
+ ///
+ /// \param testaddr Address (in network byte order) to test against the
+ /// check condition in the class. This is expected to
+ /// be IPV6_SIZE or IPV4_SIZE bytes long.
+ /// \param family Address family of testaddr.
+ ///
+ /// \return true if the address matches, false if it does not.
+ virtual bool compare(const uint8_t* testaddr, int family) const {
+
+ if (family != family_) {
+ // Can't match if the address is of the wrong family
+ return (false);
+ }
+
+ // The simple family check did not settle the match, so we have to do a
+ // complete comparison. To check that the address given matches the
+ // stored network address and mask, we check the simple condition that:
+ //
+ // address_given & mask_ == stored_address & mask_
+ //
+ // The result is checked for all bytes for which there are bits set in
+ // the mask. We stop at the first non-match (or when we run out of bits
+ // in the mask).
+ //
+ // Note that the mask represents a contiguous set of bits. As such, as
+ // soon as we find a mask byte of zeroes, we have run past the part of
+ // the address where we need to match.
+ //
+ // Note also that when checking an IPv4 address, the constructor has
+ // set all bytes in the mask beyond the first four bytes to zero.
+ // As the loop stops when it encounters a zero mask byte, if the
+ // ACL is for an IPv4 address, the loop will never check more than four
+ // bytes.
+
+ bool match = true;
+ for (int i = 0; match && (i < IPV6_SIZE) && (mask_[i] != 0); ++i) {
+ match = ((testaddr[i] & mask_[i]) == (address_[i] & mask_[i]));
+ }
+ return (match);
+ }
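+
+ // As an illustration of the comparison above: for a check built from
+ // "192.0.2.0/24", address_ is {192, 0, 2, 0, ...} and mask_ is
+ // {0xff, 0xff, 0xff, 0x00, ...}. Testing 192.0.2.1, the first three
+ // masked bytes are equal and the loop stops at the zero mask byte, so
+ // the address matches; testing 192.0.3.1 fails on the third byte.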
+
+private:
+ /// \brief Set Mask
+ ///
+ /// Sets up the mask from the prefix length. This involves setting
+ /// an individual mask in each byte of the mask array.
+ ///
+ /// The actual allowed value of the prefix length depends on the address
+ /// family.
+ ///
+ /// \param requested Requested prefix length. If negative, the
+ /// maximum for the address family is assumed. (A negative value
+ /// will arise if the string constructor was used and no mask size
+ /// was given.)
+ void setMask(int requested) {
+
+ // Set the maximum number of bits allowed in the mask, and request
+ // that number of bits if no prefix length was given in the constructor.
+ const int maxmask = 8 * ((family_ == AF_INET) ? IPV4_SIZE : IPV6_SIZE);
+ if (requested < 0) {
+ requested = maxmask;
+ }
+
+ // Check that the requested prefix length is valid.
+ if (requested <= maxmask) {
+
+ // Loop, setting the bits in the set of mask bytes until all the
+ // specified bits have been used up. As both IPv4 and IPv6
+ // addresses are stored in network-byte order, this works in
+ // both cases.
+ size_t bits_left = requested; // Bits remaining to set
+ int i = -1;
+ while (bits_left > 0) {
+ if (bits_left >= 8) {
+ mask_[++i] = ~0; // All bits set
+ bits_left -= 8;
+
+ } else if (bits_left > 0) {
+ mask_[++i] = internal::createMask(bits_left);
+ bits_left = 0;
+ }
+ }
+ } else {
+ isc_throw(isc::OutOfRange,
+ "mask size of " << requested << " is invalid " <<
+ "for the given address family");
+ }
+ }
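+
+ // For illustration: a requested prefix length of 20 on an IPv4 check
+ // produces mask_ = {0xff, 0xff, 0xf0, 0x00, ...} - two full bytes,
+ // then createMask(4) == 0xf0 for the remaining four bits, with the
+ // rest of the array left at the zeroes set by the constructor.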
+
+ // Member variables.
+ uint8_t address_[IPV6_SIZE]; ///< Address in binary form
+ uint8_t mask_[IPV6_SIZE]; ///< Address mask
+ int family_; ///< Address family
+};
+
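+// A minimal usage sketch for IPCheck. It assumes a context type for which a
+// matches() specialisation is provided elsewhere (as the DNS variant of this
+// library does); MyContext is a hypothetical placeholder and only the
+// accessors are exercised here.
+//
+//   IPCheck<MyContext> check("192.0.2.0/24");
+//   // check.getFamily() == AF_INET
+//   // check.getPrefixlen() == 24
+//   // check.getAddress() == {192, 0, 2, 0}
+//
+//   IPCheck<MyContext> any6("any6");   // matches any IPv6 address
+//   // any6.getFamily() == AF_INET6, any6.getPrefixlen() == 0
+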
+// Some compilers seem to need this to be explicitly defined outside the class
+template <typename Context>
+const size_t IPCheck<Context>::IPV6_SIZE;
+
+template <typename Context>
+const size_t IPCheck<Context>::IPV4_SIZE;
+
+} // namespace acl
+} // namespace isc
+
+#endif // __IP_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/loader.cc b/src/lib/acl/loader.cc
new file mode 100644
index 0000000..8ca7e28
--- /dev/null
+++ b/src/lib/acl/loader.cc
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "loader.h"
+
+using namespace std;
+
+namespace isc {
+namespace acl {
+
+BasicAction defaultActionLoader(data::ConstElementPtr actionEl) {
+ try {
+ const string action(actionEl->stringValue());
+ if (action == "ACCEPT") {
+ return (ACCEPT);
+ } else if (action == "REJECT") {
+ return (REJECT);
+ } else if (action == "DROP") {
+ return (DROP);
+ } else {
+ throw LoaderError(__FILE__, __LINE__,
+ string("Unknown action '" + action + "'").
+ c_str(),
+ actionEl);
+ }
+ }
+ catch (const data::TypeError&) {
+ throw LoaderError(__FILE__, __LINE__,
+ "Invalid element type for action, must be string",
+ actionEl);
+ }
+}
+
+}
+}
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
new file mode 100644
index 0000000..fc69b44
--- /dev/null
+++ b/src/lib/acl/loader.h
@@ -0,0 +1,479 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOADER_H
+#define ACL_LOADER_H
+
+#include <exceptions/exceptions.h>
+#include <acl/acl.h>
+#include <cc/data.h>
+#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
+#include <map>
+
+namespace isc {
+namespace acl {
+
+class AnyOfSpec;
+class AllOfSpec;
+template<typename Mode, typename Context> class LogicOperator;
+
+/**
+ * \brief Exception for bad ACL specifications.
+ *
+ * This will be thrown by the Loader if the ACL description is malformed
+ * in some way.
+ *
+ * It can also hold an optional JSON element in which the error was detected,
+ * so that it can be examined.
+ *
+ * Checks may subclass this exception for similar errors if they see fit.
+ */
+class LoaderError : public BadValue {
+private:
+ const data::ConstElementPtr element_;
+public:
+ /**
+ * \brief Constructor.
+ *
+ * Should be used with isc_throw if the fourth argument isn't used.
+ *
+ * \param file The file where the throw happened.
+ * \param line Similar to file, but for the line number.
+ * \param what Human readable description of what happened.
+ * \param element This might be passed to hold the JSON element where
+ * the error was detected.
+ */
+ LoaderError(const char* file, size_t line, const char* what,
+ data::ConstElementPtr element = data::ConstElementPtr()) :
+ BadValue(file, line, what),
+ element_(element)
+ {}
+
+ ~LoaderError() throw() {}
+
+ /**
+ * \brief Get the element.
+ *
+ * This returns the element where the error was detected. Note that it
+ * might be NULL in some situations.
+ */
+ const data::ConstElementPtr& element() const {
+ return (element_);
+ }
+};
+
+/**
+ * \brief Loader of the default actions of ACLs.
+ *
+ * Declared outside the Loader class, as this one does not need to be
+ * templated. This will throw LoaderError if the parameter isn't string
+ * or if it doesn't contain one of the accepted values.
+ *
+ * \param action The JSON representation of the action. It must be a string
+ * and contain one of "ACCEPT", "REJECT" or "DROP".
+ * \note We could define different names or add aliases if needed.
+ */
+BasicAction defaultActionLoader(data::ConstElementPtr action);
+
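+// A minimal sketch of defaultActionLoader(); the JSON element would normally
+// come from a parsed ACL entry rather than being built directly:
+//
+//   defaultActionLoader(data::Element::fromJSON("\"ACCEPT\""));  // ACCEPT
+//   defaultActionLoader(data::Element::fromJSON("\"DROP\""));    // DROP
+//   defaultActionLoader(data::Element::fromJSON("\"allow\""));   // throws LoaderError
+//   defaultActionLoader(data::Element::fromJSON("42"));          // throws LoaderError
+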
+/**
+ * \brief Loader of ACLs.
+ *
+ * The goal of this class is to convert a JSON description of an ACL to an
+ * object of the ACL class (including the checks inside it).
+ *
+ * The class can be used to load the checks only. This is supposed to be used
+ * by compound checks to create the subexpressions.
+ *
+ * To allow any kind of checks to exist in the application, creators are
+ * registered for the names of the checks.
+ *
+ * An ACL definition looks like this:
+ * \verbatim
+ [
+ {
+ "action": "ACCEPT",
+ "match-type": <parameter>
+ },
+ {
+ "action": "REJECT",
+ "match-type": <parameter>,
+ "another-match-type": [<parameter1>, <parameter2>]
+ },
+ {
+ "action": "DROP"
+ }
+ ]
+ \endverbatim
+ *
+ * This is a list of elements. Each element must have an "action"
+ * entry/keyword. That one specifies which action is returned if this
+ * element matches (the value of the key is passed to the action loader;
+ * see the constructor). It may be any piece of JSON which the action
+ * loader expects.
+ *
+ * The rest of the element consists of matches. The left side is the name of
+ * the match type (for example a match for the source IP address or a match
+ * for the message size). The parameter is whatever is needed to describe
+ * the match and depends on the match type; the loader passes it verbatim
+ * to the creator of that match type.
+ *
+ * There may be multiple match types in a single element. In such a case,
+ * all of the matches must match for the element's action to be taken (so,
+ * in the second element, both "match-type" and "another-match-type" must
+ * be satisfied). If there's no match in the element, the action is
+ * taken/returned unconditionally, every time (this makes sense as the last
+ * entry, as the ACL will never get past it).
+ *
+ * The second entry shows another feature: if the value of some match is a
+ * list and the match itself does not expect a list, it is taken as an "or" -
+ * at least one of the choices in the list must match. So, for the second
+ * entry, both "match-type" and "another-match-type" must be satisfied, but
+ * the latter is satisfied by either parameter1 or parameter2.
+ */
+template<typename Context, typename Action = BasicAction> class Loader {
+public:
+ /**
+ * \brief Constructor.
+ *
+ * \param defaultAction The default action for created ACLs.
+ * \param actionLoader is the loader which will be used to convert actions
+ * from their JSON representation. The default value is suitable for
+ * the BasicAction enum. If you did not specify the second
+ * template argument, you don't need to specify this loader.
+ */
+ Loader(const Action& defaultAction,
+ const boost::function1<Action, data::ConstElementPtr>
+ &actionLoader = &defaultActionLoader) :
+ default_action_(defaultAction),
+ action_loader_(actionLoader)
+ {}
+
+ /**
+ * \brief Creator of the checks.
+ *
+ * This can be registered within the Loader and will be used to create the
+ * checks. It is expected that multiple creators (for multiple types; one
+ * creator can even handle multiple names) will be created and registered
+ * to support the range of things we could check. This allows for
+ * customizing/extending the loader.
+ */
+ class CheckCreator {
+ public:
+ /** \brief Virtual class needs virtual destructor */
+ virtual ~CheckCreator() {}
+
+ /**
+ * \brief List of names supported by this loader.
+ *
+ * List of all names for which this loader is able to create the
+ * checks. There can be multiple names, to support both aliases
+ * to the same checks and creators capable of creating multiple
+ * types of checks.
+ */
+ virtual std::vector<std::string> names() const = 0;
+
+ /**
+ * \brief Creates the check.
+ *
+ * This function does the actual creation. It is passed all the
+ * relevant data and is supposed to return shared pointer to the
+ * check.
+ *
+ * It is expected to throw the LoaderError exception when the
+ * definition is invalid.
+ *
+ * \param name The type name of the check. If the creator creates
+ * only one type of check, it can safely ignore this parameter.
+ * \param definition The part of JSON describing the parameters of
+ * the check. As there's no way for the loader to know what the
+ * parameters should look like, they are not checked in any way.
+ * Therefore it's up to the creator (or the check being created)
+ * to validate the data and throw if it is bad.
+ * \param loader Current loader calling this creator. This can be used
+ * to load subexpressions in case of compound check.
+ */
+ virtual boost::shared_ptr<Check<Context> > create(
+ const std::string& name, data::ConstElementPtr definition,
+ const Loader<Context, Action>& loader) = 0;
+
+ /**
+ * \brief Is list or-abbreviation allowed?
+ *
+ * If this returns true and the parameter (e.g. the value we check
+ * against, the one that is passed as the second parameter of create)
+ * is a list, the loader will call the create method with each element of
+ * the list and aggregate all the results in an OR compound check. If it
+ * is false, the parameter is passed verbatim no matter whether it is a
+ * list or not. For example, the IP check will have this as true (so
+ * multiple IP addresses can be passed as options), but the AND operator
+ * will return false and handle the list of subexpressions itself.
+ *
+ * The rationale behind this is that it is common to specify a list of
+ * things that match (e.g. a list of IP addresses).
+ */
+ virtual bool allowListAbbreviation() const {
+ return (true);
+ }
+ };
+
+ /**
+ * \brief Register another check creator.
+ *
+ * Adds a creator to the list of known ones. The creator's list of names
+ * must be disjoint with the names already known to the loader, or the
+ * LoaderError exception is thrown. In such a case, the creator is not
+ * registered under any of the names. In case of other exceptions, like
+ * bad_alloc, only weak exception safety is guaranteed.
+ *
+ * \param creator Shared pointer to the creator.
+ * \note We don't support deregistration yet, but it is expected it will
+ * be needed in future, when we have some kind of plugins. These
+ * plugins might want to unload, in which case they would need to
+ * deregister their creators. It is expected they would pass the same
+ * pointer to such method as they pass here.
+ */
+ void registerCreator(boost::shared_ptr<CheckCreator> creator) {
+ // First check we can insert all the names
+ typedef std::vector<std::string> Strings;
+ const Strings names(creator->names());
+ for (Strings::const_iterator i(names.begin()); i != names.end();
+ ++i) {
+ if (creators_.find(*i) != creators_.end()) {
+ isc_throw(LoaderError, "The loader already contains creator "
+ "named " << *i);
+ }
+ }
+ // Now insert them
+ for (Strings::const_iterator i(names.begin()); i != names.end();
+ ++i) {
+ creators_[*i] = creator;
+ }
+ }
+
+ /**
+ * \brief Load a check.
+ *
+ * This parses a check dict (a block; one element of the ACL) and calls a
+ * creator (or creators, if more than one check is found inside) for it. It
+ * ignores the "action" key, as it is a reserved keyword used to specify
+ * actions inside the ACL.
+ *
+ * This may throw LoaderError if it is not a dict or if any of the type
+ * names is not known (there's no creator registered for it). The
+ * exceptions from creators aren't caught.
+ *
+ * \param description The JSON description of the check.
+ */
+ boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+ description) const
+ {
+ // Get the description as a map
+ typedef std::map<std::string, data::ConstElementPtr> Map;
+ Map map;
+ try {
+ map = description->mapValue();
+ }
+ catch (const data::TypeError&) {
+ isc_throw_1(LoaderError, "Check description is not a map",
+ description);
+ }
+ // Call the internal part with extracted map
+ return (loadCheck(description, map));
+ }
+
+ /**
+ * \brief Load an ACL.
+ *
+ * This parses an ACL list, creates the checks and actions of each element
+ * and returns it.
+ *
+ * No exceptions from \c loadCheck (therefore from whatever creator is
+ * used) and from the actionLoader passed to constructor are caught.
+ *
+ * \exception InvalidParameter The given element is NULL (most likely a
+ * caller's bug)
+ * \exception LoaderError The given element isn't a list or the
+ * "action" key is missing in some element
+ *
+ * \param description The JSON list of ACL.
+ *
+ * \return The newly created ACL object
+ */
+ boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
+ description) const
+ {
+ if (!description) {
+ isc_throw(isc::InvalidParameter,
+ "Null description is passed to ACL loader");
+ }
+
+ // We first check it's a list, so we can use the list reference
+ // (the list may be huge)
+ if (description->getType() != data::Element::list) {
+ isc_throw_1(LoaderError, "ACL not a list", description);
+ }
+ // First create an empty ACL
+ const List &list(description->listValue());
+ boost::shared_ptr<ACL<Context, Action> > result(
+ new ACL<Context, Action>(default_action_));
+ // Run through the list of elements
+ for (List::const_iterator i(list.begin()); i != list.end(); ++i) {
+ Map map;
+ try {
+ map = (*i)->mapValue();
+ }
+ catch (const data::TypeError&) {
+ isc_throw_1(LoaderError, "ACL element not a map", *i);
+ }
+ // Create an action for the element
+ const Map::const_iterator action(map.find("action"));
+ if (action == map.end()) {
+ isc_throw_1(LoaderError, "No action in ACL element", *i);
+ }
+ const Action acValue(action_loader_(action->second));
+ // Now create the check if there's one
+ if (map.size() >= 2) { // One is the action, another one the check
+ result->append(loadCheck(*i, map), acValue);
+ } else {
+ // In case there's no check, this matches every time. We
+ // simulate it by our own private "True" check.
+ result->append(boost::shared_ptr<Check<Context> >(new True()),
+ acValue);
+ }
+ }
+ return (result);
+ }
+
+private:
+ // Some type aliases to save typing
+ typedef std::map<std::string, boost::shared_ptr<CheckCreator> > Creators;
+ typedef std::map<std::string, data::ConstElementPtr> Map;
+ typedef std::vector<data::ConstElementPtr> List;
+ // Private members
+ Creators creators_;
+ const Action default_action_;
+ const boost::function1<Action, data::ConstElementPtr> action_loader_;
+
+ /**
+ * \brief Internal version of loadCheck.
+ *
+ * This is the internal part, shared between load and loadCheck.
+ * \param description The bit of JSON (used in exceptions).
+ * \param map The extracted map describing the check. Note that this
+ * function modifies the map.
+ */
+ boost::shared_ptr<Check<Context> > loadCheck(const data::ConstElementPtr&
+ description, Map& map) const
+ {
+ // Remove the action keyword
+ map.erase("action");
+ // Now, do we have any definition? Or is it an abbreviation?
+ switch (map.size()) {
+ case 0:
+ isc_throw_1(LoaderError, "Check description is empty",
+ description);
+ case 1: {
+ // Get the first and only item
+ const Map::const_iterator checkDesc(map.begin());
+ const std::string& name(checkDesc->first);
+ const typename Creators::const_iterator
+ creatorIt(creators_.find(name));
+ if (creatorIt == creators_.end()) {
+ isc_throw_1(LoaderError, "No creator for ACL check " <<
+ name, description);
+ }
+ if (creatorIt->second->allowListAbbreviation() &&
+ checkDesc->second->getType() == data::Element::list) {
+ // Or-abbreviated form - create an OR and put everything
+ // inside.
+ const std::vector<data::ConstElementPtr>&
+ params(checkDesc->second->listValue());
+ boost::shared_ptr<LogicOperator<AnyOfSpec, Context> >
+ oper(new LogicOperator<AnyOfSpec, Context>);
+ for (std::vector<data::ConstElementPtr>::const_iterator
+ i(params.begin());
+ i != params.end(); ++i) {
+ oper->addSubexpression(
+ creatorIt->second->create(name, *i, *this));
+ }
+ return (oper);
+ }
+ // Create the check and return it
+ return (creatorIt->second->create(name, checkDesc->second,
+ *this));
+ }
+ default: {
+ // This is the AND-abbreviated form. We need to create an
+ // AND (or "ALL") operator, loop through the whole map and
+ // fill it in. We do a small trick - we create a bunch of
+ // single-item maps, call this loader recursively (therefore
+ // it will get into the "case 1" branch, where the actual
+ // loading happens) and use the results to fill the operator.
+ //
+ // We keep the description the same, there's nothing we could
+ // take out (we could create a new one, but that would be
+ // confusing, as it is used for error messages only).
+ boost::shared_ptr<LogicOperator<AllOfSpec, Context> >
+ oper(new LogicOperator<AllOfSpec, Context>);
+ for (Map::const_iterator i(map.begin()); i != map.end(); ++i) {
+ Map singleSubexpr;
+ singleSubexpr.insert(*i);
+ oper->addSubexpression(loadCheck(description,
+ singleSubexpr));
+ }
+ return (oper);
+ }
+ }
+ }
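+
+ // To illustrate the abbreviated forms handled above: an element such as
+ //
+ //   {"action": "ACCEPT", "from": ["192.0.2.1", "2001:db8::1"], "key": "a.example"}
+ //
+ // is loaded (once "action" is stripped) as an ALL operator containing the
+ // "key" check and an ANY operator with one "from" check per list item,
+ // assuming "from" and "key" are registered creators and "from" allows the
+ // list abbreviation (as the DNS request loader provides).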
+
+ /**
+ * \brief Check that always matches.
+ *
+ * This one is used internally for ACL elements without a condition. We may
+ * want to make this publicly accessible at some point, but for now
+ * there's no need.
+ */
+ class True : public Check<Context> {
+ public:
+ virtual bool matches(const Context&) const { return (true); };
+ virtual unsigned cost() const { return (1); }
+ // We don't write "true" here, as this one was created using empty
+ // input
+ virtual std::string toText() const { return ""; }
+ };
+};
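+
+// A minimal usage sketch for the Loader; MyContext and MyCheckCreator are
+// hypothetical names standing in for an application-provided context type
+// and a creator registered for the "from" name:
+//
+//   Loader<MyContext> loader(REJECT);   // REJECT if no element matches
+//   loader.registerCreator(
+//       boost::shared_ptr<Loader<MyContext>::CheckCreator>(
+//           new MyCheckCreator()));
+//   boost::shared_ptr<ACL<MyContext> > acl = loader.load(
+//       data::Element::fromJSON(
+//           "[{\"action\": \"ACCEPT\", \"from\": \"192.0.2.1\"},"
+//           " {\"action\": \"DROP\"}]"));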
+
+}
+}
+
+/*
+ * This include at the end of the file is unusual. But we need to include it,
+ * we use template classes from there. However, they need to be present only
+ * at instantiation of our class, which will happen below this header.
+ *
+ * The problem is, the header uses us as well, therefore there's a circular
+ * dependency. If we loaded it at the beginning and someone loaded us first,
+ * the logic_check header wouldn't have our definitions. This way, no matter
+ * in which order they are loaded, the definitions from this header will be
+ * above the ones from logic_check.
+ */
+#include "logic_check.h"
+
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
new file mode 100644
index 0000000..92441e8
--- /dev/null
+++ b/src/lib/acl/logic_check.h
@@ -0,0 +1,286 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ACL_LOGIC_CHECK_H
+#define ACL_LOGIC_CHECK_H
+
+#include "check.h"
+#include "loader.h"
+
+namespace isc {
+namespace acl {
+
+/// \brief Constants for the AnyOf implementation
+class AnyOfSpec {
+public:
+ static bool start() { return (false); }
+ static bool terminate(const bool another) {
+ return (another);
+ }
+};
+
+/// \brief Constants for the AllOf implementation
+class AllOfSpec {
+public:
+ static bool start() { return (true); }
+ static bool terminate(const bool another) {
+ return (!another);
+ }
+};
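+
+// How the two specs drive the LogicOperator below: evaluation starts from
+// start() and stops as soon as terminate() returns true for a subexpression
+// result, yielding the opposite of start().
+//
+//   AnyOfSpec: start() == false, terminate(m) == m   -> stop on the first
+//              matching subexpression and return true.
+//   AllOfSpec: start() == true,  terminate(m) == !m  -> stop on the first
+//              non-matching subexpression and return false.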
+
+/**
+ * \brief Logic operators
+ *
+ * This class implements the AllOf and AnyOf compound checks. As their
+ * behaviour is almost the same, the same template class is used. Which
+ * one it is depends on the Mode template parameter. The Mode should be
+ * one of AnyOfSpec or AllOfSpec, which provide some commands for the
+ * internal implementation. It would be nice to provide typedefs for
+ * them, but it is impossible to do so, as we have the Context template
+ * parameter as well and C++ doesn't like templated typedefs.
+ *
+ * The object holds several subexpressions and returns true if all
+ * of the subexpressions return true (in case of AllOfSpec Mode) or
+ * at least one of them returns true (in the case of AnyOfSpec Mode). If
+ * some subexpression determines the result (e.g. one returns false
+ * in the case of AllOfSpec), the rest are not tried, for performance
+ * reasons.
+ */
+template<typename Mode, typename Context>
+class LogicOperator : public CompoundCheck<Context> {
+public:
+ /**
+ * \brief Add another subexpression.
+ *
+ * This adds another subexpression to the list of checked expressions.
+ * This is usually done shortly after the creation, before using the
+ * check for matches.
+ *
+ * Currently there's no way to place the expression at an arbitrary position
+ * or to remove it. It might turn out that this is needed in future for
+ * optimisation, or it might even turn out we need shared pointers for it.
+ *
+ * \param expr The new expression to put inside.
+ */
+ void addSubexpression(const boost::shared_ptr<Check<Context> >& expr) {
+ checks_.push_back(expr);
+ }
+ /**
+ * \brief The current list of subexpressions.
+ */
+ virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+ typename CompoundCheck<Context>::Checks result;
+ for (typename Checks::const_iterator i(checks_.begin());
+ i != checks_.end(); ++i) {
+ result.push_back(i->get());
+ }
+ return (result);
+ }
+ /**
+ * \brief The match of the check.
+ *
+ * Runs the subexpressions, one by one, and then decides based on that
+ * what to return.
+ */
+ virtual bool matches(const Context& context) const {
+ /*
+ * This might look slightly complicated. However, it is just a
+ * generalized version of multi-and or multi-or. The usual
+ * implementation of multi-and starts with true; once a false
+ * result is found, the outcome stays false and false is
+ * returned. It is exactly the other way around with or.
+ *
+ * So, if we ever find a subexpression whose result is the opposite
+ * of the start value (false in the case of and, true in the case
+ * of or), we can just stop and return that result right away. If
+ * no such expression is met, we get to the end and return the
+ * default.
+ */
+ for (typename Checks::const_iterator i(checks_.begin());
+ i != checks_.end(); ++i) {
+ if (Mode::terminate((*i)->matches(context))) {
+ return (!Mode::start());
+ }
+ }
+ return (Mode::start());
+ }
+private:
+ /// \brief List of subexpressions
+ typedef typename std::vector<boost::shared_ptr<Check<Context> > > Checks;
+ Checks checks_;
+};
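+
+// A small construction sketch for LogicOperator; MyContext, check_a and
+// check_b are hypothetical placeholders (the latter two being
+// boost::shared_ptr<Check<MyContext> > instances):
+//
+//   boost::shared_ptr<LogicOperator<AnyOfSpec, MyContext> >
+//       any(new LogicOperator<AnyOfSpec, MyContext>);
+//   any->addSubexpression(check_a);
+//   any->addSubexpression(check_b);
+//   // any->matches(ctx) is true if check_a or check_b matches ctx.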
+
+/**
+ * \brief Creator for the LogicOperator compound check.
+ *
+ * This class can load the ANY and ALL operators from JSON. They expect
+ * a list of subexpressions as a parameter, e.g. like this:
+ *
+ * \verbatim
+ * {"ANY": [
+ * {"ip": "1.2.3.4"},
+ * {"ip": "5.6.7.8"}
+ * ]}
+ * \endverbatim
+ *
+ * It uses the loader to load the subexpressions, therefore whatever is
+ * supported there is supported here as well.
+ *
+ * The Mode template parameter has the same meaning as with LogicOperator,
+ * it is used to know which operators to create.
+ */
+template<typename Mode, typename Context, typename Action = BasicAction>
+class LogicCreator : public Loader<Context, Action>::CheckCreator {
+public:
+ /**
+ * \brief Constructor.
+ *
+ * \param name The name under which this creator will work. In practice,
+ * it will usually be ANY or ALL (depending on the mode), but
+ * anything else can be used as well.
+ */
+ LogicCreator(const std::string& name) :
+ name_(name)
+ {}
+ /// \brief Returns vector containing the name.
+ virtual std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back(name_);
+ return (result);
+ }
+ /**
+ * \brief Converts a JSON description into the logic operator.
+ *
+ * This is the place where the actual loading happens. It creates
+ * the logic operator and calls the loader on each of the list
+ * elements, placing the result into the logic operator.
+ *
+ * The first parameter is ignored and is there only to match the interface.
+ *
+ * \param definition The JSON definition of the subexpressions. This must
+ * be a list (if it isn't, the LoaderError is thrown) and the elements
+ * must be loadable by the loader (the exceptions from it are not
+ * caught).
+ * \param loader The loader to use for loading of subexpressions.
+ */
+ virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+ data::ConstElementPtr
+ definition,
+ const Loader<Context,
+ Action>& loader)
+ {
+ std::vector<data::ConstElementPtr> subexprs;
+ try {
+ subexprs = definition->listValue();
+ }
+ catch (const data::TypeError&) {
+ isc_throw_1(LoaderError, "Logic operator takes list", definition);
+ }
+ boost::shared_ptr<LogicOperator<Mode, Context> >
+ result(new LogicOperator<Mode, Context>);
+ for (std::vector<data::ConstElementPtr>::const_iterator
+ i(subexprs.begin());
+ i != subexprs.end(); ++i) {
+ result->addSubexpression(loader.loadCheck(*i));
+ }
+ return (result);
+ }
+ virtual bool allowListAbbreviation() const { return (false); }
+private:
+ const std::string name_;
+};
+
+/**
+ * \brief The NOT operator for ACLs.
+ *
+ * This simply returns the negation of whatever the subexpression returns.
+ */
+template<typename Context>
+class NotOperator : public CompoundCheck<Context> {
+public:
+ /**
+ * \brief Constructor
+ *
+ * \param expr The subexpression to be negated by this NOT.
+ */
+ NotOperator(const boost::shared_ptr<Check<Context> >& expr) :
+ expr_(expr)
+ { }
+ /**
+ * \brief The list of subexpressions
+ *
+ * \return A vector containing a single value: the expression passed to
+ * the constructor.
+ */
+ virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+ typename CompoundCheck<Context>::Checks result;
+ result.push_back(expr_.get());
+ return (result);
+ }
+ /// \brief The matching function
+ virtual bool matches(const Context& context) const {
+ return (!expr_->matches(context));
+ }
+private:
+ /// \brief The subexpression
+ const boost::shared_ptr<Check<Context> > expr_;
+};
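+
+// For example, wrapping a hypothetical inner check (a
+// boost::shared_ptr<Check<MyContext> > named inner) inverts its result:
+//
+//   NotOperator<MyContext> not_check(inner);
+//   // not_check.matches(ctx) == !inner->matches(ctx)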
+
+template<typename Context, typename Action = BasicAction>
+class NotCreator : public Loader<Context, Action>::CheckCreator {
+public:
+ /**
+ * \brief Constructor
+ *
+ * \param name The name under which the NOT operator will be loaded.
+ */
+ NotCreator(const std::string& name) :
+ name_(name)
+ { }
+ /**
+ * \brief List of the names this loads
+ *
+ * \return Single-value vector containing the name passed to the
+ * constructor.
+ */
+ virtual std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back(name_);
+ return (result);
+ }
+ /// \brief Create the check.
+ virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+ data::ConstElementPtr
+ definition,
+ const Loader<Context,
+ Action>& loader)
+ {
+ return (boost::shared_ptr<Check<Context> >(new NotOperator<Context>(
+ loader.loadCheck(definition))));
+ }
+ /**
+ * \brief Or-abbreviated form.
+ *
+ * This returns false. In theory, the NOT operator could be used with
+ * the abbreviated form, but it would be confusing. Such syntax is
+ * therefore explicitly forbidden.
+ */
+ virtual bool allowListAbbreviation() const { return (false); }
+private:
+ const std::string name_;
+};
+
+}
+}
+
+#endif
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
new file mode 100644
index 0000000..6369511
--- /dev/null
+++ b/src/lib/acl/tests/Makefile.am
@@ -0,0 +1,40 @@
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc
+run_unittests_SOURCES += acl_test.cc
+run_unittests_SOURCES += check_test.cc
+run_unittests_SOURCES += dns_test.cc
+run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += dnsname_check_unittest.cc
+run_unittests_SOURCES += loader_test.cc
+run_unittests_SOURCES += logcheck.h
+run_unittests_SOURCES += creators.h
+run_unittests_SOURCES += logic_check_test.cc
+run_unittests_SOURCES += sockaddr.h
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/acl/tests/acl_test.cc b/src/lib/acl/tests/acl_test.cc
new file mode 100644
index 0000000..15ffef6
--- /dev/null
+++ b/src/lib/acl/tests/acl_test.cc
@@ -0,0 +1,90 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/shared_ptr.hpp>
+
+#include "logcheck.h"
+
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using boost::shared_ptr;
+
+namespace {
+
+// Test version of the ACL class. It adds a few methods to examine the protected
+// data, but does not change the implementation.
+class TestACL : public ACL<Log> {
+public:
+ TestACL() :
+ ACL<Log>(DROP)
+ {}
+ // Check the stored default action there
+ void checkDefaultAction(BasicAction ac) {
+ EXPECT_EQ(getDefaultAction(), ac);
+ }
+};
+
+// The test fixture. Contains some members so they don't need to be created
+// manually each time, and some convenience functions.
+class ACLTest : public ::testing::Test {
+public:
+ ACLTest() :
+ next_check_(0)
+ {}
+ TestACL acl_;
+ Log log_;
+ size_t next_check_;
+ boost::shared_ptr<Check<Log> > getCheck(bool accepts) {
+ return (shared_ptr<Check<Log> >(new ConstCheck(accepts,
+ next_check_++)));
+ }
+};
+
+/*
+ * This tests the default action and that nothing is run if nothing is
+ * inserted (it's hard to imagine otherwise though).
+ *
+ * We use the default ACL unchanged from the test class.
+ */
+TEST_F(ACLTest, emptyRule) {
+ acl_.checkDefaultAction(DROP);
+ EXPECT_EQ(DROP, acl_.execute(log_));
+ // No test was run
+ log_.checkFirst(0);
+}
+
+/*
+ * This tests the default action in case no check matches.
+ */
+TEST_F(ACLTest, noMatch) {
+ acl_.append(getCheck(false), ACCEPT);
+ acl_.append(getCheck(false), REJECT);
+ EXPECT_EQ(DROP, acl_.execute(log_));
+ // The first two checks were actually run (and didn't match)
+ log_.checkFirst(2);
+}
+
+/*
+ * Checks that it takes the first matching check and returns the
+ * value. Also checks that the others aren't run at all.
+ */
+TEST_F(ACLTest, firstMatch) {
+ acl_.append(getCheck(false), ACCEPT);
+ acl_.append(getCheck(true), REJECT);
+ acl_.append(getCheck(true), ACCEPT);
+ EXPECT_EQ(REJECT, acl_.execute(log_));
+ log_.checkFirst(2);
+}
+
+}
diff --git a/src/lib/acl/tests/check_test.cc b/src/lib/acl/tests/check_test.cc
new file mode 100644
index 0000000..e83e8f2
--- /dev/null
+++ b/src/lib/acl/tests/check_test.cc
@@ -0,0 +1,70 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <acl/check.h>
+
+using namespace isc::acl;
+
+namespace {
+
+// This test has two functions. For one, it checks that the default
+// implementations do what they should, and it makes sure the template
+// actually compiles
+// (as templates are syntax-checked upon instantiation).
+
+// This is a test check that just passes the boolean it gets.
+class Pass : public Check<bool> {
+public:
+ virtual bool matches(const bool& value) const { return (value); }
+};
+
+// This is a simple test compound check. It contains two Pass checks
+// and passes result of the first one.
+
+class First : public CompoundCheck<bool> {
+public:
+ // The internal checks are public, so we can check the addresses
+ Pass first, second;
+ virtual Checks getSubexpressions() const {
+ Checks result;
+ result.push_back(&first);
+ result.push_back(&second);
+ return (result);
+ }
+ virtual bool matches(const bool& value) const {
+ return (first.matches(value));
+ }
+};
+
+TEST(Check, defaultCheckValues) {
+ Pass p;
+ EXPECT_EQ(Check<bool>::UNKNOWN_COST, p.cost());
+ EXPECT_TRUE(p.matches(true));
+ EXPECT_FALSE(p.matches(false));
+ // The exact text is compiler-dependent, but we check it returns something
+ // and can be compiled
+ EXPECT_FALSE(p.toText().empty());
+}
+
+TEST(Check, defaultCompoundValues) {
+ First f;
+ EXPECT_EQ(2 * Check<bool>::UNKNOWN_COST, f.cost());
+ EXPECT_TRUE(f.pure());
+ First::Checks c(f.getSubexpressions());
+ ASSERT_EQ(2, c.size());
+ EXPECT_EQ(&f.first, c[0]);
+ EXPECT_EQ(&f.second, c[1]);
+}
+
+}
diff --git a/src/lib/acl/tests/creators.h b/src/lib/acl/tests/creators.h
new file mode 100644
index 0000000..584df71
--- /dev/null
+++ b/src/lib/acl/tests/creators.h
@@ -0,0 +1,158 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is not a public header, but some code shared between tests
+// This one contains various creators to test the loader and other creators
+
+#ifndef CREATORS_H
+#define CREATORS_H
+
+#include "logcheck.h"
+
+#include <cc/data.h>
+#include <acl/loader.h>
+#include <string>
+
+namespace isc {
+namespace acl {
+namespace tests {
+
+// A check that doesn't check anything but remembers its own name
+// and data
+class NamedCheck : public Check<Log> {
+public:
+ NamedCheck(const std::string& name, isc::data::ConstElementPtr data) :
+ name_(name),
+ data_(data)
+ {}
+ virtual bool matches(const Log&) const { return (true); }
+ const std::string name_;
+ const isc::data::ConstElementPtr data_;
+};
+
+// The creator of NamedCheck
+class NamedCreator : public Loader<Log>::CheckCreator {
+public:
+ NamedCreator(const std::string& name, bool abbreviatedList = true) :
+ abbreviated_list_(abbreviatedList)
+ {
+ names_.push_back(name);
+ }
+ NamedCreator(const std::vector<std::string>& names) :
+ names_(names),
+ abbreviated_list_(true)
+ {}
+ std::vector<std::string> names() const {
+ return (names_);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string& name,
+ isc::data::ConstElementPtr data,
+ const Loader<Log>&)
+ {
+ bool found(false);
+ for (std::vector<std::string>::const_iterator i(names_.begin());
+ i != names_.end(); ++i) {
+ if (*i == name) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_TRUE(found) << "Name " << name << " passed to creator which "
+ "doesn't handle it.";
+ return (boost::shared_ptr<Check<Log> >(new NamedCheck(name, data)));
+ }
+ bool allowListAbbreviation() const {
+ return (abbreviated_list_);
+ }
+private:
+ std::vector<std::string> names_;
+ const bool abbreviated_list_;
+};
+
+// To be thrown in tests internally
+class TestCreatorError {};
+
+// This will throw every time it should create something
+class ThrowCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("throw");
+ return (result);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr,
+ const Loader<Log>&)
+ {
+ throw TestCreatorError();
+ }
+};
+
+// This throws whenever the match is called on it
+class ThrowCheck : public Check<Log> {
+public:
+ virtual bool matches(const Log&) const {
+ throw TestCreatorError();
+ }
+};
+
+// And creator for it
+class ThrowCheckCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("throwcheck");
+ return (result);
+ }
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr,
+ const Loader<Log>&)
+ {
+ return (boost::shared_ptr<Check<Log> >(new ThrowCheck()));
+ }
+};
+
+class LogCreator : public Loader<Log>::CheckCreator {
+public:
+ std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back("logcheck");
+ return (result);
+ }
+ /*
+ * For simplicity, we just take two values as a list: the first is the
+ * logging cell used, the second is the result of the check. No error
+ * checking is done; if there's a bug in the test, it will throw TypeError
+ * for us.
+ */
+ boost::shared_ptr<Check<Log> > create(const std::string&,
+ isc::data::ConstElementPtr definition,
+ const Loader<Log>&)
+ {
+ std::vector<isc::data::ConstElementPtr> list(definition->listValue());
+ int logpos(list[0]->intValue());
+ bool accept(list[1]->boolValue());
+ return (boost::shared_ptr<ConstCheck>(new ConstCheck(accept, logpos)));
+ }
+ // We take a list, so don't interpret it for us
+ virtual bool allowListAbbreviation() const { return (false); }
+};
+
+}
+}
+}
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
new file mode 100644
index 0000000..b3ddbf4
--- /dev/null
+++ b/src/lib/acl/tests/dns_test.cc
@@ -0,0 +1,271 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <vector>
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/tsigkey.h>
+#include <dns/tsigrecord.h>
+#include <dns/rdataclass.h>
+
+#include <cc/data.h>
+#include <acl/dns.h>
+#include <acl/loader.h>
+#include <acl/check.h>
+#include <acl/ip_check.h>
+
+#include "sockaddr.h"
+
+#include <gtest/gtest.h>
+
+using namespace std;
+using boost::scoped_ptr;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::data;
+using namespace isc::acl;
+using namespace isc::acl::dns;
+using isc::acl::LoaderError;
+
+namespace {
+
+TEST(DNSACL, getRequestLoader) {
+ dns::RequestLoader* l(&getRequestLoader());
+ ASSERT_TRUE(l != NULL);
+ EXPECT_EQ(l, &getRequestLoader());
+ EXPECT_NO_THROW(l->load(Element::fromJSON("[{\"action\": \"DROP\"}]")));
+
+ // Confirm it can load the ACL syntax acceptable to a default creator.
+ // Tests to see whether the loaded rules work correctly will be in
+ // other dedicated tests below.
+ EXPECT_NO_THROW(l->load(Element::fromJSON("[{\"action\": \"DROP\","
+ " \"from\": \"192.0.2.1\"}]")));
+}
+
+class RequestCheckCreatorTest : public ::testing::Test {
+protected:
+ dns::internal::RequestCheckCreator creator_;
+
+ typedef boost::shared_ptr<const dns::RequestCheck> ConstRequestCheckPtr;
+ ConstRequestCheckPtr check_;
+};
+
+TEST_F(RequestCheckCreatorTest, names) {
+ const vector<string> names = creator_.names();
+ EXPECT_EQ(2, names.size());
+ EXPECT_TRUE(find(names.begin(), names.end(), "from") != names.end());
+ EXPECT_TRUE(find(names.begin(), names.end(), "key") != names.end());
+}
+
+TEST_F(RequestCheckCreatorTest, allowListAbbreviation) {
+ EXPECT_FALSE(creator_.allowListAbbreviation());
+}
+
+// The following two tests check the creator for the form of
+// 'from: "IP prefix"'. We don't test many variants of prefixes here; those
+// are covered in the tests for IPCheck.
+TEST_F(RequestCheckCreatorTest, createIPv4Check) {
+ check_ = creator_.create("from", Element::fromJSON("\"192.0.2.1\""),
+ getRequestLoader());
+ const dns::internal::RequestIPCheck& ipcheck_ =
+ dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
+ EXPECT_EQ(AF_INET, ipcheck_.getFamily());
+ EXPECT_EQ(32, ipcheck_.getPrefixlen());
+ const vector<uint8_t> check_address(ipcheck_.getAddress());
+ ASSERT_EQ(4, check_address.size());
+ const uint8_t expected_address[] = { 192, 0, 2, 1 };
+ EXPECT_TRUE(equal(check_address.begin(), check_address.end(),
+ expected_address));
+}
+
+TEST_F(RequestCheckCreatorTest, createIPv6Check) {
+ check_ = creator_.create("from",
+ Element::fromJSON("\"2001:db8::5300/120\""),
+ getRequestLoader());
+ const dns::internal::RequestIPCheck& ipcheck =
+ dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
+ EXPECT_EQ(AF_INET6, ipcheck.getFamily());
+ EXPECT_EQ(120, ipcheck.getPrefixlen());
+ const vector<uint8_t> check_address(ipcheck.getAddress());
+ ASSERT_EQ(16, check_address.size());
+ const uint8_t expected_address[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x53, 0x00 };
+ EXPECT_TRUE(equal(check_address.begin(), check_address.end(),
+ expected_address));
+}
+
+TEST_F(RequestCheckCreatorTest, createTSIGKeyCheck) {
+ check_ = creator_.create("key", Element::fromJSON("\"key.example.com\""),
+ getRequestLoader());
+ const dns::internal::RequestKeyCheck& keycheck =
+ dynamic_cast<const dns::internal::RequestKeyCheck&>(*check_);
+ EXPECT_EQ(Name("key.example.com"), keycheck.getName());
+}
+
+TEST_F(RequestCheckCreatorTest, badCreate) {
+ // Invalid name
+ EXPECT_THROW(creator_.create("bad", Element::fromJSON("\"192.0.2.1\""),
+ getRequestLoader()), LoaderError);
+
+ // Invalid type of parameter
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("4"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("[]"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("1"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("{}"),
+ getRequestLoader()),
+ isc::data::TypeError);
+
+ // Syntax error for IPCheck
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("\"bad\""),
+ getRequestLoader()),
+ isc::InvalidParameter);
+
+ // Syntax error for Name (key) Check
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("\"bad..name\""),
+ getRequestLoader()),
+ EmptyLabel);
+
+ // NULL pointer
+ EXPECT_THROW(creator_.create("from", ConstElementPtr(), getRequestLoader()),
+ LoaderError);
+}
+
+class RequestCheckTest : public ::testing::Test {
+protected:
+ typedef boost::shared_ptr<const dns::RequestCheck> ConstRequestCheckPtr;
+
+ // A helper shortcut to create a single IP check for the given prefix.
+ ConstRequestCheckPtr createIPCheck(const string& prefix) {
+ return (creator_.create("from", Element::fromJSON(
+ string("\"") + prefix + string("\"")),
+ getRequestLoader()));
+ }
+
+ // A helper shortcut to create a single Name (key) check for the given
+ // name.
+ ConstRequestCheckPtr createKeyCheck(const string& key_name) {
+ return (creator_.create("key", Element::fromJSON(
+ string("\"") + key_name + string("\"")),
+ getRequestLoader()));
+ }
+
+ // Create a one-time request context for a specific test. Note that
+ // getSockAddr() uses static storage, so it cannot be called more than
+ // once in a single test.
+ const dns::RequestContext& getRequest4(const TSIGRecord* tsig = NULL) {
+ ipaddr.reset(new IPAddress(tests::getSockAddr("192.0.2.1")));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
+ return (*request);
+ }
+ const dns::RequestContext& getRequest6(const TSIGRecord* tsig = NULL) {
+ ipaddr.reset(new IPAddress(tests::getSockAddr("2001:db8::1")));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
+ return (*request);
+ }
+
+ // Create a one-time TSIG record for a specific test. The only parameter
+ // of the record that matters is the key name; others are hardcoded with
+ // arbitrarily chosen values.
+ const TSIGRecord* getTSIGRecord(const string& key_name) {
+ tsig_rdata.reset(new any::TSIG(TSIGKey::HMACMD5_NAME(), 0, 0, 0, NULL,
+ 0, 0, 0, NULL));
+ tsig.reset(new TSIGRecord(Name(key_name), *tsig_rdata));
+ return (tsig.get());
+ }
+
+private:
+ scoped_ptr<IPAddress> ipaddr;
+ scoped_ptr<dns::RequestContext> request;
+ scoped_ptr<any::TSIG> tsig_rdata;
+ scoped_ptr<TSIGRecord> tsig;
+ dns::internal::RequestCheckCreator creator_;
+};
+
+TEST_F(RequestCheckTest, checkIPv4) {
+ // Exact match
+ EXPECT_TRUE(createIPCheck("192.0.2.1")->matches(getRequest4()));
+ // Exact match (negative)
+ EXPECT_FALSE(createIPCheck("192.0.2.53")->matches(getRequest4()));
+ // Prefix match
+ EXPECT_TRUE(createIPCheck("192.0.2.0/24")->matches(getRequest4()));
+ // Prefix match (negative)
+ EXPECT_FALSE(createIPCheck("192.0.1.0/24")->matches(getRequest4()));
+ // Address family mismatch (the first 4 bytes of the IPv6 address have
+ // the same binary representation as the client's IPv4 address, which
+ // shouldn't confuse the match logic)
+ EXPECT_FALSE(createIPCheck("c000:0201::")->matches(getRequest4()));
+}
+
+TEST_F(RequestCheckTest, checkIPv6) {
+ // The following are a set of tests of the same concept as checkIPv4
+ EXPECT_TRUE(createIPCheck("2001:db8::1")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("2001:db8::53")->matches(getRequest6()));
+ EXPECT_TRUE(createIPCheck("2001:db8::/64")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("2001:db8:1::/64")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("32.1.13.184")->matches(getRequest6()));
+}
+
+TEST_F(RequestCheckTest, checkTSIGKey) {
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("badkey.example.com"))));
+
+ // Same for IPv6 (which shouldn't matter)
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("badkey.example.com"))));
+
+ // By default the test request doesn't have a TSIG key, so it shouldn't
+ // match any key checks.
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest4()));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest6()));
+}
+
+// The following tests only check that the creators are registered; the
+// creators themselves are tested elsewhere
+
+TEST(DNSACL, notLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"NOT\": {\"from\": \"192.0.2.1\"}}")));
+}
+
+TEST(DNSACL, allLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"ALL\": [{\"from\": \"192.0.2.1\"}]}")));
+}
+
+TEST(DNSACL, anyLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"ANY\": [{\"from\": \"192.0.2.1\"}]}")));
+}
+
+}
diff --git a/src/lib/acl/tests/dnsname_check_unittest.cc b/src/lib/acl/tests/dnsname_check_unittest.cc
new file mode 100644
index 0000000..95b5314
--- /dev/null
+++ b/src/lib/acl/tests/dnsname_check_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+
+#include <acl/dnsname_check.h>
+
+using namespace isc::dns;
+using namespace isc::acl::dns;
+
+// Provide a specialization of the NameCheck<Name>::matches() method.
+namespace isc {
+namespace acl {
+namespace dns {
+template <>
+bool NameCheck<Name>::matches(const Name& name) const {
+ return (name_ == name);
+}
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+namespace {
+TEST(DNSNameCheck, construct) {
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com")).getName());
+
+ // Construct the same check with an explicit trailing dot. It should
+ // produce the same name.
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com.")).getName());
+}
+
+TEST(DNSNameCheck, match) {
+ NameCheck<Name> check(Name("example.com"));
+ EXPECT_TRUE(check.matches(Name("example.com")));
+ EXPECT_FALSE(check.matches(Name("example.org")));
+
+ // comparison is case insensitive
+ EXPECT_TRUE(check.matches(Name("EXAMPLE.COM")));
+
+ // This is an exact match, so super/sub domains don't match
+ EXPECT_FALSE(check.matches(Name("com")));
+ EXPECT_FALSE(check.matches(Name("www.example.com")));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
new file mode 100644
index 0000000..8b8c498
--- /dev/null
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -0,0 +1,617 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <string.h>
+
+#include <gtest/gtest.h>
+#include <acl/ip_check.h>
+
+#include "sockaddr.h"
+
+using namespace isc::acl;
+using namespace isc::acl::internal;
+using namespace std;
+
+namespace {
+const size_t IPV4_SIZE = 4;
+const size_t IPV6_SIZE = 16;
+
+// Simple struct holding either an IPV4 or IPV6 address. This is the "Context"
+// used for the tests.
+//
+// The structure is also used for converting an IPV4 address to a four-byte
+// array.
+struct GeneralAddress {
+ int family; // Family of the address
+ vector<uint8_t> addr; // Address type. Size indicates what it holds
+
+ // Convert uint32_t address in host-byte order to a uint8_t vector in
+ // network-byte order.
+ vector<uint8_t> convertUint32(uint32_t address) {
+ BOOST_STATIC_ASSERT(sizeof(uint32_t) == IPV4_SIZE);
+
+ vector<uint8_t> result(IPV4_SIZE);
+
+ // Store the result in network-byte order: the MS byte of the input
+ // goes at the lowest index of the array.
+ result[3] = address & 0xff;
+ result[2] = (address >> 8) & 0xff;
+ result[1] = (address >> 16) & 0xff;
+ result[0] = (address >> 24) & 0xff;
+
+ return (result);
+ }
+
+ // Convenience constructor for V4 address. As it is not marked as explicit,
+ // it allows the automatic promotion of a uint32_t to a GeneralAddress data
+ // type in calls to matches().
+ GeneralAddress(uint32_t address) : family(AF_INET), addr()
+ {
+ addr = convertUint32(address);
+ }
+
+ // Convenience constructor for V6 address. As it is not marked as explicit,
+ // it allows the automatic promotion of a vector<uint8_t> to a
+ // GeneralAddress data type in calls to matches().
+ GeneralAddress(const vector<uint8_t>& address) : family(AF_INET6),
+ addr(address)
+ {
+ if (address.size() != IPV6_SIZE) {
+ isc_throw(isc::InvalidParameter, "vector passed to GeneralAddress "
+ "constructor is " << address.size() << " bytes long - it "
+ "should be " << IPV6_SIZE << " bytes instead");
+ }
+ }
+
+ // A couple of convenience methods for checking equality with different
+ // representations of an address.
+
+ // Check that the IPV4 address is the same as that given.
+ bool equals(uint32_t address) {
+ if (family == AF_INET) {
+ const vector<uint8_t> byte_address = convertUint32(address);
+ return (equal(byte_address.begin(), byte_address.end(),
+ addr.begin()));
+ }
+ return (false);
+ }
+
+ // Check that the array is equal to that given.
+ bool equals(const vector<uint8_t>& byte_address) {
+ if (addr.size() == byte_address.size()) {
+ return (equal(byte_address.begin(), byte_address.end(),
+ addr.begin()));
+ }
+ return (false);
+ }
+};
+} // Unnamed namespace
+
+// Provide a specialisation of the IPCheck::matches() method for the
+// GeneralAddress class.
+
+namespace isc {
+namespace acl {
+template <>
+bool IPCheck<GeneralAddress>::matches(const GeneralAddress& address) const {
+ return (compare(&address.addr[0], address.family));
+}
+} // namespace acl
+} // namespace isc
+
+namespace {
+/// *** Free Function Tests ***
+
+// Test the createMask() function.
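+// (A sketch of the behaviour exercised here, inferred from the expectations
+// below: createMask(i) appears to return a byte with the top "i" bits set,
+// e.g. createMask(0) == 0x00, createMask(1) == 0x80, createMask(8) == 0xff,
+// while any argument above 8 throws isc::OutOfRange.)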
+TEST(IPFunctionCheck, CreateMask) {
+
+ // Invalid arguments should throw.
+ EXPECT_THROW(createMask(9), isc::OutOfRange);
+
+ // Check on all possible 8-bit values.
+ uint16_t expected = 0xff00;
+ for (size_t i = 0; i <= 8; ++i, expected >>= 1) {
+ EXPECT_EQ(static_cast<uint8_t>(expected & 0xff), createMask(i));
+ }
+}
+
+// Test the splitIPAddress() function.
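+// (As exercised below, splitIPAddress() appears to split "addr/len" into the
+// address string and the numeric prefix length, returning -1 as the length
+// when no "/len" part is present, and throwing isc::InvalidParameter for
+// malformed or whitespace-padded input.)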
+TEST(IPFunctionCheck, SplitIPAddress) {
+ pair<string, uint32_t> result;
+
+ result = splitIPAddress("192.0.2.1");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(-1, result.second);
+
+ result = splitIPAddress("192.0.2.1/24");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(24, result.second);
+
+ result = splitIPAddress("2001:db8::/128");
+ EXPECT_EQ(string("2001:db8::"), result.first);
+ EXPECT_EQ(128, result.second);
+
+ result = splitIPAddress("192.0.2.1/0");
+ EXPECT_EQ(string("192.0.2.1"), result.first);
+ EXPECT_EQ(0, result.second);
+
+ EXPECT_THROW(splitIPAddress("192.0.2.43/27 "), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43/-1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43//1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("192.0.2.43/1/"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("/192.0.2.43/1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("2001:db8::/xxxx"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("2001:db8::/32/s"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("1/"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress("/1"), isc::InvalidParameter);
+ EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
+}
+
+TEST(IPAddress, constructIPv4) {
+ IPAddress ipaddr(tests::getSockAddr("192.0.2.1"));
+ const char expected_data[4] = { 192, 0, 2, 1 };
+ EXPECT_EQ(AF_INET, ipaddr.getFamily());
+ EXPECT_EQ(4, ipaddr.getLength());
+ EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 4));
+}
+
+TEST(IPAddress, constructIPv6) {
+ IPAddress ipaddr(tests::getSockAddr("2001:db8:1234:abcd::53"));
+ const char expected_data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0x12, 0x34, 0xab,
+ 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x53 };
+ EXPECT_EQ(AF_INET6, ipaddr.getFamily());
+ EXPECT_EQ(16, ipaddr.getLength());
+ EXPECT_EQ(0, memcmp(expected_data, ipaddr.getData(), 16));
+}
+
+TEST(IPAddress, badConstruct) {
+ struct sockaddr sa;
+ sa.sa_family = AF_UNSPEC;
+ EXPECT_THROW(IPAddress ipaddr(sa), isc::BadValue);
+}
+
+// *** IPv4 Tests ***
+
+TEST(IPCheck, V4StringConstructor) {
+
+ // Constructor with no prefix length given (32 is assumed).
+ IPCheck<GeneralAddress> acl1("192.0.2.255");
+ EXPECT_EQ(32, acl1.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl1.getFamily());
+
+ vector<uint8_t> stored1 = acl1.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored1.size());
+ GeneralAddress expected1(0xc00002ff);
+ EXPECT_TRUE(expected1.equals(stored1));
+
+ // Constructor with valid mask given
+ IPCheck<GeneralAddress> acl2("192.0.2.0/24");
+ EXPECT_EQ(24, acl2.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl2.getFamily());
+
+ vector<uint8_t> stored2 = acl2.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored2.size());
+ GeneralAddress expected2(0xc0000200);
+ EXPECT_TRUE(expected2.equals(stored2));
+
+ // More valid masks
+ IPCheck<GeneralAddress> acl3("192.0.2.1/0");
+ EXPECT_EQ(0, acl3.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl3.getFamily());
+
+ vector<uint8_t> stored3 = acl3.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored3.size());
+ GeneralAddress expected3(0xc0000201);
+ EXPECT_TRUE(expected3.equals(stored3));
+
+ IPCheck<GeneralAddress> acl4("192.0.2.2/32");
+ EXPECT_EQ(32, acl4.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl4.getFamily());
+
+ vector<uint8_t> stored4 = acl4.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored4.size());
+ GeneralAddress expected4(0xc0000202);
+ EXPECT_TRUE(expected4.equals(stored4));
+
+ // Any match
+ IPCheck<GeneralAddress> acl5("any4");
+ EXPECT_EQ(0, acl5.getPrefixlen());
+ EXPECT_EQ(AF_INET, acl5.getFamily());
+
+ vector<uint8_t> stored5 = acl5.getAddress();
+ EXPECT_EQ(IPV4_SIZE, stored5.size());
+ GeneralAddress expected5(0);
+ EXPECT_TRUE(expected5.equals(stored5));
+
+ // Invalid prefix lengths
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/33"), isc::OutOfRange);
+
+ // ... and invalid strings
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/-1"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/24/3"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("192.0.2.0/ww"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("aa.255.255.0/ww"),
+ isc::InvalidParameter);
+}
+
+TEST(IPCheck, V4CopyConstructor) {
+ IPCheck<GeneralAddress> acl1("192.0.2.1/24");
+ IPCheck<GeneralAddress> acl2(acl1);
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+ EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+ vector<uint8_t> net1 = acl1.getMask();
+ vector<uint8_t> net2 = acl2.getMask();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+ net1 = acl1.getAddress();
+ net2 = acl2.getAddress();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+TEST(IPCheck, V4AssignmentOperator) {
+ IPCheck<GeneralAddress> acl1("192.0.2.0/24");
+ IPCheck<GeneralAddress> acl2("192.0.2.128/25");
+ acl2 = acl1;
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+ EXPECT_EQ(acl1.getFamily(), acl2.getFamily());
+
+ vector<uint8_t> net1 = acl1.getMask();
+ vector<uint8_t> net2 = acl2.getMask();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+
+ net1 = acl1.getAddress();
+ net2 = acl2.getAddress();
+ EXPECT_EQ(net1.size(), net2.size());
+ EXPECT_TRUE(equal(net1.begin(), net1.end(), net2.begin()));
+}
+
+// Check that the comparison works - note that "matches" just calls the
+// internal compare() code. (Also note that the argument to matches() will be
+// automatically converted to the GeneralAddress data type used for the tests
+// because of its constructor taking a uint32_t argument.)
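+//
+// For instance, acl1.matches(0xc00002ff) below implicitly constructs
+// GeneralAddress(0xc00002ff), i.e. the IPv4 address 192.0.2.255.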
+
+TEST(IPCheck, V4Compare) {
+ // Exact address - match if given address matches stored address.
+ IPCheck<GeneralAddress> acl1("192.0.2.255/32");
+ EXPECT_TRUE(acl1.matches(0xc00002ff));
+ EXPECT_FALSE(acl1.matches(0xc00002fe));
+ EXPECT_FALSE(acl1.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl2("192.0.2.255/27");
+ EXPECT_TRUE(acl2.matches(0xc00002ff));
+ EXPECT_TRUE(acl2.matches(0xc00002fe));
+ EXPECT_TRUE(acl2.matches(0xc00002ee));
+ EXPECT_FALSE(acl2.matches(0xc00002de));
+ EXPECT_FALSE(acl2.matches(0xd00002fe));
+ EXPECT_FALSE(acl2.matches(0x13457f13));
+
+ // Match if "any4" is specified
+ IPCheck<GeneralAddress> acl3("any4");
+ EXPECT_TRUE(acl3.matches(0xc00002ff));
+ EXPECT_TRUE(acl3.matches(0xc00002fe));
+ EXPECT_TRUE(acl3.matches(0xc00002ee));
+ EXPECT_TRUE(acl3.matches(0xc00002de));
+ EXPECT_TRUE(acl3.matches(0xd00002fe));
+ EXPECT_TRUE(acl3.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl4("0.0.0.0/0");
+ EXPECT_TRUE(acl4.matches(0xc00002ff));
+ EXPECT_TRUE(acl4.matches(0xc00002fe));
+ EXPECT_TRUE(acl4.matches(0xc00002ee));
+ EXPECT_TRUE(acl4.matches(0xc00002de));
+ EXPECT_TRUE(acl4.matches(0xd00002fe));
+ EXPECT_TRUE(acl4.matches(0x13457f13));
+
+ IPCheck<GeneralAddress> acl5("192.0.2.255/0");
+ EXPECT_TRUE(acl5.matches(0xc00002ff));
+ EXPECT_TRUE(acl5.matches(0xc00002fe));
+ EXPECT_TRUE(acl5.matches(0xc00002ee));
+ EXPECT_TRUE(acl5.matches(0xc00002de));
+ EXPECT_TRUE(acl5.matches(0xd00002fe));
+ EXPECT_TRUE(acl5.matches(0x13457f13));
+}
+
+// *** IPV6 Tests ***
+
+// Some constants used in the tests
+
+const char* V6ADDR_1_STRING = "2001:0db8:1122:3344:5566:7788:99aa:bbcc";
+const uint8_t V6ADDR_1[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc
+};
+
+const char* V6ADDR_2_STRING = "2001:0db8::dead:beef";
+const uint8_t V6ADDR_2[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 48 bits
+const uint8_t V6ADDR_2_48[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0xff, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 49 bits
+const uint8_t V6ADDR_2_49[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x7f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 50 bits
+const uint8_t V6ADDR_2_50[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x3f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 51 bits
+const uint8_t V6ADDR_2_51[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x1f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 52 bits
+const uint8_t V6ADDR_2_52[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x0f, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef
+};
+
+// Identical to V6ADDR_2 to 127 bits
+const uint8_t V6ADDR_2_127[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xee
+};
+
+const uint8_t V6ADDR_3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const uint8_t V6ADDR_4[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+TEST(IPCheck, V6StringConstructor) {
+ IPCheck<GeneralAddress> acl1(V6ADDR_1_STRING);
+ vector<uint8_t> address = acl1.getAddress();
+
+ EXPECT_EQ(128, acl1.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl1.getFamily());
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_1));
+
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/51"));
+ address = acl2.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(51, acl2.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl2.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+ IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/127"));
+ address = acl3.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(127, acl3.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl3.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_2));
+
+ IPCheck<GeneralAddress> acl4("::1");
+ address = acl4.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(128, acl4.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl4.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_3));
+
+ // Any match. In these cases, the address should be all zeroes.
+ IPCheck<GeneralAddress> acl5("any6");
+ address = acl5.getAddress();
+ EXPECT_EQ(IPV6_SIZE, address.size());
+ EXPECT_EQ(0, acl5.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl5.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+ IPCheck<GeneralAddress> acl6("::/0");
+ address = acl6.getAddress();
+ EXPECT_EQ(0, acl6.getPrefixlen());
+ EXPECT_EQ(AF_INET6, acl6.getFamily());
+ EXPECT_TRUE(equal(address.begin(), address.end(), V6ADDR_4));
+
+ // Some invalid strings
+ EXPECT_THROW(IPCheck<GeneralAddress>("::1/129"), isc::OutOfRange);
+ EXPECT_THROW(IPCheck<GeneralAddress>("::1/24/3"), isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>(":::1/24"), isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("2001:0db8::abcd/ww"),
+ isc::InvalidParameter);
+ EXPECT_THROW(IPCheck<GeneralAddress>("2xx1:0db8::abcd/32"),
+ isc::InvalidParameter);
+}
+
+TEST(IPCheck, V6CopyConstructor) {
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+ IPCheck<GeneralAddress> acl2(acl1);
+
+ vector<uint8_t> acl1_address = acl1.getAddress();
+ vector<uint8_t> acl2_address = acl2.getAddress();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+ EXPECT_EQ(acl1_address.size(), acl2_address.size());
+ EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+ acl2_address.begin()));
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+ vector<uint8_t> acl1_mask = acl1.getMask();
+ vector<uint8_t> acl2_mask = acl2.getMask();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+ EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+ EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+ acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6AssignmentOperator) {
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/52"));
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_1_STRING) + string("/48"));
+
+ acl2 = acl1;
+
+ vector<uint8_t> acl1_address = acl1.getAddress();
+ vector<uint8_t> acl2_address = acl2.getAddress();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_address.size());
+ EXPECT_EQ(acl1_address.size(), acl2_address.size());
+ EXPECT_TRUE(equal(acl1_address.begin(), acl1_address.end(),
+ acl2_address.begin()));
+
+ EXPECT_EQ(acl1.getPrefixlen(), acl2.getPrefixlen());
+
+ vector<uint8_t> acl1_mask = acl1.getMask();
+ vector<uint8_t> acl2_mask = acl2.getMask();
+ EXPECT_EQ(sizeof(V6ADDR_1), acl1_mask.size());
+ EXPECT_EQ(acl1_mask.size(), acl2_mask.size());
+ EXPECT_TRUE(equal(acl1_mask.begin(), acl1_mask.end(),
+ acl2_mask.begin()));
+}
+
+TEST(IPCheck, V6Compare) {
+ // Set up some data.
+ vector<uint8_t> v6addr_2(V6ADDR_2, V6ADDR_2 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_48(V6ADDR_2_48, V6ADDR_2_48 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_49(V6ADDR_2_49, V6ADDR_2_49 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_50(V6ADDR_2_50, V6ADDR_2_50 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_51(V6ADDR_2_51, V6ADDR_2_51 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_52(V6ADDR_2_52, V6ADDR_2_52 + IPV6_SIZE);
+ vector<uint8_t> v6addr_2_127(V6ADDR_2_127, V6ADDR_2_127 + IPV6_SIZE);
+ vector<uint8_t> v6addr_3(V6ADDR_3, V6ADDR_3 + IPV6_SIZE);
+
+ // Exact address - match if given address matches stored address.
+ IPCheck<GeneralAddress> acl1(string(V6ADDR_2_STRING) + string("/128"));
+ EXPECT_TRUE(acl1.matches(v6addr_2));
+ EXPECT_FALSE(acl1.matches(v6addr_2_127));
+ EXPECT_FALSE(acl1.matches(v6addr_2_52));
+ EXPECT_FALSE(acl1.matches(v6addr_2_51));
+ EXPECT_FALSE(acl1.matches(v6addr_2_50));
+ EXPECT_FALSE(acl1.matches(v6addr_2_49));
+ EXPECT_FALSE(acl1.matches(v6addr_2_48));
+ EXPECT_FALSE(acl1.matches(v6addr_3));
+
+ // Match to various prefixes.
+ IPCheck<GeneralAddress> acl2(string(V6ADDR_2_STRING) + string("/127"));
+ EXPECT_TRUE(acl2.matches(v6addr_2));
+ EXPECT_TRUE(acl2.matches(v6addr_2_127));
+ EXPECT_FALSE(acl2.matches(v6addr_2_52));
+ EXPECT_FALSE(acl2.matches(v6addr_2_51));
+ EXPECT_FALSE(acl2.matches(v6addr_2_50));
+ EXPECT_FALSE(acl2.matches(v6addr_2_49));
+ EXPECT_FALSE(acl2.matches(v6addr_2_48));
+ EXPECT_FALSE(acl2.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl3(string(V6ADDR_2_STRING) + string("/52"));
+ EXPECT_TRUE(acl3.matches(v6addr_2));
+ EXPECT_TRUE(acl3.matches(v6addr_2_127));
+ EXPECT_TRUE(acl3.matches(v6addr_2_52));
+ EXPECT_FALSE(acl3.matches(v6addr_2_51));
+ EXPECT_FALSE(acl3.matches(v6addr_2_50));
+ EXPECT_FALSE(acl3.matches(v6addr_2_49));
+ EXPECT_FALSE(acl3.matches(v6addr_2_48));
+ EXPECT_FALSE(acl3.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl4(string(V6ADDR_2_STRING) + string("/51"));
+ EXPECT_TRUE(acl4.matches(v6addr_2));
+ EXPECT_TRUE(acl4.matches(v6addr_2_127));
+ EXPECT_TRUE(acl4.matches(v6addr_2_52));
+ EXPECT_TRUE(acl4.matches(v6addr_2_51));
+ EXPECT_FALSE(acl4.matches(v6addr_2_50));
+ EXPECT_FALSE(acl4.matches(v6addr_2_49));
+ EXPECT_FALSE(acl4.matches(v6addr_2_48));
+ EXPECT_FALSE(acl4.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl5(string(V6ADDR_2_STRING) + string("/50"));
+ EXPECT_TRUE(acl5.matches(v6addr_2));
+ EXPECT_TRUE(acl5.matches(v6addr_2_127));
+ EXPECT_TRUE(acl5.matches(v6addr_2_52));
+ EXPECT_TRUE(acl5.matches(v6addr_2_51));
+ EXPECT_TRUE(acl5.matches(v6addr_2_50));
+ EXPECT_FALSE(acl5.matches(v6addr_2_49));
+ EXPECT_FALSE(acl5.matches(v6addr_2_48));
+ EXPECT_FALSE(acl5.matches(v6addr_3));
+
+ IPCheck<GeneralAddress> acl6(string(V6ADDR_2_STRING) + string("/0"));
+ EXPECT_TRUE(acl6.matches(v6addr_2));
+ EXPECT_TRUE(acl6.matches(v6addr_2_127));
+ EXPECT_TRUE(acl6.matches(v6addr_2_52));
+ EXPECT_TRUE(acl6.matches(v6addr_2_51));
+ EXPECT_TRUE(acl6.matches(v6addr_2_50));
+ EXPECT_TRUE(acl6.matches(v6addr_2_49));
+ EXPECT_TRUE(acl6.matches(v6addr_2_48));
+ EXPECT_TRUE(acl6.matches(v6addr_3));
+
+ // Match on any address
+ IPCheck<GeneralAddress> acl7("any6");
+ EXPECT_TRUE(acl7.matches(v6addr_2));
+ EXPECT_TRUE(acl7.matches(v6addr_2_127));
+ EXPECT_TRUE(acl7.matches(v6addr_2_52));
+ EXPECT_TRUE(acl7.matches(v6addr_2_51));
+ EXPECT_TRUE(acl7.matches(v6addr_2_50));
+ EXPECT_TRUE(acl7.matches(v6addr_2_49));
+ EXPECT_TRUE(acl7.matches(v6addr_2_48));
+}
+
+// *** Mixed-mode tests - mainly to check that no exception is thrown ***
+
+TEST(IPCheck, MixedMode) {
+
+ // ACL has a V4 address specified, check against a V6 address.
+ IPCheck<GeneralAddress> acl1("192.0.2.255/24");
+ GeneralAddress test1(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+ EXPECT_NO_THROW(acl1.matches(test1));
+ EXPECT_FALSE(acl1.matches(test1));
+
+ // Now the reverse - the ACL is specified with a V6 address.
+ IPCheck<GeneralAddress> acl2(V6ADDR_2_STRING);
+ GeneralAddress test2(0x12345678);
+ EXPECT_FALSE(acl2.matches(test2));
+
+ // Ensure only a V4 address matches "any4".
+ IPCheck<GeneralAddress> acl3("any4");
+ EXPECT_FALSE(acl3.matches(test1));
+ EXPECT_TRUE(acl3.matches(test2));
+
+ // ... and check the reverse
+ IPCheck<GeneralAddress> acl4("any6");
+ EXPECT_TRUE(acl4.matches(test1));
+ EXPECT_FALSE(acl4.matches(test2));
+
+ // Check where the bit pattern of an IPv4 address matches that of an IPv6
+ // one.
+ IPCheck<GeneralAddress> acl5("2001:db8::/32");
+ GeneralAddress test5(0x20010db8);
+ EXPECT_FALSE(acl5.matches(test5));
+
+ // ... and where the reverse is true. (2001:db8 corresponds to 32.1.13.184).
+ IPCheck<GeneralAddress> acl6("32.1.13.184");
+ GeneralAddress test6(vector<uint8_t>(V6ADDR_1, V6ADDR_1 + IPV6_SIZE));
+ EXPECT_FALSE(acl6.matches(test6));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
new file mode 100644
index 0000000..1705c0a
--- /dev/null
+++ b/src/lib/acl/tests/loader_test.cc
@@ -0,0 +1,383 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <exceptions/exceptions.h>
+#include <acl/loader.h>
+#include <string>
+#include <gtest/gtest.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using isc::data::Element;
+using isc::data::ConstElementPtr;
+
+namespace {
+
+// We don't use the EXPECT_THROW macro, as it doesn't allow us to examine
+// the exception. We want to check that the offending element is stored in
+// it as well.
+void testActionLoaderException(const string& JSON) {
+ SCOPED_TRACE("Should throw with input: " + JSON);
+ ConstElementPtr elem(Element::fromJSON(JSON));
+ try {
+ defaultActionLoader(elem);
+ FAIL() << "It did not throw";
+ }
+ catch (const LoaderError& error) {
+ // Yes, comparing for pointer equality, that is enough, it
+ // should return the exact instance of the JSON object
+ EXPECT_EQ(elem, error.element());
+ }
+}
+
+// Test the defaultActionLoader function
+TEST(LoaderHelpers, DefaultActionLoader) {
+ // First the three valid inputs
+ EXPECT_EQ(ACCEPT, defaultActionLoader(Element::fromJSON("\"ACCEPT\"")));
+ EXPECT_EQ(REJECT, defaultActionLoader(Element::fromJSON("\"REJECT\"")));
+ EXPECT_EQ(DROP, defaultActionLoader(Element::fromJSON("\"DROP\"")));
+ // Now few invalid ones
+ // String, but unknown one
+ testActionLoaderException("\"UNKNOWN\"");
+ testActionLoaderException("42");
+ testActionLoaderException("true");
+ testActionLoaderException("null");
+ testActionLoaderException("[]");
+ testActionLoaderException("{}");
+}
+
+class LoaderTest : public ::testing::Test {
+public:
+ LoaderTest() :
+ loader_(REJECT)
+ {}
+ Loader<Log> loader_;
+ Log log_;
+ // Some convenience functions for setting things up
+
+ // Create a NamedCreator, convert to shared pointer
+ shared_ptr<NamedCreator> namedCreator(const string& name,
+ bool abbreviatedList = true)
+ {
+ return (shared_ptr<NamedCreator>(new NamedCreator(name,
+ abbreviatedList)));
+ }
+ // Create and add a NamedCreator
+ void addNamed(const string& name, bool abbreviatedList = true) {
+ EXPECT_NO_THROW(loader_.registerCreator(
+ namedCreator(name, abbreviatedList)));
+ }
+ template<class Result> shared_ptr<Result> loadCheckAny(const string&
+ definition)
+ {
+ SCOPED_TRACE("Loading check " + definition);
+ shared_ptr<Check<Log> > loaded;
+ EXPECT_NO_THROW(loaded = loader_.loadCheck(
+ Element::fromJSON(definition)));
+ shared_ptr<Result> result(dynamic_pointer_cast<Result>(
+ loaded));
+ EXPECT_TRUE(result);
+ return (result);
+ }
+ // Load a check and convert it to named check to examine it
+ shared_ptr<NamedCheck> loadCheck(const string& definition) {
+ return (loadCheckAny<NamedCheck>(definition));
+ }
+ // Check that loadCheck() throws an exception for the given input
+ void checkException(const string& JSON) {
+ SCOPED_TRACE("Loading check exception: " + JSON);
+ ConstElementPtr input(Element::fromJSON(JSON));
+ // Not using EXPECT_THROW, we want to examine the exception
+ try {
+ loader_.loadCheck(input);
+ FAIL() << "Should have thrown";
+ }
+ catch (const LoaderError& e) {
+ // It should be identical copy, so checking pointers
+ EXPECT_EQ(input, e.element());
+ }
+ }
+ // Insert the throw, throwcheck and logcheck checks into the loader
+ void aclSetup() {
+ try {
+ loader_.registerCreator(shared_ptr<ThrowCreator>(new
+ ThrowCreator()));
+ loader_.registerCreator(shared_ptr<ThrowCheckCreator>(
+ new ThrowCheckCreator()));
+ loader_.registerCreator(shared_ptr<LogCreator>(new LogCreator()));
+ }
+ // We ignore this exception here, because it happens when we try to
+ // insert the creators multiple times. This is harmless.
+ catch (const LoaderError&) {}
+ }
+ // Create an ACL, run it, and check its result and how many of the
+ // first log items it marked
+ //
+ // Works with preset names throw and logcheck
+ void aclRun(const string& JSON, BasicAction expectedResult,
+ size_t logged)
+ {
+ SCOPED_TRACE("Running ACL for " + JSON);
+ aclSetup();
+ shared_ptr<ACL<Log> > acl;
+ EXPECT_NO_THROW(acl = loader_.load(Element::fromJSON(JSON)));
+ EXPECT_EQ(expectedResult, acl->execute(log_));
+ log_.checkFirst(logged);
+ }
+ // Check it throws an error when creating the ACL
+ void aclException(const string& JSON) {
+ SCOPED_TRACE("Trying to load bad " + JSON);
+ aclSetup();
+ EXPECT_THROW(loader_.load(Element::fromJSON(JSON)), LoaderError);
+ }
+ // Check that the subexpression is a NamedCheck with the correct data
+ void isSubexprNamed(const CompoundCheck<Log>* compound, size_t index,
+ const string& name, ConstElementPtr data)
+ {
+ if (index < compound->getSubexpressions().size()) {
+ const NamedCheck*
+ check(dynamic_cast<const NamedCheck*>(compound->
+ getSubexpressions()
+ [index]));
+ ASSERT_TRUE(check) << "The subexpression is of different type";
+ EXPECT_EQ(name, check->name_);
+ EXPECT_TRUE(data->equals(*check->data_));
+ }
+ }
+};
+
+// Test that it does not accept a duplicate creator
+TEST_F(LoaderTest, CreatorDuplicity) {
+ addNamed("name");
+ EXPECT_THROW(loader_.registerCreator(namedCreator("name")), LoaderError);
+}
+
+// Test that when it does not accept a duplicate, nothing is inserted
+TEST_F(LoaderTest, CreatorDuplicateUnchanged) {
+ addNamed("name1");
+ vector<string> names;
+ names.push_back("name2");
+ names.push_back("name1");
+ names.push_back("name3");
+ EXPECT_THROW(loader_.registerCreator(
+ shared_ptr<NamedCreator>(new NamedCreator(names))), LoaderError);
+ // It should now reject both name2 and name3 as not known
+ checkException("{\"name2\": null}");
+ checkException("{\"name3\": null}");
+}
+
+// Test that we can register a creator and load a check with the name
+TEST_F(LoaderTest, SimpleCheckLoad) {
+ addNamed("name");
+ shared_ptr<NamedCheck> check(loadCheck("{\"name\": 42}"));
+ EXPECT_EQ("name", check->name_);
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
+}
+
+// As above, but there are multiple creators registered within the loader
+TEST_F(LoaderTest, MultiCreatorCheckLoad) {
+ addNamed("name1");
+ addNamed("name2");
+ shared_ptr<NamedCheck> check(loadCheck("{\"name2\": 42}"));
+ EXPECT_EQ("name2", check->name_);
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
+}
+
+// Similar to above, but there's a creator with multiple names
+TEST_F(LoaderTest, MultiNameCheckLoad) {
+ addNamed("name1");
+ vector<string> names;
+ names.push_back("name2");
+ names.push_back("name3");
+ EXPECT_NO_THROW(loader_.registerCreator(shared_ptr<NamedCreator>(
+ new NamedCreator(names))));
+ shared_ptr<NamedCheck> check(loadCheck("{\"name3\": 42}"));
+ EXPECT_EQ("name3", check->name_);
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("42")));
+}
+
+// Invalid format is rejected
+TEST_F(LoaderTest, InvalidFormatCheck) {
+ checkException("[]");
+ checkException("42");
+ checkException("\"hello\"");
+ checkException("null");
+}
+
+// Empty check is rejected
+TEST_F(LoaderTest, EmptyCheck) {
+ checkException("{}");
+}
+
+// The name isn't known
+TEST_F(LoaderTest, UnknownName) {
+ checkException("{\"unknown\": null}");
+}
+
+// Exception from the creator is propagated
+TEST_F(LoaderTest, CheckPropagate) {
+ loader_.registerCreator(shared_ptr<ThrowCreator>(new ThrowCreator()));
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"throw\": null}")),
+ TestCreatorError);
+}
+
+// The abbreviated form of a check
+TEST_F(LoaderTest, AndAbbrev) {
+ addNamed("name1");
+ addNamed("name2");
+ shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": 2}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ EXPECT_EQ(2, oper->getSubexpressions().size());
+ // Note: this test relies on the ordering in which the map returns its
+ // elements, which is the lexicographical order of the strings.
+ // This is not required by our interface, but it makes the test easier
+ // to write.
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ isSubexprNamed(&*oper, 1, "name2", Element::fromJSON("2"));
+ }
+}
+
+// The abbreviated form of parameters
+TEST_F(LoaderTest, OrAbbrev) {
+ addNamed("name1");
+ shared_ptr<LogicOperator<AnyOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AnyOfSpec, Log> >("{\"name1\": [1, 2]}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ EXPECT_EQ(2, oper->getSubexpressions().size());
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ isSubexprNamed(&*oper, 1, "name1", Element::fromJSON("2"));
+ }
+}
+
+// Combined abbreviated form, both at once
+
+TEST_F(LoaderTest, BothAbbrev) {
+ addNamed("name1");
+ addNamed("name2");
+ shared_ptr<LogicOperator<AllOfSpec, Log> > oper(
+ loadCheckAny<LogicOperator<AllOfSpec, Log> >("{\"name1\": 1, \"name2\": [3, 4]}"));
+ // If we don't have anything loaded, the rest would crash. It is already
+ // reported from within loadCheckAny if it isn't loaded.
+ if (oper) {
+ // The subexpressions are correct
+ ASSERT_EQ(2, oper->getSubexpressions().size());
+ // Note: this test relies on the ordering in which the map returns its
+ // elements, which is the lexicographical order of the strings.
+ // This is not required by our interface, but it makes the test easier
+ // to write.
+ isSubexprNamed(&*oper, 0, "name1", Element::fromJSON("1"));
+ const LogicOperator<AnyOfSpec, Log>*
+ orOper(dynamic_cast<const LogicOperator<AnyOfSpec, Log>*>(
+ oper->getSubexpressions()[1]));
+ ASSERT_TRUE(orOper) << "Different type than AnyOf operator";
+ EXPECT_EQ(2, orOper->getSubexpressions().size());
+ isSubexprNamed(orOper, 0, "name2", Element::fromJSON("3"));
+ isSubexprNamed(orOper, 1, "name2", Element::fromJSON("4"));
+ }
+}
+
+// But this is not the abbreviated form; it should be passed directly to the
+// creator
+TEST_F(LoaderTest, ListCheck) {
+ addNamed("name1", false);
+ shared_ptr<NamedCheck> check(loadCheck("{\"name1\": [1, 2]}"));
+ EXPECT_EQ("name1", check->name_);
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("[1, 2]")));
+}
+
+// Check the action key is ignored as it should be
+TEST_F(LoaderTest, CheckNoAction) {
+ addNamed("name1");
+ shared_ptr<NamedCheck> check(loadCheck("{\"name1\": 1, \"action\": 2}"));
+ EXPECT_EQ("name1", check->name_);
+ EXPECT_TRUE(check->data_->equals(*Element::fromJSON("1")));
+}
+
+// The empty ACL can be created and run, providing the default action
+TEST_F(LoaderTest, EmptyACL) {
+ aclRun("[]", REJECT, 0);
+}
+
+// We can create a simple ACL, which will return the correct default
+// action
+TEST_F(LoaderTest, NoMatchACL) {
+ aclRun("[{\"logcheck\": [0, false], \"action\": \"ACCEPT\"}]",
+ REJECT, 1);
+}
+
+// We can create a more complicated ACL; it will match at the second
+// check
+TEST_F(LoaderTest, MatchACL) {
+ aclRun("["
+ " {\"logcheck\": [0, false], \"action\": \"DROP\"},"
+ " {\"logcheck\": [1, true], \"action\": \"ACCEPT\"}"
+ "]", ACCEPT, 2);
+}
+
+// ACL without a check (matches unconditionally)
+// We add one more check after it, to make sure it is really not run
+TEST_F(LoaderTest, NoCheckACL) {
+ aclRun("["
+ " {\"action\": \"DROP\"},"
+ " {\"throwcheck\": 1, \"action\": \"ACCEPT\"}"
+ "]", DROP, 0);
+}
+
+// Malformed things are rejected
+TEST_F(LoaderTest, InvalidACLFormat) {
+ // Not a list
+ aclException("{}");
+ aclException("42");
+ aclException("true");
+ aclException("null");
+ aclException("\"hello\"");
+ // Malformed element
+ aclException("[42]");
+ aclException("[\"hello\"]");
+ aclException("[[]]");
+ aclException("[true]");
+ aclException("[null]");
+}
+
+// If there's no action keyword, it is rejected
+TEST_F(LoaderTest, NoAction) {
+ aclException("[{}]");
+ aclException("[{\"logcheck\": [0, true]}]");
+}
+
+// Exceptions from check creation are propagated
+TEST_F(LoaderTest, ACLPropagate) {
+ aclSetup();
+ EXPECT_THROW(loader_.load(
+ Element::fromJSON(
+ "[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
+ TestCreatorError);
+}
+
+TEST_F(LoaderTest, nullDescription) {
+ EXPECT_THROW(loader_.load(ConstElementPtr()), isc::InvalidParameter);
+}
+
+}
diff --git a/src/lib/acl/tests/logcheck.h b/src/lib/acl/tests/logcheck.h
new file mode 100644
index 0000000..424c53d
--- /dev/null
+++ b/src/lib/acl/tests/logcheck.h
@@ -0,0 +1,94 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LOGCHECK_H
+#define LOGCHECK_H
+
+#include <gtest/gtest.h>
+#include <acl/acl.h>
+#include <cassert>
+
+// This is not a public header, it is used only inside the tests.
+
+namespace isc {
+namespace acl {
+namespace tests {
+
+// This is an arbitrary guess at the size of the log. If it's too small for
+// your test, just make it bigger.
+const size_t LOG_SIZE = 10;
+
+// This will remember which checks did run already.
+struct Log {
+ // The actual log cells; run[i] records whether the i-th check ran
+ mutable bool run[LOG_SIZE];
+ Log() {
+ // Nothing run yet
+ for (size_t i(0); i < LOG_SIZE; ++ i) {
+ run[i] = false;
+ }
+ }
+ // Checks that the first 'amount' checks ran and the rest didn't.
+ void checkFirst(size_t amount) const {
+ ASSERT_LE(amount, LOG_SIZE) << "Wrong test: amount bigger than size "
+ "of log";
+ {
+ SCOPED_TRACE("Checking that the first amount of checks did run");
+ for (size_t i(0); i < amount; ++ i) {
+ EXPECT_TRUE(run[i]) << "Check #" << i << " did not run.";
+ }
+ }
+
+ {
+ SCOPED_TRACE("Checking that the rest did not run");
+ for (size_t i(amount); i < LOG_SIZE; ++ i) {
+ EXPECT_FALSE(run[i]) << "Check #" << i << " did run.";
+ }
+ }
+ }
+};
+
+// This returns true or false every time, no matter what is passed to it.
+// But it logs that it did run.
+class ConstCheck : public Check<Log> {
+public:
+ ConstCheck(bool accepts, size_t logNum) :
+ logNum_(logNum),
+ accepts_(accepts)
+ {
+ assert(logNum < LOG_SIZE); // If this fails, the LOG_SIZE is too small
+ }
+ virtual bool matches(const Log& log) const {
+ /*
+ * This is abuse of the context. It is designed to carry the
+ * information to check, not to modify it. However, this is the
+ * easiest way to do the test, so we go against the design.
+ */
+ log.run[logNum_] = true;
+ return (accepts_);
+ }
+private:
+ size_t logNum_;
+ bool accepts_;
+};
+
+}
+}
+}
+
+#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
new file mode 100644
index 0000000..1c80277
--- /dev/null
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -0,0 +1,291 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "creators.h"
+#include <acl/logic_check.h>
+#include <typeinfo>
+#include <boost/shared_ptr.hpp> // for static_pointer_cast
+
+using namespace std;
+using namespace boost;
+using namespace isc::acl;
+using namespace isc::acl::tests;
+using isc::data::Element;
+
+namespace {
+
+// Test the defs in AnyOfSpec
+TEST(LogicOperators, AnyOfSpec) {
+ EXPECT_FALSE(AnyOfSpec::start());
+ EXPECT_FALSE(AnyOfSpec::terminate(false));
+ EXPECT_TRUE(AnyOfSpec::terminate(true));
+}
+
+// Test the defs in AllOfSpec
+TEST(LogicOperators, AllOfSpec) {
+ EXPECT_TRUE(AllOfSpec::start());
+ EXPECT_TRUE(AllOfSpec::terminate(false));
+ EXPECT_FALSE(AllOfSpec::terminate(true));
+}
+
+// Generic test of one check
+template<typename Mode>
+void
+testCheck(bool emptyResult) {
+ // It can be created
+ LogicOperator<Mode, Log> oper;
+ // It is empty by default
+ EXPECT_EQ(0, oper.getSubexpressions().size());
+ // And it returns the expected result for an empty list of subexpressions
+ Log log;
+ EXPECT_EQ(emptyResult, oper.matches(log));
+ log.checkFirst(0);
+ // Fill it with some subexpressions
+ typedef shared_ptr<ConstCheck> CheckPtr;
+ oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 0)));
+ oper.addSubexpression(CheckPtr(new ConstCheck(emptyResult, 1)));
+ // Check what happens when only the default-valued ones are there
+ EXPECT_EQ(2, oper.getSubexpressions().size());
+ EXPECT_EQ(emptyResult, oper.matches(log));
+ log.checkFirst(2);
+ oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 2)));
+ oper.addSubexpression(CheckPtr(new ConstCheck(!emptyResult, 3)));
+ // They are listed there
+ EXPECT_EQ(4, oper.getSubexpressions().size());
+ // Now the third one terminates the evaluation, so the first three run
+ // but the fourth won't
+ EXPECT_EQ(!emptyResult, oper.matches(log));
+ log.checkFirst(3);
+}
+
+TEST(LogicOperators, AllOf) {
+ testCheck<AllOfSpec>(true);
+}
+
+TEST(LogicOperators, AnyOf) {
+ testCheck<AnyOfSpec>(false);
+}
+
+// Fixture for the tests of the creators
+class LogicCreatorTest : public ::testing::Test {
+private:
+ typedef shared_ptr<Loader<Log>::CheckCreator> CreatorPtr;
+public:
+ // Register some creators, both tested ones and some auxiliary ones for
+ // help
+ LogicCreatorTest():
+ loader_(REJECT)
+ {
+ loader_.registerCreator(CreatorPtr(new
+ LogicCreator<AnyOfSpec, Log>("ANY")));
+ loader_.registerCreator(CreatorPtr(new
+ LogicCreator<AllOfSpec, Log>("ALL")));
+ loader_.registerCreator(CreatorPtr(new ThrowCreator));
+ loader_.registerCreator(CreatorPtr(new LogCreator));
+ loader_.registerCreator(CreatorPtr(new NotCreator<Log>("NOT")));
+ }
+ // To mark which parts of the check did run
+ Log log_;
+ // The loader
+ Loader<Log> loader_;
+ // Some convenience shortcut names
+ typedef LogicOperator<AnyOfSpec, Log> AnyOf;
+ typedef LogicOperator<AllOfSpec, Log> AllOf;
+ typedef shared_ptr<AnyOf> AnyOfPtr;
+ typedef shared_ptr<AllOf> AllOfPtr;
+ // Loads the JSON as a check and tries to convert it to the given check
+ // subclass
+ template<typename Result> shared_ptr<Result> load(const string& JSON) {
+ shared_ptr<Check<Log> > result;
+ EXPECT_NO_THROW(result = loader_.loadCheck(Element::fromJSON(JSON)));
+ /*
+ * Optimally, we would use a dynamic_pointer_cast here to both
+ * convert the pointer and to check the type is correct. However,
+ * clang++ seems to be confused by templates and creates two typeids
+ * for the same templated type (even with the same parameters),
+ * therefore considering the types different, even if they are the same.
+ * This leads to a false alarm in the test. Luckily, it generates the
+ * same name for both typeids, so we use them instead (which is enough
+ * to test the correct type of Check is returned). Then we can safely
+ * cast statically, as we don't use any kind of nasty things like
+ * multiple inheritance.
+ */
+ EXPECT_STREQ(typeid(Result).name(), typeid(*result.get()).name());
+ shared_ptr<Result>
+ resultConverted(static_pointer_cast<Result>(result));
+ EXPECT_NE(shared_ptr<Result>(), resultConverted);
+ return (resultConverted);
+ }
+};
+
+// Test it can load empty ones
+TEST_F(LogicCreatorTest, empty) {
+ AnyOfPtr emptyAny(load<AnyOf>("{\"ANY\": []}"));
+ EXPECT_EQ(0, emptyAny->getSubexpressions().size());
+ AllOfPtr emptyAll(load<AllOf>("{\"ALL\": []}"));
+ EXPECT_EQ(0, emptyAll->getSubexpressions().size());
+}
+
+// Test it rejects invalid inputs (not a list as a parameter)
+TEST_F(LogicCreatorTest, invalid) {
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": {}}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ANY\": \"hello\"}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": {}}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"ALL\": \"hello\"}")),
+ LoaderError);
+}
+
+// Exceptions from subexpression creation aren't caught
+TEST_F(LogicCreatorTest, propagate) {
+ EXPECT_THROW(loader_.loadCheck(
+ Element::fromJSON("{\"ANY\": [{\"throw\": null}]}")),
+ TestCreatorError);
+ EXPECT_THROW(loader_.loadCheck(
+ Element::fromJSON("{\"ALL\": [{\"throw\": null}]}")),
+ TestCreatorError);
+}
+
+// We can create a more complex ANY check and run it correctly
+TEST_F(LogicCreatorTest, anyRun) {
+ AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+ " {\"logcheck\": [0, false]},"
+ " {\"logcheck\": [1, true]},"
+ " {\"logcheck\": [2, true]}"
+ "]}"));
+ EXPECT_EQ(3, any->getSubexpressions().size());
+ EXPECT_TRUE(any->matches(log_));
+ log_.checkFirst(2);
+}
+
+// We can create a more complex ALL check and run it correctly
+TEST_F(LogicCreatorTest, allRun) {
+ AllOfPtr all(load<AllOf>("{\"ALL\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [1, false]},"
+ " {\"logcheck\": [2, false]}"
+ "]}"));
+ EXPECT_EQ(3, all->getSubexpressions().size());
+ EXPECT_FALSE(all->matches(log_));
+ log_.checkFirst(2);
+}
+
+// Or is able to return false
+TEST_F(LogicCreatorTest, anyFalse) {
+ AnyOfPtr any(load<AnyOf>("{\"ANY\": ["
+ " {\"logcheck\": [0, false]},"
+ " {\"logcheck\": [1, false]},"
+ " {\"logcheck\": [2, false]}"
+ "]}"));
+ EXPECT_EQ(3, any->getSubexpressions().size());
+ EXPECT_FALSE(any->matches(log_));
+ log_.checkFirst(3);
+}
+
+// And is able to return true
+TEST_F(LogicCreatorTest, andTrue) {
+ AllOfPtr all(load<AllOf>("{\"ALL\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [1, true]},"
+ " {\"logcheck\": [2, true]}"
+ "]}"));
+ EXPECT_EQ(3, all->getSubexpressions().size());
+ EXPECT_TRUE(all->matches(log_));
+ log_.checkFirst(3);
+}
+
+// We can nest them together
+TEST_F(LogicCreatorTest, nested) {
+ AllOfPtr all(load<AllOf>("{\"ALL\": ["
+ " {\"ANY\": ["
+ " {\"logcheck\": [0, true]},"
+ " {\"logcheck\": [2, true]}"
+ " ]},"
+ " {\"logcheck\": [1, false]}"
+ "]}"));
+ EXPECT_EQ(2, all->getSubexpressions().size());
+ /*
+ * This has the same problem as the load() function above, and we use the
+ * same solution here.
+ */
+ ASSERT_STREQ(typeid(LogicOperator<AnyOfSpec, Log>).name(),
+ typeid(*all->getSubexpressions()[0]).name());
+ const LogicOperator<AnyOfSpec, Log>*
+ any(static_cast<const LogicOperator<AnyOfSpec, Log>*>
+ (all->getSubexpressions()[0]));
+ EXPECT_EQ(2, any->getSubexpressions().size());
+ EXPECT_FALSE(all->matches(log_));
+ log_.checkFirst(2);
+}
+
+void notTest(bool value) {
+ NotOperator<Log> notOp(shared_ptr<Check<Log> >(new ConstCheck(value, 0)));
+ Log log;
+ // It returns the negated value
+ EXPECT_EQ(!value, notOp.matches(log));
+ // And runs the single subexpression there
+ log.checkFirst(1);
+ // Check that getSubexpressions() does sane things
+ ASSERT_EQ(1, notOp.getSubexpressions().size());
+ EXPECT_EQ(value, notOp.getSubexpressions()[0]->matches(log));
+}
+
+TEST(Not, trueValue) {
+ notTest(true);
+}
+
+TEST(Not, falseValue) {
+ notTest(false);
+}
+
+TEST_F(LogicCreatorTest, notInvalid) {
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": \"hello\"}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": []}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": [{"
+ "\"logcheck\": [0, true]"
+ "}]}")),
+ LoaderError);
+}
+
+TEST_F(LogicCreatorTest, notValid) {
+ shared_ptr<NotOperator<Log> > notOp(load<NotOperator<Log> >("{\"NOT\":"
+ " {\"logcheck\":"
+ " [0, true]}}"));
+ EXPECT_FALSE(notOp->matches(log_));
+ log_.checkFirst(1);
+}
+
+}
diff --git a/src/lib/acl/tests/run_unittests.cc b/src/lib/acl/tests/run_unittests.cc
new file mode 100644
index 0000000..8dc59a2
--- /dev/null
+++ b/src/lib/acl/tests/run_unittests.cc
@@ -0,0 +1,24 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/acl/tests/sockaddr.h b/src/lib/acl/tests/sockaddr.h
new file mode 100644
index 0000000..bd30451
--- /dev/null
+++ b/src/lib/acl/tests/sockaddr.h
@@ -0,0 +1,69 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __ACL_TEST_SOCKADDR_H
+#define __ACL_TEST_SOCKADDR_H 1
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <exceptions/exceptions.h>
+
+namespace isc {
+namespace acl {
+namespace tests {
+
+// This is a helper function that returns a sockaddr for the given textual
+// IP address. Note that "inline" is crucial because this function is defined
+// in a header file included in multiple .cc files. Without inline it would
+// have external linkage and cause trouble at link time.
+//
+// Note that this function uses a static storage for the return value.
+// So if it's called more than once in a single context (e.g., in the same
+// EXPECT_xx()), it's unlikely to work as expected.
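+//
+// A minimal usage sketch (illustrative only, assuming the caller copies the
+// data before the next call - e.g. by constructing an IPAddress from the
+// result, as the tests in this directory do):
+//
+//     const IPAddress addr1(getSockAddr("192.0.2.1"));   // data copied here
+//     const IPAddress addr2(getSockAddr("2001:db8::1")); // addr1 stays valid
+//
+// By contrast, passing two getSockAddr() results into a single expression
+// would let the second call overwrite the first.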
+inline const struct sockaddr&
+getSockAddr(const char* const addr) {
+ struct addrinfo hints, *res;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_NUMERICHOST;
+
+ if (getaddrinfo(addr, NULL, &hints, &res) == 0) {
+ static struct sockaddr_storage ss;
+ void* ss_ptr = &ss;
+ memcpy(ss_ptr, res->ai_addr, res->ai_addrlen);
+ freeaddrinfo(res);
+ return (*static_cast<struct sockaddr*>(ss_ptr));
+ }
+
+ // We don't expect getaddrinfo to fail for our tests. But if that
+ // ever happens we throw an exception to make sure the corresponding test
+ // fails (either due to a failure of *_NO_THROW or the uncaught exception).
+ isc_throw(Unexpected,
+ "failed to convert textual IP address to sockaddr for " <<
+ addr);
+}
+
+} // end of namespace "tests"
+} // end of namespace "acl"
+} // end of namespace "isc"
+
+#endif // __ACL_TEST_SOCKADDR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/asiodns/Makefile.am b/src/lib/asiodns/Makefile.am
index 2a6c3ac..2d246ef 100644
--- a/src/lib/asiodns/Makefile.am
+++ b/src/lib/asiodns/Makefile.am
@@ -8,13 +8,13 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
AM_CXXFLAGS = $(B10_CXXFLAGS)
-CLEANFILES = *.gcno *.gcda asiodef.h asiodef.cc
+CLEANFILES = *.gcno *.gcda asiodns_messages.h asiodns_messages.cc
# Define rule to build logging source files from message file
-asiodef.h asiodef.cc: asiodef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodef.mes
+asiodns_messages.h asiodns_messages.cc: asiodns_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/asiodns/asiodns_messages.mes
-BUILT_SOURCES = asiodef.h asiodef.cc
+BUILT_SOURCES = asiodns_messages.h asiodns_messages.cc
lib_LTLIBRARIES = libasiodns.la
libasiodns_la_SOURCES = dns_answer.h
@@ -26,9 +26,9 @@ libasiodns_la_SOURCES += tcp_server.cc tcp_server.h
libasiodns_la_SOURCES += udp_server.cc udp_server.h
libasiodns_la_SOURCES += io_fetch.cc io_fetch.h
-nodist_libasiodns_la_SOURCES = asiodef.cc asiodef.h
+nodist_libasiodns_la_SOURCES = asiodns_messages.cc asiodns_messages.h
-EXTRA_DIST = asiodef.mes
+EXTRA_DIST = asiodns_messages.mes
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
diff --git a/src/lib/asiodns/asiodef.mes b/src/lib/asiodns/asiodef.mes
deleted file mode 100644
index 3f2e80c..0000000
--- a/src/lib/asiodns/asiodef.mes
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX ASIODNS_
-$NAMESPACE isc::asiodns
-
-% FETCHCOMP upstream fetch to %1(%2) has now completed
-A debug message, this records the the upstream fetch (a query made by the
-resolver on behalf of its client) to the specified address has completed.
-
-% FETCHSTOP upstream fetch to %1(%2) has been stopped
-An external component has requested the halting of an upstream fetch. This
-is an allowed operation, and the message should only appear if debug is
-enabled.
-
-% OPENSOCK error %1 opening %2 socket to %3(%4)
-The asynchronous I/O code encountered an error when trying to open a socket
-of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
-message.
-
-% RECVSOCK error %1 reading %2 data from %3(%4)
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-
-% SENDSOCK error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-
-% RECVTMO receive timeout while waiting for data from %1(%2)
-An upstream fetch from the specified address timed out. This may happen for
-any number of reasons and is most probably a problem at the remote server
-or a problem on the network. The message will only appear if debug is
-enabled.
-
-% UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-
-% UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
new file mode 100644
index 0000000..feb75d4
--- /dev/null
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -0,0 +1,56 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::asiodns
+
+% ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed
+A debug message, this records that the upstream fetch (a query made by the
+resolver on behalf of its client) to the specified address has completed.
+
+% ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped
+An external component has requested the halting of an upstream fetch. This
+is an allowed operation, and the message should only appear if debug is
+enabled.
+
+% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
+The asynchronous I/O code encountered an error when trying to open a socket
+of the specified protocol in order to send a message to the target address.
+The number of the system error that caused the problem is given in the
+message.
+
+% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
+An upstream fetch from the specified address timed out. This may happen for
+any number of reasons and is most probably a problem at the remote server
+or a problem on the network. The message will only appear if debug is
+enabled.
+
+% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+
+% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+
+% ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index e535381..466be3e 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -41,7 +41,7 @@
#include <log/logger.h>
#include <log/macros.h>
-#include <asiodns/asiodef.h>
+#include <asiodns/asiodns_messages.h>
#include <asiodns/io_fetch.h>
#include <util/buffer.h>
@@ -61,17 +61,13 @@ namespace asiodns {
/// Use the ASIO logger
-namespace {
-
isc::log::Logger logger("asiolink");
+
// Log debug verbosity
-enum {
- DBG_IMPORTANT = 1,
- DBG_COMMON = 20,
- DBG_ALL = 50
-};
-}
+const int DBG_IMPORTANT = DBGLVL_TRACE_BASIC;
+const int DBG_COMMON = DBGLVL_TRACE_DETAIL;
+const int DBG_ALL = DBGLVL_TRACE_DETAIL + 20;
/// \brief IOFetch Data
///
@@ -158,7 +154,7 @@ struct IOFetchData {
stopped(false),
timeout(wait),
packet(false),
- origin(ASIODNS_UNKORIGIN),
+ origin(ASIODNS_UNKNOWN_ORIGIN),
staging(),
qid(QidGenerator::getInstance().generateQid())
{}
@@ -209,16 +205,6 @@ IOFetch::IOFetch(Protocol protocol, IOService& service,
msg->setHeaderFlag(Message::HEADERFLAG_CD,
query_message->getHeaderFlag(Message::HEADERFLAG_CD));
- ConstEDNSPtr edns(query_message->getEDNS());
- const bool dnssec_ok = edns && edns->getDNSSECAwareness();
- if (edns) {
- EDNSPtr edns_response(new EDNS());
- edns_response->setDNSSECAwareness(dnssec_ok);
- // TODO: We should make our own edns bufsize length configurable
- edns_response->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
- msg->setEDNS(edns_response);
- }
-
initIOFetch(msg, protocol, service,
**(query_message->beginQuestion()),
address, port, buff, cb, wait);
@@ -238,6 +224,9 @@ IOFetch::initIOFetch(MessagePtr& query_msg, Protocol protocol, IOService& servic
query_msg->setRcode(Rcode::NOERROR());
query_msg->setHeaderFlag(Message::HEADERFLAG_RD);
query_msg->addQuestion(question);
+ EDNSPtr edns_query(new EDNS());
+ edns_query->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
+ query_msg->setEDNS(edns_query);
MessageRenderer renderer(*data_->msgbuf);
query_msg->toWire(renderer);
}
@@ -287,7 +276,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// Open a connection to the target system. For speed, if the operation
// is synchronous (i.e. UDP operation) we bypass the yield.
- data_->origin = ASIODNS_OPENSOCK;
+ data_->origin = ASIODNS_OPEN_SOCKET;
if (data_->socket->isOpenSynchronous()) {
data_->socket->open(data_->remote_snd.get(), *this);
} else {
@@ -297,7 +286,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
do {
// Begin an asynchronous send, and then yield. When the send completes,
// we will resume immediately after this point.
- data_->origin = ASIODNS_SENDSOCK;
+ data_->origin = ASIODNS_SEND_DATA;
CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
data_->msgbuf->getLength(), data_->remote_snd.get(), *this);
@@ -320,7 +309,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// received all the data before copying it back to the user's buffer.
// And we want to minimise the amount of copying...
- data_->origin = ASIODNS_RECVSOCK;
+ data_->origin = ASIODNS_READ_DATA;
data_->cumulative = 0; // No data yet received
data_->offset = 0; // First data into start of buffer
data_->received->clear(); // Clear the receive buffer
@@ -336,7 +325,7 @@ IOFetch::operator()(asio::error_code ec, size_t length) {
// Finished with this socket, so close it. This will not generate an
// I/O error, but reset the origin to unknown in case we change this.
- data_->origin = ASIODNS_UNKORIGIN;
+ data_->origin = ASIODNS_UNKNOWN_ORIGIN;
data_->socket->close();
/// We are done
@@ -374,13 +363,13 @@ IOFetch::stop(Result result) {
data_->stopped = true;
switch (result) {
case TIME_OUT:
- LOG_DEBUG(logger, DBG_COMMON, ASIODNS_RECVTMO).
+ LOG_DEBUG(logger, DBG_COMMON, ASIODNS_READ_TIMEOUT).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
break;
case SUCCESS:
- LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCHCOMP).
+ LOG_DEBUG(logger, DBG_ALL, ASIODNS_FETCH_COMPLETED).
arg(data_->remote_rcv->getAddress().toText()).
arg(data_->remote_rcv->getPort());
break;
@@ -389,13 +378,13 @@ IOFetch::stop(Result result) {
// Fetch has been stopped for some other reason. This is
// allowed but as it is unusual it is logged, but with a lower
// debug level than a timeout (which is totally normal).
- LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCHSTOP).
+ LOG_DEBUG(logger, DBG_IMPORTANT, ASIODNS_FETCH_STOPPED).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
break;
default:
- LOG_ERROR(logger, ASIODNS_UNKRESULT).
+ LOG_ERROR(logger, ASIODNS_UNKNOWN_RESULT).
arg(data_->remote_snd->getAddress().toText()).
arg(data_->remote_snd->getPort());
}
@@ -419,10 +408,10 @@ IOFetch::stop(Result result) {
void IOFetch::logIOFailure(asio::error_code ec) {
// Should only get here with a known error code.
- assert((data_->origin == ASIODNS_OPENSOCK) ||
- (data_->origin == ASIODNS_SENDSOCK) ||
- (data_->origin == ASIODNS_RECVSOCK) ||
- (data_->origin == ASIODNS_UNKORIGIN));
+ assert((data_->origin == ASIODNS_OPEN_SOCKET) ||
+ (data_->origin == ASIODNS_SEND_DATA) ||
+ (data_->origin == ASIODNS_READ_DATA) ||
+ (data_->origin == ASIODNS_UNKNOWN_ORIGIN));
static const char* PROTOCOL[2] = {"TCP", "UDP"};
LOG_ERROR(logger, data_->origin).arg(ec.value()).
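
For reference, the renamed ASIODNS_* identifiers used above are MessageID
constants generated from asiodns_messages.mes by the message compiler. A
minimal sketch of emitting one of them outside io_fetch.cc (the logger name
and the function are illustrative only; the DBGLVL_* constants are assumed to
be available through the log headers, as they are in io_fetch.cc above):

    #include <string>
    #include <stdint.h>

    #include <log/logger.h>
    #include <log/macros.h>

    #include <asiodns/asiodns_messages.h>   // generated from the .mes file

    namespace {
    // Illustrative logger; io_fetch.cc itself logs under "asiolink".
    isc::log::Logger sketch_logger("sketch");
    }

    void
    logReadTimeout(const std::string& addr, uint16_t port) {
        // %1 and %2 of the message text are filled by the arg() calls.
        LOG_DEBUG(sketch_logger, DBGLVL_TRACE_DETAIL, ASIODNS_READ_TIMEOUT).
            arg(addr).arg(port);
    }
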
diff --git a/src/lib/asiodns/tests/Makefile.am b/src/lib/asiodns/tests/Makefile.am
index fd65d0b..f49d485 100644
--- a/src/lib/asiodns/tests/Makefile.am
+++ b/src/lib/asiodns/tests/Makefile.am
@@ -25,15 +25,15 @@ run_unittests_SOURCES += io_fetch_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
diff --git a/src/lib/asiodns/tests/io_fetch_unittest.cc b/src/lib/asiodns/tests/io_fetch_unittest.cc
index 2464b6d..52a51a1 100644
--- a/src/lib/asiodns/tests/io_fetch_unittest.cc
+++ b/src/lib/asiodns/tests/io_fetch_unittest.cc
@@ -130,6 +130,9 @@ public:
msg.setRcode(Rcode::NOERROR());
msg.setHeaderFlag(Message::HEADERFLAG_RD);
msg.addQuestion(question_);
+ EDNSPtr msg_edns(new EDNS());
+ msg_edns->setUDPSize(Message::DEFAULT_MAX_EDNS0_UDPSIZE);
+ msg.setEDNS(msg_edns);
MessageRenderer renderer(*msgbuf_);
msg.toWire(renderer);
MessageRenderer renderer2(*expected_buffer_);
diff --git a/src/lib/asiodns/tests/run_unittests.cc b/src/lib/asiodns/tests/run_unittests.cc
index c285f9e..5cacdaf 100644
--- a/src/lib/asiodns/tests/run_unittests.cc
+++ b/src/lib/asiodns/tests/run_unittests.cc
@@ -13,16 +13,17 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
-#include <log/root_logger_name.h>
+#include <log/logger_support.h>
#include <dns/tests/unittest_util.h>
int
main(int argc, char* argv[])
{
::testing::InitGoogleTest(&argc, argv); // Initialize Google test
- isc::log::setRootLoggerName("unittest"); // Set a root logger name
+ isc::log::initLogger(); // Initialize logging
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR); // Add location of test data
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 22b3a8e..5444547 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -7,9 +7,12 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
-# This is a wrapper library solely used for b10-auth. The ASIO header files
-# have some code fragments that would hit gcc's unused-parameter warning,
-# which would make the build fail with -Werror (our default setting).
+# This is a wrapper library.
+
+# The ASIO header files have some code fragments that would hit
+# gcc's unused-parameter warning, which would make the build fail
+# with -Werror (our default setting).
+
lib_LTLIBRARIES = libasiolink.la
libasiolink_la_SOURCES = asiolink.h
libasiolink_la_SOURCES += dummy_io_cb.h
diff --git a/src/lib/asiolink/README b/src/lib/asiolink/README
index 66091b1..b9e38f9 100644
--- a/src/lib/asiolink/README
+++ b/src/lib/asiolink/README
@@ -20,3 +20,10 @@ Some of the classes defined here--for example, IOSocket, IOEndpoint,
and IOAddress--are to be used by BIND 10 modules as wrappers around
ASIO-specific classes.
+
+Logging
+-------
+
+At this point, nothing is logged by this low-level library. We may
+revisit that in the future if we find suitable messages to log, but
+right now no loggers are initialized or called.
diff --git a/src/lib/asiolink/dummy_io_cb.h b/src/lib/asiolink/dummy_io_cb.h
index 2081906..bcaefe9 100644
--- a/src/lib/asiolink/dummy_io_cb.h
+++ b/src/lib/asiolink/dummy_io_cb.h
@@ -39,7 +39,8 @@ public:
/// \brief Asynchronous I/O callback method
///
- /// \param error Unused
+ /// TODO: explain why this method should never be called.
+ /// This should be unused.
void operator()(asio::error_code)
{
// TODO: log an error if this method ever gets called.
@@ -47,8 +48,8 @@ public:
/// \brief Asynchronous I/O callback method
///
- /// \param error Unused
- /// \param length Unused
+ /// TODO: explain why this method should never be called.
+ /// This should be unused.
void operator()(asio::error_code, size_t)
{
// TODO: log an error if this method ever gets called.
diff --git a/src/lib/asiolink/interval_timer.cc b/src/lib/asiolink/interval_timer.cc
index 0ed06eb..9873e9b 100644
--- a/src/lib/asiolink/interval_timer.cc
+++ b/src/lib/asiolink/interval_timer.cc
@@ -14,11 +14,9 @@
#include <config.h>
-#include <unistd.h> // for some IPC/network system calls
-#include <sys/socket.h>
-#include <netinet/in.h>
-
#include <boost/bind.hpp>
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/shared_ptr.hpp>
#include <exceptions/exceptions.h>
@@ -29,7 +27,16 @@
namespace isc {
namespace asiolink {
-class IntervalTimerImpl {
+/// This class holds a callback function for asynchronous operations.
+/// To ensure the object stays alive while an asynchronous operation refers
+/// to it, we use shared_ptr and enable_shared_from_this.
+/// The object will be destroyed once the IntervalTimer has been destroyed
+/// and no asynchronous operation refers to it any longer.
+/// Please follow the link to get an example:
+/// http://think-async.com/asio/asio-1.4.8/doc/asio/tutorial/tutdaytime3.html#asio.tutorial.tutdaytime3.the_tcp_connection_class
+class IntervalTimerImpl :
+ public boost::enable_shared_from_this<IntervalTimerImpl>
+{
private:
// prohibit copy
IntervalTimerImpl(const IntervalTimerImpl& source);
@@ -53,14 +60,18 @@ private:
long interval_;
// asio timer
asio::deadline_timer timer_;
+ // interval_ will be set to this value in destructor in order to detect
+ // use-after-free type of bugs.
+ static const long INVALIDATED_INTERVAL = -1;
};
IntervalTimerImpl::IntervalTimerImpl(IOService& io_service) :
interval_(0), timer_(io_service.get_io_service())
{}
-IntervalTimerImpl::~IntervalTimerImpl()
-{}
+IntervalTimerImpl::~IntervalTimerImpl() {
+ interval_ = INVALIDATED_INTERVAL;
+}
void
IntervalTimerImpl::setup(const IntervalTimer::Callback& cbfunc,
@@ -81,42 +92,46 @@ IntervalTimerImpl::setup(const IntervalTimer::Callback& cbfunc,
// At this point the timer is not running yet and will not expire.
// After calling IOService::run(), the timer will expire.
update();
- return;
}
void
IntervalTimerImpl::update() {
- if (interval_ == 0) {
- // timer has been canceled. Do nothing.
- return;
- }
try {
// Update expire time to (current time + interval_).
timer_.expires_from_now(boost::posix_time::millisec(interval_));
+ // Reset timer.
+ // Pass a function bound with a shared_ptr to this.
+ timer_.async_wait(boost::bind(&IntervalTimerImpl::callback,
+ shared_from_this(),
+ asio::placeholders::error));
} catch (const asio::system_error& e) {
- isc_throw(isc::Unexpected, "Failed to update timer");
+ isc_throw(isc::Unexpected, "Failed to update timer: " << e.what());
+ } catch (const boost::bad_weak_ptr&) {
+ // Can't happen. It means a severe internal bug.
+ assert(0);
}
- // Reset timer.
- timer_.async_wait(boost::bind(&IntervalTimerImpl::callback, this, _1));
}
void
-IntervalTimerImpl::callback(const asio::error_code& cancelled) {
- // Do not call cbfunc_ in case the timer was cancelled.
- // The timer will be canelled in the destructor of asio::deadline_timer.
- if (!cancelled) {
- cbfunc_();
+IntervalTimerImpl::callback(const asio::error_code& ec) {
+ assert(interval_ != INVALIDATED_INTERVAL);
+ if (interval_ == 0 || ec) {
+ // timer has been canceled. Do nothing.
+ } else {
// Set next expire time.
update();
+ // Invoke the call back function.
+ cbfunc_();
}
}
-IntervalTimer::IntervalTimer(IOService& io_service) {
- impl_ = new IntervalTimerImpl(io_service);
-}
+IntervalTimer::IntervalTimer(IOService& io_service) :
+ impl_(new IntervalTimerImpl(io_service))
+{}
IntervalTimer::~IntervalTimer() {
- delete impl_;
+ // Cancel the timer to make sure cbfunc_() will not be called any more.
+ cancel();
}
void
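
The lifetime idiom behind the change above can be shown in isolation: the
pending handler holds a shared_ptr obtained via shared_from_this(), so the
implementation object outlives the owner that created it. A self-contained
sketch of just that idiom (C++11 and std:: for brevity; the branch itself
uses Boost, and this sketch is not BIND 10 code):

    #include <cassert>
    #include <functional>
    #include <memory>

    class Impl : public std::enable_shared_from_this<Impl> {
    public:
        std::function<void()> makeHandler() {
            // Bind the handler to shared_from_this(), not to a raw "this".
            std::shared_ptr<Impl> self = shared_from_this();
            return [self]() { self->called_ = true; };
        }
        bool called_ = false;
    };

    int main() {
        std::shared_ptr<Impl> owner = std::make_shared<Impl>();
        std::function<void()> pending = owner->makeHandler();
        owner.reset();   // the owner drops its reference...
        pending();       // ...but the handler still holds one, so this is safe
        return 0;
    }
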
diff --git a/src/lib/asiolink/interval_timer.h b/src/lib/asiolink/interval_timer.h
index 8de16cb..57ec1c3 100644
--- a/src/lib/asiolink/interval_timer.h
+++ b/src/lib/asiolink/interval_timer.h
@@ -16,6 +16,7 @@
#define __ASIOLINK_INTERVAL_TIMER_H 1
#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
#include <asiolink/io_service.h>
@@ -42,9 +43,6 @@ class IntervalTimerImpl;
/// The call back function will not be called if the instance of this class is
/// destroyed before the timer is expired.
///
-/// Note: Destruction of an instance of this class while call back is pending
-/// causes throwing an exception from \c IOService.
-///
/// Sample code:
/// \code
/// void function_to_call_back() {
@@ -100,12 +98,12 @@ public:
/// \param interval Interval in milliseconds (greater than 0)
///
/// Note: IntervalTimer will not pass \c asio::error_code to
- /// call back function. In case the timer is cancelled, the function
+ /// call back function. In case the timer is canceled, the function
/// will not be called.
///
/// \throw isc::InvalidParameter cbfunc is empty
/// \throw isc::BadValue interval is less than or equal to 0
- /// \throw isc::Unexpected ASIO library error
+ /// \throw isc::Unexpected internal runtime error
void setup(const Callback& cbfunc, const long interval);
/// Cancel the timer.
@@ -127,7 +125,7 @@ public:
long getInterval() const;
private:
- IntervalTimerImpl* impl_;
+ boost::shared_ptr<IntervalTimerImpl> impl_;
};
} // namespace asiolink
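
A minimal usage sketch of the class documented above, along the lines of the
sample code in the header (header paths and the callback are illustrative;
IOService::run()/stop() are used the way the unit tests later in this diff
describe stopping the io_service):

    #include <asiolink/io_service.h>
    #include <asiolink/interval_timer.h>

    using isc::asiolink::IOService;
    using isc::asiolink::IntervalTimer;

    IOService io_service;
    IntervalTimer timer(io_service);

    void
    onTick() {
        // Would be called every 1000 ms; stop everything after the first one.
        timer.cancel();
        io_service.stop();
    }

    int main() {
        timer.setup(onTick, 1000);   // callback plus interval in milliseconds
        io_service.run();            // returns once io_service.stop() is called
        return 0;
    }
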
diff --git a/src/lib/asiolink/io_address.cc b/src/lib/asiolink/io_address.cc
index 7f7a6fc..0fe1db4 100644
--- a/src/lib/asiolink/io_address.cc
+++ b/src/lib/asiolink/io_address.cc
@@ -15,6 +15,7 @@
#include <config.h>
#include <unistd.h> // for some IPC/network system calls
+#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
@@ -23,7 +24,7 @@
#include <exceptions/exceptions.h>
#include <asiolink/io_address.h>
#include <asiolink/io_error.h>
-
+#include <boost/static_assert.hpp>
using namespace asio;
using asio::ip::udp;
@@ -49,11 +50,32 @@ IOAddress::IOAddress(const ip::address& asio_address) :
asio_address_(asio_address)
{}
+IOAddress::IOAddress(uint32_t v4address):
+ asio_address_(asio::ip::address_v4(v4address)) {
+
+}
+
string
IOAddress::toText() const {
return (asio_address_.to_string());
}
+IOAddress
+IOAddress::from_bytes(short family, const uint8_t* data) {
+ if (data == NULL) {
+ isc_throw(BadValue, "NULL pointer received.");
+ } else
+ if ( (family != AF_INET) && (family != AF_INET6) ) {
+ isc_throw(BadValue, "Invalid family type. Only AF_INET and AF_INET6"
+ << "are supported");
+ }
+
+ BOOST_STATIC_ASSERT(INET6_ADDRSTRLEN >= INET_ADDRSTRLEN);
+ char addr_str[INET6_ADDRSTRLEN];
+ inet_ntop(family, data, addr_str, INET6_ADDRSTRLEN);
+ return IOAddress(string(addr_str));
+}
+
short
IOAddress::getFamily() const {
if (asio_address_.is_v4()) {
@@ -63,5 +85,19 @@ IOAddress::getFamily() const {
}
}
+const asio::ip::address&
+IOAddress::getAddress() const {
+ return asio_address_;
+}
+
+IOAddress::operator uint32_t() const {
+ if (getAddress().is_v4()) {
+ return (getAddress().to_v4().to_ulong());
+ } else {
+ isc_throw(BadValue, "Can't convert " << toText()
+ << " address to IPv4.");
+ }
+}
+
} // namespace asiolink
} // namespace isc
diff --git a/src/lib/asiolink/io_address.h b/src/lib/asiolink/io_address.h
index 655b727..c40e5b9 100644
--- a/src/lib/asiolink/io_address.h
+++ b/src/lib/asiolink/io_address.h
@@ -19,6 +19,7 @@
// this file. In particular, asio.hpp should never be included here.
// See the description of the namespace below.
#include <unistd.h> // for some network system calls
+#include <stdint.h> // for uint32_t
#include <asio/ip/address.hpp>
#include <functional>
@@ -29,6 +30,12 @@
namespace isc {
namespace asiolink {
+ /// Defines length of IPv6 address.
+ const static size_t V6ADDRESS_LEN = 16;
+
+ /// Defines length of IPv4 address.
+ const static size_t V4ADDRESS_LEN = 4;
+
/// \brief The \c IOAddress class represents an IP addresses (version
/// agnostic)
///
@@ -65,6 +72,15 @@ public:
IOAddress(const asio::ip::address& asio_address);
//@}
+ /// @brief Constructor for ip::address_v4 object.
+ ///
+ /// This constructor is intended to be used when constructing an
+ /// IPv4 address out of the uint32_t type. The passed value must be
+ /// in network byte order.
+ ///
+ /// @param v4address IPv4 address represented by uint32_t
+ IOAddress(uint32_t v4address);
+
/// \brief Convert the address to a string.
///
/// This method is basically expected to be exception free, but
@@ -74,11 +90,29 @@ public:
/// \return A string representation of the address.
std::string toText() const;
+ /// \brief Returns const reference to the underlying address object.
+ ///
+ /// This is useful when access to the interface offered by
+ /// asio::ip::address_v4 and asio::ip::address_v6 is beneficial.
+ ///
+ /// \return A const reference to asio::ip::address object
+ const asio::ip::address& getAddress() const;
+
/// \brief Returns the address family
///
/// \return AF_INET for IPv4 or AF_INET6 for IPv6.
short getFamily() const;
+
+ /// \brief Creates an address from over-the-wire data.
+ ///
+ /// \param family AF_INET for IPv4 or AF_INET6 for IPv6.
+ /// \param data pointer to the first byte of the address data
+ ///
+ /// \return Created IOAddress object
+ static IOAddress
+ from_bytes(short family, const uint8_t* data);
+
/// \brief Compare addresses for equality
///
/// \param other Address to compare against.
@@ -115,6 +149,14 @@ public:
return (nequals(other));
}
+ /// \brief Converts IPv4 address to uint32_t
+ ///
+ /// Will throw a BadValue exception if the stored address is not
+ /// an IPv4 address.
+ ///
+ /// \return uint32_t that represents IPv4 address in
+ /// network byte order
+ operator uint32_t () const;
private:
asio::ip::address asio_address_;
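
A short sketch of the new from_bytes() interface documented above, feeding it
the raw octets of an in_addr as they would arrive off the wire (the example
is illustrative only and not part of the branch; it assumes the asiolink
header is reachable under the path shown):

    #include <stdint.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    #include <iostream>

    #include <asiolink/io_address.h>

    using isc::asiolink::IOAddress;

    int main() {
        struct in_addr ina;
        inet_pton(AF_INET, "192.0.2.3", &ina);

        // from_bytes() expects a pointer to the first byte of the address.
        const uint8_t* octets = reinterpret_cast<const uint8_t*>(&ina.s_addr);
        const IOAddress addr = IOAddress::from_bytes(AF_INET, octets);
        std::cout << addr.toText() << std::endl;   // prints 192.0.2.3
        return 0;
    }
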
diff --git a/src/lib/asiolink/io_asio_socket.h b/src/lib/asiolink/io_asio_socket.h
index 864708c..aeac63d 100644
--- a/src/lib/asiolink/io_asio_socket.h
+++ b/src/lib/asiolink/io_asio_socket.h
@@ -82,8 +82,6 @@ class IOEndpoint;
/// derived class for testing purposes rather than providing factory methods
/// (i.e., getDummy variants below).
///
-/// TODO: Check if IOAsioSocket class is still needed
-///
/// \param C Template parameter identifying type of the callback object.
template <typename C>
@@ -328,10 +326,9 @@ public:
///
/// A call that is a no-op on UDP sockets, this opens a connection to the
/// system identified by the given endpoint.
+ /// The endpoint and callback are unused.
///
- /// \param endpoint Unused
- /// \param callback Unused.
- ///false indicating that the operation completed synchronously.
+ /// \return false indicating that the operation completed synchronously.
virtual bool open(const IOEndpoint*, C&) {
return (false);
}
@@ -339,23 +336,14 @@ public:
/// \brief Send Asynchronously
///
/// Must be supplied as it is abstract in the base class.
- ///
- /// \param data Unused
- /// \param length Unused
- /// \param endpoint Unused
- /// \param callback Unused
+ /// This is unused.
virtual void asyncSend(const void*, size_t, const IOEndpoint*, C&) {
}
/// \brief Receive Asynchronously
///
/// Must be supplied as it is abstract in the base class.
- ///
- /// \param data Unused
- /// \param length Unused
- /// \param offset Unused
- /// \param endpoint Unused
- /// \param callback Unused
+ /// The parameters are unused.
virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) {
}
diff --git a/src/lib/asiolink/io_endpoint.h b/src/lib/asiolink/io_endpoint.h
index 756fa3b..11ea97b 100644
--- a/src/lib/asiolink/io_endpoint.h
+++ b/src/lib/asiolink/io_endpoint.h
@@ -20,6 +20,8 @@
// See the description of the namespace below.
#include <unistd.h> // for some network system calls
+#include <sys/socket.h> // for sockaddr
+
#include <functional>
#include <string>
@@ -90,6 +92,44 @@ public:
/// \brief Returns the address family of the endpoint.
virtual short getFamily() const = 0;
+ /// \brief Returns the address of the endpoint in the form of sockaddr
+ /// structure.
+ ///
+ /// The actual instance referenced by the returned value of this method
+ /// is a per-address-family structure: for IPv4 (AF_INET), it's
+ /// \c sockaddr_in; for IPv6 (AF_INET6), it's \c sockaddr_in6.
+ /// The corresponding port and address members of the underlying structure
+ /// will be set in the network byte order.
+ ///
+ /// This method is "redundant" in that all information to construct the
+ /// \c sockaddr is available via the other "get" methods.
+ /// It is still defined for performance-sensitive applications that need
+ /// to get the address information, such as for address-based access
+ /// control at high throughput. Internally it is implemented with
+ /// minimal overhead, avoiding data copies (this is another reason why this
+ /// method returns a reference).
+ ///
+ /// As a tradeoff, this method is more fragile; it assumes that the
+ /// underlying ASIO implementation stores the address information in
+ /// the form of \c sockaddr and it can be accessed in an efficient way.
+ /// This is the case as of this writing, but if the underlying
+ /// implementation changes this method may become much slower or its
+ /// interface may have to be changed, too.
+ ///
+ /// It is therefore discouraged for normal applications to use this
+ /// method. Unless the application is very performance sensitive, it
+ /// should use the other "get" methods to retrieve specific information
+ /// about the endpoint.
+ ///
+ /// The returned reference is only valid while the corresponding
+ /// \c IOEndpoint is valid. Once it's destructed the reference will
+ /// become invalid.
+ ///
+ /// \exception None
+ /// \return Reference to a \c sockaddr structure corresponding to the
+ /// endpoint.
+ virtual const struct sockaddr& getSockAddr() const = 0;
+
bool operator==(const IOEndpoint& other) const;
bool operator!=(const IOEndpoint& other) const;
@@ -121,3 +161,7 @@ public:
} // namespace asiolink
} // namespace isc
#endif // __IO_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
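
The documentation above motivates getSockAddr() with cheap, per-packet address
checks. A minimal sketch of such a caller (a hypothetical helper using POSIX
headers only, not part of the branch), comparing the endpoint's address
against an expected IPv4 address without any string conversion:

    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    // Both sin_addr.s_addr and expected_net are in network byte order.
    bool
    isFromExpectedV4(const struct sockaddr& sa, uint32_t expected_net) {
        if (sa.sa_family != AF_INET) {
            return (false);
        }
        struct sockaddr_in sin;
        memcpy(&sin, &sa, sizeof(sin));   // copy to avoid aliasing issues
        return (sin.sin_addr.s_addr == expected_net);
    }
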
diff --git a/src/lib/asiolink/tcp_endpoint.h b/src/lib/asiolink/tcp_endpoint.h
index 3e420f3..a54f6b2 100644
--- a/src/lib/asiolink/tcp_endpoint.h
+++ b/src/lib/asiolink/tcp_endpoint.h
@@ -84,6 +84,10 @@ public:
return (asio_endpoint_.address());
}
+ virtual const struct sockaddr& getSockAddr() const {
+ return (*asio_endpoint_.data());
+ }
+
virtual uint16_t getPort() const {
return (asio_endpoint_.port());
}
@@ -113,3 +117,7 @@ private:
} // namespace asiolink
} // namespace isc
#endif // __TCP_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/asiolink/tests/Makefile.am b/src/lib/asiolink/tests/Makefile.am
index bfdf7c1..984cf07 100644
--- a/src/lib/asiolink/tests/Makefile.am
+++ b/src/lib/asiolink/tests/Makefile.am
@@ -10,6 +10,12 @@ if USE_STATIC_LINK
AM_LDFLAGS = -static
endif
+# Some versions of GCC warn about some versions of Boost regarding
+# missing initializer for members in its posix_time.
+# https://svn.boost.org/trac/boost/ticket/3477
+# But older GCC compilers don't have the flag.
+AM_CXXFLAGS += $(WARNING_NO_MISSING_FIELD_INITIALIZERS_CFLAG)
+
CLEANFILES = *.gcno *.gcda
TESTS =
@@ -28,13 +34,12 @@ run_unittests_SOURCES += udp_socket_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
diff --git a/src/lib/asiolink/tests/interval_timer_unittest.cc b/src/lib/asiolink/tests/interval_timer_unittest.cc
index c24e60e..420cb90 100644
--- a/src/lib/asiolink/tests/interval_timer_unittest.cc
+++ b/src/lib/asiolink/tests/interval_timer_unittest.cc
@@ -18,7 +18,7 @@
#include <asio.hpp>
#include <asiolink/asiolink.h>
-#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/date_time/posix_time/posix_time.hpp>
namespace {
// TODO: Consider this margin
@@ -28,7 +28,7 @@ const boost::posix_time::time_duration TIMER_MARGIN_MSEC =
using namespace isc::asiolink;
-// This fixture is for testing IntervalTimer. Some callback functors are
+// This fixture is for testing IntervalTimer. Some callback functors are
// registered as callback function of the timer to test if they are called
// or not.
class IntervalTimerTest : public ::testing::Test {
@@ -50,7 +50,9 @@ protected:
};
class TimerCallBackCounter : public std::unary_function<void, void> {
public:
- TimerCallBackCounter(IntervalTimerTest* test_obj) : test_obj_(test_obj) {
+ TimerCallBackCounter(IntervalTimerTest* test_obj) :
+ test_obj_(test_obj)
+ {
counter_ = 0;
}
void operator()() {
@@ -164,18 +166,20 @@ TEST_F(IntervalTimerTest, startIntervalTimer) {
itimer.setup(TimerCallBack(this), 100);
EXPECT_EQ(100, itimer.getInterval());
io_service_.run();
- // reaches here after timer expired
+ // Control reaches here after io_service_ was stopped by TimerCallBack.
+
// delta: difference between elapsed time and 100 milliseconds.
- boost::posix_time::time_duration delta =
- (boost::posix_time::microsec_clock::universal_time() - start)
- - boost::posix_time::millisec(100);
- if (delta.is_negative()) {
- delta.invert_sign();
- }
- // expect TimerCallBack is called; timer_called_ is true
+ boost::posix_time::time_duration test_runtime =
+ boost::posix_time::microsec_clock::universal_time() - start;
+ EXPECT_FALSE(test_runtime.is_negative()) <<
+ "test duration " << test_runtime <<
+ " negative - clock skew?";
+ // Expect TimerCallBack is called; timer_called_ is true
EXPECT_TRUE(timer_called_);
- // expect interval is 100 milliseconds +/- TIMER_MARGIN_MSEC.
- EXPECT_TRUE(delta < TIMER_MARGIN_MSEC);
+ // Expect test_runtime is 100 milliseconds or longer.
+ EXPECT_TRUE(test_runtime > boost::posix_time::milliseconds(100)) <<
+ "test runtime " << test_runtime.total_milliseconds() <<
+ "msec " << ">= 100";
}
TEST_F(IntervalTimerTest, destructIntervalTimer) {
@@ -238,7 +242,7 @@ TEST_F(IntervalTimerTest, cancel) {
}
TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
- // Calling setup() multiple times updates call back function and interval.
+ // Call setup() multiple times to update call back function and interval.
//
// There are two timers:
// itimer (A)
@@ -260,7 +264,7 @@ TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
// 0 100 200 300 400 500 600 700 800 (ms)
// (A) i-------------+----C----s
// ^ ^stop io_service
- // |change call back function
+ // |change call back function and interval
// (B) i------------------+-------------------S
// ^(stop io_service on fail)
//
@@ -273,24 +277,11 @@ TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
itimer.setup(TimerCallBackCounter(this), 300);
itimer_overwriter.setup(TimerCallBackOverwriter(this, itimer), 400);
io_service_.run();
- // reaches here after timer expired
- // if interval is updated, it takes
- // 400 milliseconds for TimerCallBackOverwriter
- // + 100 milliseconds for TimerCallBack (stop)
- // = 500 milliseconds.
- // otherwise (test fails), it takes
- // 400 milliseconds for TimerCallBackOverwriter
- // + 400 milliseconds for TimerCallBackOverwriter (stop)
- // = 800 milliseconds.
- // delta: difference between elapsed time and 400 + 100 milliseconds
- boost::posix_time::time_duration delta =
- (boost::posix_time::microsec_clock::universal_time() - start)
- - boost::posix_time::millisec(400 + 100);
- if (delta.is_negative()) {
- delta.invert_sign();
- }
- // expect callback function is updated: TimerCallBack is called
+ // Control reaches here after io_service_ was stopped by
+ // TimerCallBackCounter or TimerCallBackOverwriter.
+
+ // Expect callback function is updated: TimerCallBack is called
EXPECT_TRUE(timer_called_);
- // expect interval is updated
- EXPECT_TRUE(delta < TIMER_MARGIN_MSEC);
+ // Expect interval is updated: return value of getInterval() is updated
+ EXPECT_EQ(itimer.getInterval(), 100);
}
diff --git a/src/lib/asiolink/tests/io_address_unittest.cc b/src/lib/asiolink/tests/io_address_unittest.cc
index 18b181e..4322283 100644
--- a/src/lib/asiolink/tests/io_address_unittest.cc
+++ b/src/lib/asiolink/tests/io_address_unittest.cc
@@ -18,6 +18,8 @@
#include <asiolink/io_error.h>
#include <asiolink/io_address.h>
+#include <cstring>
+
using namespace isc::asiolink;
TEST(IOAddressTest, fromText) {
@@ -61,3 +63,39 @@ TEST(IOAddressTest, Family) {
EXPECT_EQ(AF_INET, IOAddress("192.0.2.1").getFamily());
EXPECT_EQ(AF_INET6, IOAddress("2001:0DB8:0:0::0012").getFamily());
}
+
+TEST(IOAddressTest, from_bytes) {
+ // 2001:db8:1::dead:beef
+ uint8_t v6[] = {
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+ 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef };
+
+ uint8_t v4[] = { 192, 0 , 2, 3 };
+
+ IOAddress addr("::");
+ EXPECT_NO_THROW({
+ addr = IOAddress::from_bytes(AF_INET6, v6);
+ });
+ EXPECT_EQ("2001:db8:1::dead:beef", addr.toText());
+
+ EXPECT_NO_THROW({
+ addr = IOAddress::from_bytes(AF_INET, v4);
+ });
+ EXPECT_EQ(addr.toText(), IOAddress("192.0.2.3").toText());
+}
+
+TEST(IOAddressTest, uint32) {
+ IOAddress addr1("192.0.2.5");
+
+ // operator uint_32() is used here
+ uint32_t tmp = addr1;
+
+ uint32_t expected = (192U << 24) + (0U << 16) + (2U << 8) + 5U;
+
+ EXPECT_EQ(expected, tmp);
+
+ // now let's try opposite conversion
+ IOAddress addr3 = IOAddress(expected);
+
+ EXPECT_EQ(addr3.toText(), "192.0.2.5");
+}
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index ce21fde..c7283ec 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -15,14 +15,25 @@
#include <config.h>
#include <gtest/gtest.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <boost/shared_ptr.hpp>
+
#include <asiolink/io_endpoint.h>
#include <asiolink/io_error.h>
+using boost::shared_ptr;
using namespace isc::asiolink;
+namespace {
+typedef shared_ptr<const IOEndpoint> ConstIOEndpointPtr;
+
TEST(IOEndpointTest, createUDPv4) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 53210);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 53210));
EXPECT_EQ("192.0.2.1", ep->getAddress().toText());
EXPECT_EQ(53210, ep->getPort());
EXPECT_EQ(AF_INET, ep->getFamily());
@@ -31,8 +42,8 @@ TEST(IOEndpointTest, createUDPv4) {
}
TEST(IOEndpointTest, createTCPv4) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5301);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5301));
EXPECT_EQ("192.0.2.1", ep->getAddress().toText());
EXPECT_EQ(5301, ep->getPort());
EXPECT_EQ(AF_INET, ep->getFamily());
@@ -41,8 +52,9 @@ TEST(IOEndpointTest, createTCPv4) {
}
TEST(IOEndpointTest, createUDPv6) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5302);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"),
+ 5302));
EXPECT_EQ("2001:db8::1234", ep->getAddress().toText());
EXPECT_EQ(5302, ep->getPort());
EXPECT_EQ(AF_INET6, ep->getFamily());
@@ -51,8 +63,9 @@ TEST(IOEndpointTest, createUDPv6) {
}
TEST(IOEndpointTest, createTCPv6) {
- const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303);
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"),
+ 5303));
EXPECT_EQ("2001:db8::1234", ep->getAddress().toText());
EXPECT_EQ(5303, ep->getPort());
EXPECT_EQ(AF_INET6, ep->getFamily());
@@ -61,23 +74,55 @@ TEST(IOEndpointTest, createTCPv6) {
}
TEST(IOEndpointTest, equality) {
- std::vector<const IOEndpoint *> epv;
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5303));
- epv.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5304));
- epv.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5304));
+ std::vector<ConstIOEndpointPtr> epv;
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.2"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.2"), 5303)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.2"), 5304)));
+ epv.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.2"), 5304)));
for (size_t i = 0; i < epv.size(); ++i) {
for (size_t j = 0; j < epv.size(); ++j) {
@@ -92,23 +137,55 @@ TEST(IOEndpointTest, equality) {
// Create a second array with exactly the same values. We use create()
// again to make sure we get different endpoints
- std::vector<const IOEndpoint *> epv2;
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1234"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1234"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("2001:db8::1235"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::1235"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5303));
- epv2.push_back(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 5304));
- epv2.push_back(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"), 5304));
+ std::vector<ConstIOEndpointPtr> epv2;
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1234"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("2001:db8::1235"), 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.1"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"),
+ 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"),
+ 5303)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"),
+ 5304)));
+ epv2.push_back(ConstIOEndpointPtr(
+ IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.2"),
+ 5304)));
for (size_t i = 0; i < epv.size(); ++i) {
EXPECT_TRUE(*epv[i] == *epv2[i]);
@@ -122,3 +199,46 @@ TEST(IOEndpointTest, createIPProto) {
IOError);
}
+void
+sockAddrMatch(const struct sockaddr& actual_sa,
+ const char* const expected_addr_text,
+ const char* const expected_port_text)
+{
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM; // this shouldn't matter
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+
+ struct addrinfo* res;
+ ASSERT_EQ(0, getaddrinfo(expected_addr_text, expected_port_text, &hints,
+ &res));
+ EXPECT_EQ(res->ai_family, actual_sa.sa_family);
+#ifdef HAVE_SA_LEN
+ // ASIO doesn't seem to set sa_len, so we set it to the expected value
+ res->ai_addr->sa_len = actual_sa.sa_len;
+#endif
+ EXPECT_EQ(0, memcmp(res->ai_addr, &actual_sa, res->ai_addrlen));
+ freeaddrinfo(res);
+}
+
+TEST(IOEndpointTest, getSockAddr) {
+ // UDP/IPv4
+ ConstIOEndpointPtr ep(IOEndpoint::create(IPPROTO_UDP,
+ IOAddress("192.0.2.1"), 53210));
+ sockAddrMatch(ep->getSockAddr(), "192.0.2.1", "53210");
+
+ // UDP/IPv6
+ ep.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::53"), 53));
+ sockAddrMatch(ep->getSockAddr(), "2001:db8::53", "53");
+
+ // TCP/IPv4
+ ep.reset(IOEndpoint::create(IPPROTO_TCP, IOAddress("192.0.2.2"), 53211));
+ sockAddrMatch(ep->getSockAddr(), "192.0.2.2", "53211");
+
+ // TCP/IPv6
+ ep.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("2001:db8::5300"), 35));
+ sockAddrMatch(ep->getSockAddr(), "2001:db8::5300", "35");
+}
+
+}
diff --git a/src/lib/asiolink/tests/run_unittests.cc b/src/lib/asiolink/tests/run_unittests.cc
index 97bcb65..b07ce7e 100644
--- a/src/lib/asiolink/tests/run_unittests.cc
+++ b/src/lib/asiolink/tests/run_unittests.cc
@@ -13,15 +13,13 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
-
-#include <log/root_logger_name.h>
-#include <dns/tests/unittest_util.h>
+#include <util/unittests/run_all.h>
+#include <log/logger_manager.h>
int
main(int argc, char* argv[])
{
::testing::InitGoogleTest(&argc, argv); // Initialize Google test
- isc::log::setRootLoggerName("unittest"); // Set a root logger name
-
- return (RUN_ALL_TESTS());
+ isc::log::LoggerManager::init("unittest"); // Set a root logger name
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/asiolink/udp_endpoint.h b/src/lib/asiolink/udp_endpoint.h
index 5c8a1fe..c5ba3bd 100644
--- a/src/lib/asiolink/udp_endpoint.h
+++ b/src/lib/asiolink/udp_endpoint.h
@@ -84,6 +84,10 @@ public:
return (asio_endpoint_.address());
}
+ virtual const struct sockaddr& getSockAddr() const {
+ return (*asio_endpoint_.data());
+ }
+
virtual uint16_t getPort() const {
return (asio_endpoint_.port());
}
@@ -113,3 +117,7 @@ private:
} // namespace asiolink
} // namespace isc
#endif // __UDP_ENDPOINT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/bench/Makefile.am b/src/lib/bench/Makefile.am
index 866404f..514b3b3 100644
--- a/src/lib/bench/Makefile.am
+++ b/src/lib/bench/Makefile.am
@@ -6,6 +6,6 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
-lib_LTLIBRARIES = libbench.la
+noinst_LTLIBRARIES = libbench.la
libbench_la_SOURCES = benchmark_util.h benchmark_util.cc
EXTRA_DIST = benchmark.h
diff --git a/src/lib/bench/tests/Makefile.am b/src/lib/bench/tests/Makefile.am
index 4259b0e..3f8a678 100644
--- a/src/lib/bench/tests/Makefile.am
+++ b/src/lib/bench/tests/Makefile.am
@@ -14,10 +14,11 @@ run_unittests_SOURCES += loadquery_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD = $(top_builddir)/src/lib/bench/libbench.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
-run_unittests_LDADD += $(top_builddir)/src/lib/bench/libbench.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(GTEST_LDADD)
endif
diff --git a/src/lib/bench/tests/run_unittests.cc b/src/lib/bench/tests/run_unittests.cc
index 85d4548..450f5dc 100644
--- a/src/lib/bench/tests/run_unittests.cc
+++ b/src/lib/bench/tests/run_unittests.cc
@@ -13,10 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/cache/Makefile.am b/src/lib/cache/Makefile.am
index bfbe24a..9871a5e 100644
--- a/src/lib/cache/Makefile.am
+++ b/src/lib/cache/Makefile.am
@@ -31,5 +31,14 @@ libcache_la_SOURCES += cache_entry_key.h cache_entry_key.cc
libcache_la_SOURCES += rrset_copy.h rrset_copy.cc
libcache_la_SOURCES += local_zone_data.h local_zone_data.cc
libcache_la_SOURCES += message_utility.h message_utility.cc
+libcache_la_SOURCES += logger.h logger.cc
+nodist_libcache_la_SOURCES = cache_messages.cc cache_messages.h
-CLEANFILES = *.gcno *.gcda
+BUILT_SOURCES = cache_messages.cc cache_messages.h
+
+cache_messages.cc cache_messages.h: cache_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/cache/cache_messages.mes
+
+CLEANFILES = *.gcno *.gcda cache_messages.cc cache_messages.h
+
+EXTRA_DIST = cache_messages.mes
diff --git a/src/lib/cache/TODO b/src/lib/cache/TODO
index aa7e3b0..31825e4 100644
--- a/src/lib/cache/TODO
+++ b/src/lib/cache/TODO
@@ -12,7 +12,8 @@
* When the rrset beging updated is an NS rrset, NSAS should be updated
together.
* Share the NXDOMAIN info between different type queries. current implementation
- can only cache for the type that user quired, for example, if user query A
+ can only cache for the type that user queried, for example, if user query A
record of a.example. and the server replied with NXDOMAIN, this should be
cached for all the types queries of a.example.
-
+* Add the interfaces for resizing and serialization (loading and dumping) to
+ cache.
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
new file mode 100644
index 0000000..19102ae
--- /dev/null
+++ b/src/lib/cache/cache_messages.mes
@@ -0,0 +1,148 @@
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::cache
+
+% CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+
+% CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+
+% CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data
+Debug message. The requested data was not found in the local zone data.
+
+% CACHE_LOCALZONE_UPDATE updating local zone element at key %1
+Debug message issued when there's update to the local zone section of cache.
+
+% CACHE_MESSAGES_DEINIT deinitialized message cache
+Debug message. It is issued when the server deinitializes the message cache.
+
+% CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+
+% CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+
+% CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+
+% CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+
+% CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+
+% CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache
+Debug message. The message cache didn't find any entry for the given key.
+
+% CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3
+Debug message issued when the message cache is being updated with a new
+message. If an old instance is found it is removed first; either way a new
+one is then inserted.
+
+% CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+
+% CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1
+Debug message. The resolver cache is being created for the given class. The
+only difference from CACHE_RESOLVER_INIT is the format of the information
+passed; otherwise it does the same thing.
+
+% CACHE_RESOLVER_INIT initializing resolver cache for class %1
+Debug message. The resolver cache is being created for the given class.
+
+% CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+
+% CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+
+% CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+
+% CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2
+Debug message. The resolver cache is trying to find an RRset (such lookups
+usually originate internally in the resolver).
+
+% CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section
+The cache tried to add the data it found to the response message, but it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error; please submit a bug report.
+
+% CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+
+% CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+
+% CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3
+Debug message. The resolver is updating a message in the cache.
+
+% CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3
+Debug message. The resolver is updating an RRset in the cache.
+
+% CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+
+% CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the RRset will not be cached.
+
+% CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
+Debug message. An RRset cache that holds at most this many RRsets of the given
+class is being created.
+
+% CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache
+Debug message. The resolver is trying to look up data in the RRset cache.
+
+% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+
+% CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+
+% CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from a more trusted source, so the old one is kept and the
+new one is ignored.
+
+% CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache
+Debug message. The RRset cache is being updated with the given RRset.
diff --git a/src/lib/cache/local_zone_data.cc b/src/lib/cache/local_zone_data.cc
index 61ce35a..13d1d75 100644
--- a/src/lib/cache/local_zone_data.cc
+++ b/src/lib/cache/local_zone_data.cc
@@ -16,6 +16,7 @@
#include "local_zone_data.h"
#include "cache_entry_key.h"
#include "rrset_copy.h"
+#include "logger.h"
using namespace std;
using namespace isc::dns;
@@ -33,8 +34,10 @@ LocalZoneData::lookup(const isc::dns::Name& name,
string key = genCacheEntryName(name, type);
RRsetMapIterator iter = rrsets_map_.find(key);
if (iter == rrsets_map_.end()) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_UNKNOWN).arg(key);
return (RRsetPtr());
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_FOUND).arg(key);
return (iter->second);
}
}
@@ -43,6 +46,7 @@ void
LocalZoneData::update(const isc::dns::RRset& rrset) {
//TODO Do we really need to recreate the rrset again?
string key = genCacheEntryName(rrset.getName(), rrset.getType());
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_UPDATE).arg(key);
RRset* rrset_copy = new RRset(rrset.getName(), rrset.getClass(),
rrset.getType(), rrset.getTTL());
diff --git a/src/lib/cache/logger.cc b/src/lib/cache/logger.cc
new file mode 100644
index 0000000..f4b0f25
--- /dev/null
+++ b/src/lib/cache/logger.cc
@@ -0,0 +1,23 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <cache/logger.h>
+
+namespace isc {
+namespace cache {
+
+isc::log::Logger logger("cache");
+
+}
+}
diff --git a/src/lib/cache/logger.h b/src/lib/cache/logger.h
new file mode 100644
index 0000000..52c9743
--- /dev/null
+++ b/src/lib/cache/logger.h
@@ -0,0 +1,43 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CACHE_LOGGER_H
+#define __CACHE_LOGGER_H
+
+#include <log/macros.h>
+#include <cache/cache_messages.h>
+
+/// \file cache/logger.h
+/// \brief Cache library global logger
+///
+/// This holds the logger for the cache library. It is a private header
+/// and should not be included in any publicly used header, only in local
+/// cc files.
+
+namespace isc {
+namespace cache {
+
+/// \brief The logger for this library
+extern isc::log::Logger logger;
+
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Trace data operations
+const int DBG_TRACE_DATA = DBGLVL_TRACE_BASIC_DATA;
+
+} // namespace cache
+} // namespace isc
+
+#endif
diff --git a/src/lib/cache/message_cache.cc b/src/lib/cache/message_cache.cc
index 0464f87..e141bb5 100644
--- a/src/lib/cache/message_cache.cc
+++ b/src/lib/cache/message_cache.cc
@@ -1,6 +1,7 @@
// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
+//
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
@@ -20,6 +21,7 @@
#include "message_cache.h"
#include "message_utility.h"
#include "cache_entry_key.h"
+#include "logger.h"
namespace isc {
namespace cache {
@@ -39,11 +41,14 @@ MessageCache::MessageCache(const RRsetCachePtr& rrset_cache,
message_lru_((3 * cache_size),
new HashDeleter<MessageEntry>(message_table_))
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_MESSAGES_INIT).arg(cache_size).
+ arg(RRClass(message_class));
}
MessageCache::~MessageCache() {
// Destroy all the message entries in the cache.
message_lru_.clear();
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_MESSAGES_DEINIT);
}
bool
@@ -57,26 +62,38 @@ MessageCache::lookup(const isc::dns::Name& qname,
if(msg_entry) {
// Check whether the message entry has expired.
if (msg_entry->getExpireTime() > time(NULL)) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_FOUND).
+ arg(entry_name);
message_lru_.touch(msg_entry);
return (msg_entry->genMessage(time(NULL), response));
} else {
// message entry expires, remove it from hash table and lru list.
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_EXPIRED).
+ arg(entry_name);
message_table_.remove(entry_key);
message_lru_.remove(msg_entry);
return (false);
}
}
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UNKNOWN).arg(entry_name);
return (false);
}
bool
MessageCache::update(const Message& msg) {
if (!canMessageBeCached(msg)){
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UNCACHEABLE).
+ arg((*msg.beginQuestion())->getName()).
+ arg((*msg.beginQuestion())->getType()).
+ arg((*msg.beginQuestion())->getClass());
return (false);
}
QuestionIterator iter = msg.beginQuestion();
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UPDATE).
+ arg((*iter)->getName()).arg((*iter)->getType()).
+ arg((*iter)->getClass());
std::string entry_name = genCacheEntryName((*iter)->getName(),
(*iter)->getType());
HashKey entry_key = HashKey(entry_name, RRClass(message_class_));
@@ -88,6 +105,9 @@ MessageCache::update(const Message& msg) {
// add the message entry, maybe there is one way to touch it once.
MessageEntryPtr old_msg_entry = message_table_.get(entry_key);
if (old_msg_entry) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_REMOVE).
+ arg((*iter)->getName()).arg((*iter)->getType()).
+ arg((*iter)->getClass());
message_lru_.remove(old_msg_entry);
}
@@ -97,24 +117,6 @@ MessageCache::update(const Message& msg) {
return (message_table_.add(msg_entry, entry_key, true));
}
-#if 0
-void
-MessageCache::dump(const std::string&) {
- //TODO
-}
-
-void
-MessageCache::load(const std::string&) {
- //TODO
-}
-
-bool
-MessageCache::resize(uint32_t) {
- //TODO
- return (true);
-}
-#endif
-
} // namespace cache
} // namespace isc
diff --git a/src/lib/cache/message_cache.h b/src/lib/cache/message_cache.h
index 7455f66..b418f23 100644
--- a/src/lib/cache/message_cache.h
+++ b/src/lib/cache/message_cache.h
@@ -30,6 +30,8 @@ namespace cache {
/// The object of MessageCache represents the cache for class-specific
/// messages.
///
+/// \todo The message cache class should provide the interfaces for
+/// loading, dumping and resizing.
class MessageCache {
// Noncopyable
private:
@@ -37,7 +39,7 @@ private:
MessageCache& operator=(const MessageCache& source);
public:
/// \param rrset_cache The cache that stores the RRsets that the
- /// message entry will points to
+ /// message entry will point to
/// \param cache_size The size of message cache.
/// \param message_class The class of the message cache
/// \param negative_soa_cache The cache that stores the SOA record
@@ -50,6 +52,8 @@ public:
virtual ~MessageCache();
/// \brief Look up message in cache.
+ /// \param qname Name of the domain for which the message is being sought.
+ /// \param qtype Type of the RR for which the message is being sought.
/// \param message generated response message if the message entry
/// can be found.
///
@@ -64,20 +68,6 @@ public:
/// If the message doesn't exist in the cache, it will be added
/// directly.
bool update(const isc::dns::Message& msg);
-
-#if 0
- /// \brief Dump the message cache to specified file.
- /// \todo It should can be dumped to one configured database.
- void dump(const std::string& file_name);
-
- /// \brief Load the cache from one file.
- /// \todo It should can be loaded from one configured database.
- void load(const std::string& file_name);
-
- /// \brief Resize the size of message cache in runtime.
- bool resize(uint32_t size);
-#endif
-
protected:
/// \brief Get the hash key for the message entry in the cache.
/// \param name query name of the message.
diff --git a/src/lib/cache/message_entry.cc b/src/lib/cache/message_entry.cc
index de4ea89..d9560a6 100644
--- a/src/lib/cache/message_entry.cc
+++ b/src/lib/cache/message_entry.cc
@@ -20,6 +20,7 @@
#include "message_entry.h"
#include "message_utility.h"
#include "rrset_cache.h"
+#include "logger.h"
using namespace isc::dns;
using namespace std;
@@ -64,7 +65,7 @@ static uint32_t MAX_UINT32 = numeric_limits<uint32_t>::max();
// tunable. Values of one to three hours have been found to work well
// and would make sensible a default. Values exceeding one day have
// been found to be problematic. (sec 5, RFC2308)
-// The default value is 3 hourse (10800 seconds)
+// The default value is 3 hours (10800 seconds)
// TODO:Give an option to let user configure
static uint32_t MAX_NEGATIVE_CACHE_TTL = 10800;
@@ -142,6 +143,8 @@ MessageEntry::genMessage(const time_t& time_now,
// has expired, if it is, return false.
vector<RRsetEntryPtr> rrset_entry_vec;
if (false == getRRsetEntries(rrset_entry_vec, time_now)) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_ENTRY_MISSING_RRSET).
+ arg(entry_name_);
return (false);
}
diff --git a/src/lib/cache/resolver_cache.cc b/src/lib/cache/resolver_cache.cc
index 261db3c..57935c0 100644
--- a/src/lib/cache/resolver_cache.cc
+++ b/src/lib/cache/resolver_cache.cc
@@ -17,6 +17,7 @@
#include "resolver_cache.h"
#include "dns/message.h"
#include "rrset_cache.h"
+#include "logger.h"
#include <string>
#include <algorithm>
@@ -29,6 +30,7 @@ namespace cache {
ResolverClassCache::ResolverClassCache(const RRClass& cache_class) :
cache_class_(cache_class)
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RESOLVER_INIT).arg(cache_class);
local_zone_data_ = LocalZoneDataPtr(new LocalZoneData(cache_class_.getCode()));
rrsets_cache_ = RRsetCachePtr(new RRsetCache(RRSET_CACHE_DEFAULT_SIZE,
cache_class_.getCode()));
@@ -45,6 +47,8 @@ ResolverClassCache::ResolverClassCache(const RRClass& cache_class) :
ResolverClassCache::ResolverClassCache(const CacheSizeInfo& cache_info) :
cache_class_(cache_info.cclass)
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RESOLVER_INIT_INFO).
+ arg(cache_class_);
uint16_t klass = cache_class_.getCode();
// TODO We should find one way to load local zone data.
local_zone_data_ = LocalZoneDataPtr(new LocalZoneData(klass));
@@ -69,8 +73,11 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype,
isc::dns::Message& response) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOOKUP_MSG).
+ arg(qname).arg(qtype);
// message response should has question section already.
if (response.beginQuestion() == response.endQuestion()) {
+ LOG_ERROR(logger, CACHE_RESOLVER_NO_QUESTION).arg(qname).arg(qtype);
isc_throw(MessageNoQuestionSection, "Message has no question section");
}
@@ -79,6 +86,8 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
// answer section.
RRsetPtr rrset_ptr = local_zone_data_->lookup(qname, qtype);
if (rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOCAL_MSG).
+ arg(qname).arg(qtype);
response.addRRset(Message::SECTION_ANSWER, rrset_ptr);
return (true);
}
@@ -91,11 +100,15 @@ isc::dns::RRsetPtr
ResolverClassCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOOKUP_RRSET).
+ arg(qname).arg(qtype);
// Algorithm:
// 1. Search in local zone data first,
// 2. Then do search in rrsets_cache_.
RRsetPtr rrset_ptr = local_zone_data_->lookup(qname, qtype);
if (rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOCAL_RRSET).
+ arg(qname).arg(qtype);
return (rrset_ptr);
} else {
RRsetEntryPtr rrset_entry = rrsets_cache_->lookup(qname, qtype);
@@ -109,6 +122,10 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
bool
ResolverClassCache::update(const isc::dns::Message& msg) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UPDATE_MSG).
+ arg((*msg.beginQuestion())->getName()).
+ arg((*msg.beginQuestion())->getType()).
+ arg((*msg.beginQuestion())->getClass());
return (messages_cache_->update(msg));
}
@@ -130,6 +147,9 @@ ResolverClassCache::updateRRsetCache(const isc::dns::ConstRRsetPtr& rrset_ptr,
bool
ResolverClassCache::update(const isc::dns::ConstRRsetPtr& rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UPDATE_RRSET).
+ arg(rrset_ptr->getName()).arg(rrset_ptr->getType()).
+ arg(rrset_ptr->getClass());
// First update local zone, then update rrset cache.
local_zone_data_->update((*rrset_ptr.get()));
updateRRsetCache(rrset_ptr, rrsets_cache_);
@@ -166,6 +186,8 @@ ResolverCache::lookup(const isc::dns::Name& qname,
if (cc) {
return (cc->lookup(qname, qtype, response));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UNKNOWN_CLASS_MSG).
+ arg(qclass);
return (false);
}
}
@@ -179,6 +201,8 @@ ResolverCache::lookup(const isc::dns::Name& qname,
if (cc) {
return (cc->lookup(qname, qtype));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UNKNOWN_CLASS_RRSET).
+ arg(qclass);
return (RRsetPtr());
}
}
@@ -187,6 +211,8 @@ isc::dns::RRsetPtr
ResolverCache::lookupDeepestNS(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_DEEPEST).arg(qname).
+ arg(qclass);
isc::dns::RRType qtype = RRType::NS();
ResolverClassCache* cc = getClassCache(qclass);
if (cc) {
@@ -213,6 +239,9 @@ ResolverCache::update(const isc::dns::Message& msg) {
if (cc) {
return (cc->update(msg));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA,
+ CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG).
+ arg((*msg.beginQuestion())->getClass());
return (false);
}
}
@@ -223,20 +252,13 @@ ResolverCache::update(const isc::dns::ConstRRsetPtr& rrset_ptr) {
if (cc) {
return (cc->update(rrset_ptr));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA,
+ CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET).
+ arg(rrset_ptr->getClass());
return (false);
}
}
-void
-ResolverCache::dump(const std::string&) {
- //TODO
-}
-
-void
-ResolverCache::load(const std::string&) {
- //TODO
-}
-
ResolverClassCache*
ResolverCache::getClassCache(const isc::dns::RRClass& cache_class) const {
for (int i = 0; i < class_caches_.size(); ++i) {
diff --git a/src/lib/cache/resolver_cache.h b/src/lib/cache/resolver_cache.h
index 49818b5..5630bd7 100644
--- a/src/lib/cache/resolver_cache.h
+++ b/src/lib/cache/resolver_cache.h
@@ -76,6 +76,9 @@ public:
///
/// \note Public interaction with the cache should be through ResolverCache,
/// not directly with this one. (TODO: make this private/hidden/local to the .cc?)
+///
+/// \todo The resolver cache class should provide the interfaces for
+/// loading, dumping and resizing.
class ResolverClassCache {
public:
/// \brief Default Constructor.
@@ -86,8 +89,8 @@ public:
ResolverClassCache(const isc::dns::RRClass& cache_class);
/// \brief Construct Function.
- /// \param caches_size cache size information for each
- /// messages/rrsets of different classes.
+ /// \param cache_info Cache size information for each message/rrsets of
+ /// different classes.
ResolverClassCache(const CacheSizeInfo& cache_info);
/// \name Lookup Interfaces
@@ -300,23 +303,6 @@ public:
///
bool update(const isc::dns::ConstRRsetPtr& rrset_ptr);
- /// \name Cache Serialization
- //@{
- /// \brief Dump the cache content to one file.
- ///
- /// \param file_name file to write to
- ///
- /// \todo It should can be dumped to one configured database.
- void dump(const std::string& file_name);
-
- /// \brief Load the cache from one file.
- ///
- /// \param file to load from
- ///
- /// \todo It should can be loaded from one configured database.
- void load(const std::string& file_name);
- //@}
-
private:
/// \brief Returns the class-specific subcache
///
diff --git a/src/lib/cache/rrset_cache.cc b/src/lib/cache/rrset_cache.cc
index f538320..1a5fd48 100644
--- a/src/lib/cache/rrset_cache.cc
+++ b/src/lib/cache/rrset_cache.cc
@@ -14,8 +14,9 @@
#include <config.h>
-#include <string>
#include "rrset_cache.h"
+#include "logger.h"
+#include <string>
#include <nsas/nsas_entry_compare.h>
#include <nsas/hash_table.h>
#include <nsas/hash_deleter.h>
@@ -34,20 +35,28 @@ RRsetCache::RRsetCache(uint32_t cache_size,
rrset_lru_((3 * cache_size),
new HashDeleter<RRsetEntry>(rrset_table_))
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RRSET_INIT).arg(cache_size).
+ arg(RRClass(rrset_class));
}
RRsetEntryPtr
RRsetCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype)
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_LOOKUP).arg(qname).
+ arg(qtype).arg(RRClass(class_));
const string entry_name = genCacheEntryName(qname, qtype);
- RRsetEntryPtr entry_ptr = rrset_table_.get(HashKey(entry_name, RRClass(class_)));
+
+ RRsetEntryPtr entry_ptr = rrset_table_.get(HashKey(entry_name,
+ RRClass(class_)));
if (entry_ptr) {
if (entry_ptr->getExpireTime() > time(NULL)) {
// Only touch the non-expired rrset entries
rrset_lru_.touch(entry_ptr);
return (entry_ptr);
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_EXPIRED).arg(qname).
+ arg(qtype).arg(RRClass(class_));
// the rrset entry has expired, so just remove it from
// hash table and lru list.
rrset_table_.remove(entry_ptr->hashKey());
@@ -55,19 +64,31 @@ RRsetCache::lookup(const isc::dns::Name& qname,
}
}
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_NOT_FOUND).arg(qname).
+ arg(qtype).arg(RRClass(class_));
return (RRsetEntryPtr());
}
RRsetEntryPtr
-RRsetCache::update(const isc::dns::RRset& rrset, const RRsetTrustLevel& level) {
+RRsetCache::update(const isc::dns::RRset& rrset,
+ const RRsetTrustLevel& level)
+{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_UPDATE).arg(rrset.getName()).
+ arg(rrset.getType()).arg(rrset.getClass());
// TODO: If the RRset is an NS, we should update the NSAS as well
// lookup first
RRsetEntryPtr entry_ptr = lookup(rrset.getName(), rrset.getType());
if (entry_ptr) {
if (entry_ptr->getTrustLevel() > level) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_UNTRUSTED).
+ arg(rrset.getName()).arg(rrset.getType()).
+ arg(rrset.getClass());
// existed rrset entry is more authoritative, just return it
return (entry_ptr);
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_REMOVE_OLD).
+ arg(rrset.getName()).arg(rrset.getType()).
+ arg(rrset.getClass());
// Remove the old rrset entry from the lru list.
rrset_lru_.remove(entry_ptr);
}
@@ -79,24 +100,6 @@ RRsetCache::update(const isc::dns::RRset& rrset, const RRsetTrustLevel& level) {
return (entry_ptr);
}
-#if 0
-void
-RRsetCache::dump(const std::string&) {
- //TODO
-}
-
-void
-RRsetCache::load(const std::string&) {
- //TODO
-}
-
-bool
-RRsetCache::resize(uint32_t) {
- //TODO
- return (true);
-}
-#endif
-
} // namespace cache
} // namespace isc
diff --git a/src/lib/cache/rrset_cache.h b/src/lib/cache/rrset_cache.h
index 0e1b08f..73f9b58 100644
--- a/src/lib/cache/rrset_cache.h
+++ b/src/lib/cache/rrset_cache.h
@@ -30,6 +30,9 @@ class RRsetEntry;
/// \brief RRset Cache
/// The object of RRsetCache represented the cache for class-specific
/// RRsets.
+///
+/// \todo The rrset cache class should provide the interfaces for
+/// loading, dumping and resizing.
class RRsetCache{
///
/// \name Constructors and Destructor
@@ -73,28 +76,6 @@ public:
RRsetEntryPtr update(const isc::dns::RRset& rrset,
const RRsetTrustLevel& level);
-#if 0
- /// \brief Dump the rrset cache to specified file.
- ///
- /// \param file_name The file to write to
- ///
- /// \todo It should can be dumped to one configured database.
- void dump(const std::string& file_name);
-
- /// \brief Load the cache from one file.
- ///
- /// \param file_name The file to read from
- ///
- /// \todo It should can be loaded from one configured database.
- void load(const std::string& file_name);
-
- /// \brief Resize the size of rrset cache in runtime.
- ///
- /// \param The size to resize to
- /// \return true
- bool resize(uint32_t size);
-#endif
-
/// \short Protected memebers, so they can be accessed by tests.
protected:
uint16_t class_; // The class of the rrset cache.
diff --git a/src/lib/cache/rrset_entry.h b/src/lib/cache/rrset_entry.h
index 5fa8f2c..09cf79c 100644
--- a/src/lib/cache/rrset_entry.h
+++ b/src/lib/cache/rrset_entry.h
@@ -27,9 +27,9 @@ using namespace isc::nsas;
namespace isc {
namespace cache {
-/// \enum RRset Trustworthiness
+/// \enum RRsetTrustLevel
/// For detail of RRset trustworthiness, please refer to
-/// RFC2181 section5.4.1.
+/// RFC 2181 section 5.4.1.
/// Bigger value is more trustworthy.
enum RRsetTrustLevel {
/// Default trust for RRset.
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am
index 68a8425..a215c56 100644
--- a/src/lib/cache/tests/Makefile.am
+++ b/src/lib/cache/tests/Makefile.am
@@ -32,20 +32,20 @@ TESTS =
if HAVE_GTEST
TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
-run_unittests_SOURCES += rrset_entry_unittest.cc
-run_unittests_SOURCES += rrset_cache_unittest.cc
-run_unittests_SOURCES += message_cache_unittest.cc
-run_unittests_SOURCES += message_entry_unittest.cc
-run_unittests_SOURCES += local_zone_data_unittest.cc
-run_unittests_SOURCES += resolver_cache_unittest.cc
-run_unittests_SOURCES += negative_cache_unittest.cc
-run_unittests_SOURCES += cache_test_messagefromfile.h
-run_unittests_SOURCES += cache_test_sectioncount.h
+run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+run_unittests_SOURCES += rrset_entry_unittest.cc
+run_unittests_SOURCES += rrset_cache_unittest.cc
+run_unittests_SOURCES += message_cache_unittest.cc
+run_unittests_SOURCES += message_entry_unittest.cc
+run_unittests_SOURCES += local_zone_data_unittest.cc
+run_unittests_SOURCES += resolver_cache_unittest.cc
+run_unittests_SOURCES += negative_cache_unittest.cc
+run_unittests_SOURCES += cache_test_messagefromfile.h
+run_unittests_SOURCES += cache_test_sectioncount.h
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD)
# NOTE: we may have to clean up this hack later (see the note in configure.ac)
if NEED_LIBBOOST_THREAD
@@ -53,16 +53,18 @@ run_unittests_LDADD += -lboost_thread
endif
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
noinst_PROGRAMS = $(TESTS)
-EXTRA_DIST = testdata/message_cname_referral.wire
+EXTRA_DIST = testdata/message_cname_referral.wire
EXTRA_DIST += testdata/message_example_com_soa.wire
EXTRA_DIST += testdata/message_fromWire1
EXTRA_DIST += testdata/message_fromWire2
diff --git a/src/lib/cache/tests/run_unittests.cc b/src/lib/cache/tests/run_unittests.cc
index 2c86581..370bc69 100644
--- a/src/lib/cache/tests/run_unittests.cc
+++ b/src/lib/cache/tests/run_unittests.cc
@@ -15,14 +15,19 @@
#include <config.h>
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
+#include <log/logger_support.h>
+
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_SRCDIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
- return (RUN_ALL_TESTS());
+ isc::log::initLogger();
+
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/cc/Makefile.am b/src/lib/cc/Makefile.am
index 9d5b188..c23b27c 100644
--- a/src/lib/cc/Makefile.am
+++ b/src/lib/cc/Makefile.am
@@ -22,10 +22,18 @@ endif
lib_LTLIBRARIES = libcc.la
libcc_la_SOURCES = data.cc data.h session.cc session.h
+libcc_la_SOURCES += logger.cc logger.h
+nodist_libcc_la_SOURCES = cc_messages.cc cc_messages.h
+libcc_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
-CLEANFILES = *.gcno *.gcda session_config.h
+CLEANFILES = *.gcno *.gcda session_config.h cc_messages.cc cc_messages.h
session_config.h: session_config.h.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" session_config.h.pre >$@
-BUILT_SOURCES = session_config.h
+cc_messages.cc cc_messages.h: cc_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/cc/cc_messages.mes
+
+BUILT_SOURCES = session_config.h cc_messages.cc cc_messages.h
+
+EXTRA_DIST = cc_messages.mes
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
new file mode 100644
index 0000000..8370cdd
--- /dev/null
+++ b/src/lib/cc/cc_messages.mes
@@ -0,0 +1,108 @@
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::cc
+
+% CC_ASYNC_READ_FAILED asynchronous read failed
+This marks a low-level error: we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+
+% CC_CONN_ERROR error connecting to message queue (%1)
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely that the program reporting this error can usefully continue
+running, as communication with the rest of BIND 10 is vital for its
+components.
+
+% CC_DISCONNECT disconnecting from message queue daemon
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+
+% CC_ESTABLISH trying to establish connection with message queue daemon at %1
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+
+% CC_ESTABLISHED successfully connected to message queue daemon
+This debug message indicates that the connection was successfully made; it
+should follow CC_ESTABLISH.
+
+% CC_GROUP_RECEIVE trying to receive a message
+Debug message, noting that a message is expected to come over the command
+channel.
+
+% CC_GROUP_RECEIVED message arrived ('%1', '%2')
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending on whether we waited for it or just polled.
+
+% CC_GROUP_SEND sending message '%1' to group '%2'
+Debug message, we're about to send a message over the command channel.
+
+% CC_INVALID_LENGTHS invalid length parameters (%1, %2)
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as message lengths. The first is the total length
+of the message; the second is the length of the header. The header
+and its length field (2 bytes) are counted in the total length.
+
+% CC_LENGTH_NOT_READY length not ready
+There should be data representing the length of the message on the socket,
+but it is not there.
+
+% CC_NO_MESSAGE no message ready to be received yet
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+
+% CC_NO_MSGQ unable to connect to message queue (%1)
+It isn't possible to connect to the message queue daemon, for the reason
+listed. It is unlikely any program will be able to continue without it.
+
+% CC_READ_ERROR error reading data from command channel (%1)
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+
+% CC_READ_EXCEPTION error reading data from command channel (%1)
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+
+% CC_REPLY replying to message from '%1' with '%2'
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+
+% CC_SET_TIMEOUT setting timeout to %1ms
+Debug message. The time the program is willing to wait for a reply is
+being set.
+
+% CC_START_READ starting asynchronous read
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+
+% CC_SUBSCRIBE subscribing to communication group %1
+Debug message. The program wants to receive messages addressed to this group.
+
+% CC_TIMEOUT timeout reading data from command channel
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+
+% CC_UNSUBSCRIBE unsubscribing from communication group %1
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+
+% CC_WRITE_ERROR error writing data to command channel (%1)
+A low level error happened when the library tried to write data to the command
+channel socket.
+
+% CC_ZERO_LENGTH invalid message length (0)
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
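For reference, the sanity check that CC_INVALID_LENGTHS reports corresponds to
the condition tested in Session::recvmsg() (see the session.cc hunk later in
this patch). Read in isolation it amounts to the following sketch, where
lengthsValid() is a hypothetical stand-alone helper rather than a function of
the library:

    #include <cstdint>
    #include <iostream>

    // The total length must at least cover the 2-byte header-length field,
    // and the header cannot claim to be longer than the whole message.
    bool lengthsValid(uint32_t total_length, uint16_t header_length) {
        return (!(header_length > total_length || total_length < 2));
    }

    int main() {
        // 2-byte header-length field + 10-byte header + 20-byte payload = 32.
        std::cout << lengthsValid(32, 10) << std::endl; // 1: consistent
        std::cout << lengthsValid(1, 0) << std::endl;   // 0: shorter than the length field
        std::cout << lengthsValid(8, 12) << std::endl;  // 0: header longer than the message
        return (0);
    }

Anything failing this test is logged with CC_INVALID_LENGTHS and a
SessionError is raised.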
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index 6f7d4a2..ffa5346 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -52,13 +52,6 @@ Element::toWire(std::ostream& ss) const {
toJSON(ss);
}
-//
-// The following methods are effectively empty, and their parameters are
-// unused. To silence compilers that warn unused function parameters,
-// we specify a (compiler dependent) special keyword when available.
-// It's defined in config.h, and to avoid including this header file from
-// installed files we define the methods here.
-//
bool
Element::getValue(long int&) {
return (false);
@@ -454,7 +447,9 @@ from_stringstream_map(std::istream &in, const std::string& file, int& line,
ElementPtr map = Element::createMap();
skip_chars(in, " \t\n", line, pos);
char c = in.peek();
- if (c == '}') {
+ if (c == EOF) {
+ throwJSONError(std::string("Unterminated map, <string> or } expected"), file, line, pos);
+ } else if (c == '}') {
// empty map, skip closing curly
c = in.get();
} else {
@@ -516,6 +511,8 @@ Element::nameToType(const std::string& type_name) {
return (Element::list);
} else if (type_name == "map") {
return (Element::map);
+ } else if (type_name == "named_set") {
+ return (Element::map);
} else if (type_name == "null") {
return (Element::null);
} else if (type_name == "any") {
diff --git a/src/lib/cc/data.h b/src/lib/cc/data.h
index 0a363f4..5c731e6 100644
--- a/src/lib/cc/data.h
+++ b/src/lib/cc/data.h
@@ -479,7 +479,7 @@ public:
return (true);
}
using Element::setValue;
- bool setValue(std::map<std::string, ConstElementPtr>& v) {
+ bool setValue(const std::map<std::string, ConstElementPtr>& v) {
m = v;
return (true);
}
diff --git a/src/lib/cc/logger.cc b/src/lib/cc/logger.cc
new file mode 100644
index 0000000..36db88d
--- /dev/null
+++ b/src/lib/cc/logger.cc
@@ -0,0 +1,23 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <cc/logger.h>
+
+namespace isc {
+namespace cc {
+
+isc::log::Logger logger("cc");
+
+}
+}
diff --git a/src/lib/cc/logger.h b/src/lib/cc/logger.h
new file mode 100644
index 0000000..d6253d0
--- /dev/null
+++ b/src/lib/cc/logger.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef CC_LOGGER_H
+#define CC_LOGGER_H
+
+#include <cc/cc_messages.h>
+#include <log/macros.h>
+
+/// \file cc/logger.h
+/// \brief Command Channel library global logger
+///
+/// This holds the logger for the CC library. It is a private header
+/// and should not be included in any publicly used header, only in local
+/// cc files.
+
+namespace isc {
+namespace cc {
+
+/// Trace basic operation
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// This includes messages being sent and received, waiting for messages
+/// and alike.
+const int DBG_TRACE_DETAILED = DBGLVL_TRACE_DETAIL;
+
+// Declaration of the logger.
+extern isc::log::Logger logger;
+
+} // namespace cc
+} // namespace isc
+
+/// \brief Logger for this library
+
+#endif
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index e911a86..0052aca 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -14,6 +14,7 @@
#include <config.h>
#include <cc/session_config.h>
+#include <cc/logger.h>
#include <stdint.h>
@@ -118,12 +119,16 @@ private:
void
SessionImpl::establish(const char& socket_file) {
try {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
error_);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
} catch(const asio::system_error& se) {
+ LOG_FATAL(logger, CC_CONN_ERROR).arg(se.what());
isc_throw(SessionError, se.what());
}
if (error_) {
+ LOG_FATAL(logger, CC_NO_MSGQ).arg(error_.message());
isc_throw(SessionError, "Unable to connect to message queue: " <<
error_.message());
}
@@ -131,6 +136,7 @@ SessionImpl::establish(const char& socket_file) {
void
SessionImpl::disconnect() {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_DISCONNECT);
socket_.close();
data_length_ = 0;
}
@@ -140,6 +146,7 @@ SessionImpl::writeData(const void* data, size_t datalen) {
try {
asio::write(socket_, asio::buffer(data, datalen));
} catch (const asio::system_error& asio_ex) {
+ LOG_FATAL(logger, CC_WRITE_ERROR).arg(asio_ex.what());
isc_throw(SessionError, "ASIO write failed: " << asio_ex.what());
}
}
@@ -151,6 +158,7 @@ SessionImpl::readDataLength() {
if (ret_len == 0) {
readData(&data_length_, sizeof(data_length_));
if (data_length_ == 0) {
+ LOG_ERROR(logger, CC_LENGTH_NOT_READY);
isc_throw(SessionError, "ASIO read: data length is not ready");
}
ret_len = ntohl(data_length_);
@@ -199,9 +207,11 @@ SessionImpl::readData(void* data, size_t datalen) {
// asio::error_code evaluates to false if there was no error
if (*read_result) {
if (*read_result == asio::error::operation_aborted) {
+ LOG_ERROR(logger, CC_TIMEOUT);
isc_throw(SessionTimeout,
"Timeout while reading data from cc session");
} else {
+ LOG_ERROR(logger, CC_READ_ERROR).arg(read_result->message());
isc_throw(SessionError,
"Error while reading data from cc session: " <<
read_result->message());
@@ -210,6 +220,7 @@ SessionImpl::readData(void* data, size_t datalen) {
} catch (const asio::system_error& asio_ex) {
// to hide ASIO specific exceptions, we catch them explicitly
// and convert it to SessionError.
+ LOG_FATAL(logger, CC_READ_EXCEPTION).arg(asio_ex.what());
isc_throw(SessionError, "ASIO read failed: " << asio_ex.what());
}
}
@@ -233,15 +244,18 @@ SessionImpl::internalRead(const asio::error_code& error,
assert(bytes_transferred == sizeof(data_length_));
data_length_ = ntohl(data_length_);
if (data_length_ == 0) {
+ LOG_ERROR(logger, CC_ZERO_LENGTH);
isc_throw(SessionError, "Invalid message length (0)");
}
user_handler_();
} else {
+ LOG_ERROR(logger, CC_ASYNC_READ_FAILED);
isc_throw(SessionError, "asynchronous read failed");
}
}
-Session::Session(io_service& io_service) : impl_(new SessionImpl(io_service))
+Session::Session(asio::io_service& io_service) :
+ impl_(new SessionImpl(io_service))
{}
Session::~Session() {
@@ -255,6 +269,7 @@ Session::disconnect() {
void
Session::startRead(boost::function<void()> read_callback) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_START_READ);
impl_->startRead(read_callback);
}
@@ -374,6 +389,7 @@ Session::recvmsg(ConstElementPtr& env, ConstElementPtr& msg,
unsigned short header_length = ntohs(header_length_net);
if (header_length > length || length < 2) {
+ LOG_ERROR(logger, CC_INVALID_LENGTHS).arg(length).arg(header_length);
isc_throw(SessionError, "Length parameters invalid: total=" << length
<< ", header=" << header_length);
}
@@ -417,6 +433,7 @@ Session::recvmsg(ConstElementPtr& env, ConstElementPtr& msg,
void
Session::subscribe(std::string group, std::string instance) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_SUBSCRIBE).arg(group);
ElementPtr env = Element::createMap();
env->set("type", Element::create("subscribe"));
@@ -428,6 +445,7 @@ Session::subscribe(std::string group, std::string instance) {
void
Session::unsubscribe(std::string group, std::string instance) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_UNSUBSCRIBE).arg(group);
ElementPtr env = Element::createMap();
env->set("type", Element::create("unsubscribe"));
@@ -441,6 +459,8 @@ int
Session::group_sendmsg(ConstElementPtr msg, std::string group,
std::string instance, std::string to)
{
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_GROUP_SEND).arg(msg->str()).
+ arg(group);
ElementPtr env = Element::createMap();
long int nseq = ++impl_->sequence_;
@@ -460,11 +480,21 @@ bool
Session::group_recvmsg(ConstElementPtr& envelope, ConstElementPtr& msg,
bool nonblock, int seq)
{
- return (recvmsg(envelope, msg, nonblock, seq));
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_GROUP_RECEIVE);
+ bool result(recvmsg(envelope, msg, nonblock, seq));
+ if (result) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_GROUP_RECEIVED).
+ arg(envelope->str()).arg(msg->str());
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_NO_MESSAGE);
+ }
+ return (result);
}
int
Session::reply(ConstElementPtr envelope, ConstElementPtr newmsg) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_REPLY).arg(envelope->str()).
+ arg(newmsg->str());
ElementPtr env = Element::createMap();
long int nseq = ++impl_->sequence_;
@@ -488,6 +518,7 @@ Session::hasQueuedMsgs() const {
void
Session::setTimeout(size_t milliseconds) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_SET_TIMEOUT).arg(milliseconds);
impl_->setTimeout(milliseconds);
}
diff --git a/src/lib/cc/tests/Makefile.am b/src/lib/cc/tests/Makefile.am
index 71e6988..4760855 100644
--- a/src/lib/cc/tests/Makefile.am
+++ b/src/lib/cc/tests/Makefile.am
@@ -26,6 +26,8 @@ run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/cc/tests/data_unittests.cc b/src/lib/cc/tests/data_unittests.cc
index 2536682..53d5ab8 100644
--- a/src/lib/cc/tests/data_unittests.cc
+++ b/src/lib/cc/tests/data_unittests.cc
@@ -396,9 +396,24 @@ TEST(Element, to_and_from_wire) {
EXPECT_EQ("1", Element::fromWire(ss, 1)->str());
// Some malformed JSON input
+ EXPECT_THROW(Element::fromJSON("{ "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\" "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": \"b\""), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": {"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": {}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": []"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": [ }"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("{\":"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("]"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("[ 1, 2, }"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ 1, 2, {}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ 1, 2, { ]"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{{}}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{[]}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\", \"b\" }"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ \"a\": \"b\" ]"), isc::data::JSONError);
}
ConstElementPtr
diff --git a/src/lib/cc/tests/run_unittests.cc b/src/lib/cc/tests/run_unittests.cc
index 0908071..299bd96 100644
--- a/src/lib/cc/tests/run_unittests.cc
+++ b/src/lib/cc/tests/run_unittests.cc
@@ -13,9 +13,14 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+#include <log/logger_support.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+
+ isc::log::initLogger();
+
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/config/Makefile.am b/src/lib/config/Makefile.am
index 52337ad..500ff12 100644
--- a/src/lib/config/Makefile.am
+++ b/src/lib/config/Makefile.am
@@ -6,10 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
AM_CPPFLAGS += $(BOOST_INCLUDES)
# Define rule to build logging source files from message file
-configdef.h configdef.cc: configdef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/configdef.mes
+config_messages.h config_messages.cc: config_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/config/config_messages.mes
-BUILT_SOURCES = configdef.h configdef.cc
+BUILT_SOURCES = config_messages.h config_messages.cc
lib_LTLIBRARIES = libcfgclient.la
libcfgclient_la_SOURCES = config_data.h config_data.cc
@@ -17,9 +17,9 @@ libcfgclient_la_SOURCES += module_spec.h module_spec.cc
libcfgclient_la_SOURCES += ccsession.cc ccsession.h
libcfgclient_la_SOURCES += config_log.h config_log.cc
-nodist_libcfgclient_la_SOURCES = configdef.h configdef.cc
+nodist_libcfgclient_la_SOURCES = config_messages.h config_messages.cc
# The message file should be in the distribution.
-EXTRA_DIST = configdef.mes
+EXTRA_DIST = config_messages.mes
-CLEANFILES = *.gcno *.gcda configdef.h configdef.cc
+CLEANFILES = *.gcno *.gcda config_messages.h config_messages.cc
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index 1b5e47d..ac85077 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -18,11 +18,15 @@
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
+#include <ctype.h>
-#include <iostream>
+#include <algorithm>
+#include <cerrno>
#include <fstream>
+#include <iostream>
+#include <set>
#include <sstream>
-#include <cerrno>
+#include <string>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
@@ -35,6 +39,11 @@
#include <config/config_log.h>
#include <config/ccsession.h>
+#include <log/logger_support.h>
+#include <log/logger_specification.h>
+#include <log/logger_manager.h>
+#include <log/logger_name.h>
+
using namespace std;
using isc::data::Element;
@@ -151,15 +160,246 @@ parseCommand(ConstElementPtr& arg, ConstElementPtr command) {
}
}
+namespace {
+// Temporary workaround functions for missing functionality in
+// getValue() (main problem described in ticket #993)
+// This returns either the value set for the given relative id,
+// or its default value
+// (intentionally defined here so this interface does not get
+// included in ConfigData as it is)
+ConstElementPtr getValueOrDefault(ConstElementPtr config_part,
+ const std::string& relative_id,
+ const ConfigData& config_data,
+ const std::string& full_id) {
+ if (config_part->contains(relative_id)) {
+ return config_part->get(relative_id);
+ } else {
+ return config_data.getDefaultValue(full_id);
+ }
+}
+
+// Prefix name with "b10-".
+//
+// In BIND 10, modules have names taken from the .spec file, which are typically
+// names starting with a capital letter (e.g. "Resolver", "Auth" etc.). The
+// names of the associated binaries are derived from the module names, being
+// prefixed "b10-" and having the first letter of the module name lower-cased
+// (e.g. "b10-resolver", "b10-auth"). (It is a required convention that there
+// be this relationship between the names.)
+//
+// Within the binaries the root loggers are named after the binaries themselves.
+// (The reason for this is that the name of the logger is included in the
+// message logged, making it clear which message comes from which BIND 10
+// process.) As logging is configured using module names, the configuration code
+// has to match these with the corresponding logger names. This function
+// converts a module name to a root logger name by lowercasing the first letter
+// of the module name and prepending "b10-".
+//
+// \param instring String to convert. (This may be empty, in which case
+// "b10-" will be returned.)
+//
+// \return Converted string.
+std::string
+b10Prefix(const std::string& instring) {
+ std::string result = instring;
+ if (!result.empty()) {
+ result[0] = tolower(result[0]);
+ }
+ return (std::string("b10-") + result);
+}
+
+// Reads an output_option subelement of a logger configuration,
+// and sets the values therein to the given OutputOption struct,
+// or default values (from config_data) if they are not provided.
+void
+readOutputOptionConf(isc::log::OutputOption& output_option,
+ ConstElementPtr output_option_el,
+ const ConfigData& config_data)
+{
+ ConstElementPtr destination_el = getValueOrDefault(output_option_el,
+ "destination", config_data,
+ "loggers/output_options/destination");
+ output_option.destination = isc::log::getDestination(destination_el->stringValue());
+ ConstElementPtr output_el = getValueOrDefault(output_option_el,
+ "output", config_data,
+ "loggers/output_options/output");
+ if (output_option.destination == isc::log::OutputOption::DEST_CONSOLE) {
+ output_option.stream = isc::log::getStream(output_el->stringValue());
+ } else if (output_option.destination == isc::log::OutputOption::DEST_FILE) {
+ output_option.filename = output_el->stringValue();
+ } else if (output_option.destination == isc::log::OutputOption::DEST_SYSLOG) {
+ output_option.facility = output_el->stringValue();
+ }
+ output_option.flush = getValueOrDefault(output_option_el,
+ "flush", config_data,
+ "loggers/output_options/flush")->boolValue();
+ output_option.maxsize = getValueOrDefault(output_option_el,
+ "maxsize", config_data,
+ "loggers/output_options/maxsize")->intValue();
+ output_option.maxver = getValueOrDefault(output_option_el,
+ "maxver", config_data,
+ "loggers/output_options/maxver")->intValue();
+}
+
+// Reads a full 'loggers' configuration, and adds the loggers therein
+// to the given vector, filling in blanks with defaults from config_data
+void
+readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
+ ConstElementPtr logger,
+ const ConfigData& config_data)
+{
+ // Read name, adding prefix as required.
+ std::string lname = logger->get("name")->stringValue();
+
+ ConstElementPtr severity_el = getValueOrDefault(logger,
+ "severity", config_data,
+ "loggers/severity");
+ isc::log::Severity severity = isc::log::getSeverity(
+ severity_el->stringValue());
+ int dbg_level = getValueOrDefault(logger, "debuglevel",
+ config_data,
+ "loggers/debuglevel")->intValue();
+ bool additive = getValueOrDefault(logger, "additive", config_data,
+ "loggers/additive")->boolValue();
+
+ isc::log::LoggerSpecification logger_spec(
+ lname, severity, dbg_level, additive
+ );
+
+ if (logger->contains("output_options")) {
+ BOOST_FOREACH(ConstElementPtr output_option_el,
+ logger->get("output_options")->listValue()) {
+ // create outputoptions
+ isc::log::OutputOption output_option;
+ readOutputOptionConf(output_option,
+ output_option_el,
+ config_data);
+ logger_spec.addOutputOption(output_option);
+ }
+ }
+
+ specs.push_back(logger_spec);
+}
+
+// Copies the map for a logger, changing the name of the logger in the process.
+// This is used because the map being copied is "const", so in order to
+// change the name we need to create a new one.
+//
+// \param cur_logger Logger being copied.
+// \param new_name New value of the "name" element at the top level.
+//
+// \return Pointer to the map with the updated element.
+ConstElementPtr
+copyLogger(ConstElementPtr& cur_logger, const std::string& new_name) {
+
+ // Since we'll only be updating one first-level element and subsequent
+ // use won't change the contents of the map, a shallow map copy is enough.
+ ElementPtr new_logger(Element::createMap());
+ new_logger->setValue(cur_logger->mapValue());
+ new_logger->set("name", Element::create(new_name));
+
+ return (new_logger);
+}
+
+
+} // end anonymous namespace
+
+
+ConstElementPtr
+getRelatedLoggers(ConstElementPtr loggers) {
+ // Keep a list of names for easier lookup later
+ std::set<std::string> our_names;
+ const std::string& root_name = isc::log::getRootLoggerName();
+
+ ElementPtr result = isc::data::Element::createList();
+
+ BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+ // Need to add the b10- prefix to names read from the spec file.
+ const std::string cur_name = cur_logger->get("name")->stringValue();
+ const std::string mod_name = b10Prefix(cur_name);
+ if (mod_name == root_name || mod_name.find(root_name + ".") == 0) {
+
+ // Note this name so that we don't add a wildcard that matches it.
+ our_names.insert(mod_name);
+
+ // We want to store the logger with the modified name (i.e. with
+ // the b10- prefix). As we are dealing with const loggers, we
+ // store a modified copy of the data.
+ result->add(copyLogger(cur_logger, mod_name));
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS, CONFIG_LOG_EXPLICIT)
+ .arg(cur_name);
+
+ } else if (!cur_name.empty() && (cur_name[0] != '*')) {
+ // Not a wildcard logger and we are ignoring it.
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_IGNORE_EXPLICIT).arg(cur_name);
+ }
+ }
+
+ // Now find the wildcard names (the ones that start with "*").
+ BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+ std::string cur_name = cur_logger->get("name")->stringValue();
+ // If name is '*', or starts with '*.', replace * with root
+ // logger name.
+ if (cur_name == "*" || (cur_name.length() > 1 &&
+ cur_name[0] == '*' && cur_name[1] == '.')) {
+
+ // Substitute the "*" with the root name
+ std::string mod_name = cur_name;
+ mod_name.replace(0, 1, root_name);
+
+ // Now add it to the result list, but only if a logger with
+ // that name was not configured explicitly.
+ if (our_names.find(mod_name) == our_names.end()) {
+
+ // We substitute the name here, but as we are dealing with
+ // consts, we need to copy the data.
+ result->add(copyLogger(cur_logger, mod_name));
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_WILD_MATCH).arg(cur_name);
+
+ } else if (!cur_name.empty() && (cur_name[0] == '*')) {
+ // Is a wildcard and we are ignoring it (because the wildcard
+ // expands to a specification that we already encountered when
+ // processing explicit names).
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_IGNORE_WILD).arg(cur_name);
+ }
+ }
+ }
+ return (result);
+}
+
+void
+default_logconfig_handler(const std::string& module_name,
+ ConstElementPtr new_config,
+ const ConfigData& config_data) {
+ config_data.getModuleSpec().validateConfig(new_config, true);
+
+ std::vector<isc::log::LoggerSpecification> specs;
+
+ if (new_config->contains("loggers")) {
+ ConstElementPtr loggers = getRelatedLoggers(new_config->get("loggers"));
+ BOOST_FOREACH(ConstElementPtr logger,
+ loggers->listValue()) {
+ readLoggersConf(specs, logger, config_data);
+ }
+ }
+
+ isc::log::LoggerManager logger_manager;
+ logger_manager.process(specs.begin(), specs.end());
+}
+
+
ModuleSpec
ModuleCCSession::readModuleSpecification(const std::string& filename) {
std::ifstream file;
ModuleSpec module_spec;
-
+
// this file should be declared in a @something@ directive
file.open(filename.c_str());
if (!file) {
- LOG_ERROR(config_logger, CONFIG_FOPEN_ERR).arg(filename).arg(strerror(errno));
+ LOG_ERROR(config_logger, CONFIG_OPEN_FAIL).arg(filename).arg(strerror(errno));
isc_throw(CCSessionInitError, strerror(errno));
}
@@ -169,7 +409,7 @@ ModuleCCSession::readModuleSpecification(const std::string& filename) {
LOG_ERROR(config_logger, CONFIG_JSON_PARSE).arg(filename).arg(pe.what());
isc_throw(CCSessionInitError, pe.what());
} catch (const ModuleSpecError& dde) {
- LOG_ERROR(config_logger, CONFIG_MODULE_SPEC).arg(filename).arg(dde.what());
+ LOG_ERROR(config_logger, CONFIG_MOD_SPEC_FORMAT).arg(filename).arg(dde.what());
isc_throw(CCSessionInitError, dde.what());
}
file.close();
@@ -192,8 +432,11 @@ ModuleCCSession::ModuleCCSession(
isc::data::ConstElementPtr(*config_handler)(
isc::data::ConstElementPtr new_config),
isc::data::ConstElementPtr(*command_handler)(
- const std::string& command, isc::data::ConstElementPtr args)
+ const std::string& command, isc::data::ConstElementPtr args),
+ bool start_immediately,
+ bool handle_logging
) :
+ started_(false),
session_(session)
{
module_specification_ = readModuleSpecification(spec_file_name);
@@ -205,10 +448,8 @@ ModuleCCSession::ModuleCCSession(
session_.establish(NULL);
session_.subscribe(module_name_, "*");
- //session_.subscribe("Boss", "*");
- //session_.subscribe("statistics", "*");
- // send the data specification
+ // send the data specification
ConstElementPtr spec_msg = createCommand("module_spec",
module_specification_.getFullSpec());
unsigned int seq = session_.group_sendmsg(spec_msg, "ConfigManager");
@@ -218,10 +459,10 @@ ModuleCCSession::ModuleCCSession(
int rcode;
ConstElementPtr err = parseAnswer(rcode, answer);
if (rcode != 0) {
- LOG_ERROR(config_logger, CONFIG_MANAGER_MOD_SPEC).arg(answer->str());
+ LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
isc_throw(CCSessionInitError, answer->str());
}
-
+
setLocalConfig(Element::fromJSON("{}"));
// get any stored configuration from the manager
if (config_handler_) {
@@ -232,13 +473,32 @@ ModuleCCSession::ModuleCCSession(
if (rcode == 0) {
handleConfigUpdate(new_config);
} else {
- LOG_ERROR(config_logger, CONFIG_MANAGER_CONFIG).arg(new_config->str());
+ LOG_ERROR(config_logger, CONFIG_GET_FAIL).arg(new_config->str());
isc_throw(CCSessionInitError, answer->str());
}
}
+ // Keep track of logging settings automatically
+ if (handle_logging) {
+ addRemoteConfig("Logging", default_logconfig_handler, false);
+ }
+
+ if (start_immediately) {
+ start();
+ }
+
+}
+
+void
+ModuleCCSession::start() {
+ if (started_) {
+ isc_throw(CCSessionError, "Module CC session already started");
+ }
+
// register callback for asynchronous read
session_.startRead(boost::bind(&ModuleCCSession::startCheck, this));
+
+ started_ = true;
}
/// Validates the new config values, if they are correct,
@@ -328,7 +588,7 @@ int
ModuleCCSession::checkCommand() {
ConstElementPtr cmd, routing, data;
if (session_.group_recvmsg(routing, data, true)) {
-
+
/* ignore result messages (in case we're out of sync, to prevent
* pingpongs */
if (data->getType() != Element::map || data->contains("result")) {
@@ -346,6 +606,11 @@ ModuleCCSession::checkCommand() {
}
} catch (const CCSessionError& re) {
LOG_ERROR(config_logger, CONFIG_CCSESSION_MSG).arg(re.what());
+ } catch (const std::exception& stde) {
+ // No matter what unexpected error happens, we do not want
+ // to crash because of an incoming event, so we log the
+ // exception and continue to run
+ LOG_ERROR(config_logger, CONFIG_CCSESSION_MSG_INTERNAL).arg(stde.what());
}
if (!isNull(answer)) {
session_.reply(routing, answer);
@@ -355,23 +620,64 @@ ModuleCCSession::checkCommand() {
return (0);
}
+ModuleSpec
+ModuleCCSession::fetchRemoteSpec(const std::string& module, bool is_filename) {
+ if (is_filename) {
+ // It is a filename, simply load it.
+ return (readModuleSpecification(module));
+ } else {
+ // It's module name, request it from config manager
+
+ // Send the command
+ ConstElementPtr cmd(createCommand("get_module_spec",
+ Element::fromJSON("{\"module_name\": \"" + module +
+ "\"}")));
+ unsigned int seq = session_.group_sendmsg(cmd, "ConfigManager");
+ ConstElementPtr env, answer;
+ session_.group_recvmsg(env, answer, false, seq);
+ int rcode;
+ ConstElementPtr spec_data = parseAnswer(rcode, answer);
+ if (rcode == 0 && spec_data) {
+ // received OK, construct the spec out of it
+ ModuleSpec spec = ModuleSpec(spec_data);
+ if (module != spec.getModuleName()) {
+ // It's a different module!
+ isc_throw(CCSessionError, "Module name mismatch");
+ }
+ return (spec);
+ } else {
+ isc_throw(CCSessionError, "Error getting config for " +
+ module + ": " + answer->str());
+ }
+ }
+}
+
std::string
-ModuleCCSession::addRemoteConfig(const std::string& spec_file_name) {
- ModuleSpec rmod_spec = readModuleSpecification(spec_file_name);
- std::string module_name = rmod_spec.getFullSpec()->get("module_name")->stringValue();
- ConfigData rmod_config = ConfigData(rmod_spec);
- session_.subscribe(module_name);
+ModuleCCSession::addRemoteConfig(const std::string& spec_name,
+ void (*handler)(const std::string& module,
+ ConstElementPtr,
+ const ConfigData&),
+ bool spec_is_filename)
+{
+ // First get the module name, specification and default config
+ const ModuleSpec rmod_spec(fetchRemoteSpec(spec_name, spec_is_filename));
+ const std::string module_name(rmod_spec.getModuleName());
+ ConfigData rmod_config(rmod_spec);
- // Get the current configuration values for that module
- ConstElementPtr cmd = Element::fromJSON("{ \"command\": [\"get_config\", {\"module_name\":\"" + module_name + "\"} ] }");
- unsigned int seq = session_.group_sendmsg(cmd, "ConfigManager");
+ // Get the current configuration values from config manager
+ ConstElementPtr cmd(createCommand("get_config",
+ Element::fromJSON("{\"module_name\": \"" +
+ module_name + "\"}")));
+ const unsigned int seq = session_.group_sendmsg(cmd, "ConfigManager");
ConstElementPtr env, answer;
session_.group_recvmsg(env, answer, false, seq);
int rcode;
ConstElementPtr new_config = parseAnswer(rcode, answer);
+ ElementPtr local_config;
if (rcode == 0 && new_config) {
- ElementPtr local_config = rmod_config.getLocalConfig();
+ // Merge the received config into existing local config
+ local_config = rmod_config.getLocalConfig();
isc::data::merge(local_config, new_config);
rmod_config.setLocalConfig(local_config);
} else {
@@ -380,6 +686,13 @@ ModuleCCSession::addRemoteConfig(const std::string& spec_file_name) {
// all ok, add it
remote_module_configs_[module_name] = rmod_config;
+ if (handler) {
+ remote_module_handlers_[module_name] = handler;
+ handler(module_name, local_config, rmod_config);
+ }
+
+ // Make sure we get updates in future
+ session_.subscribe(module_name);
return (module_name);
}
@@ -390,6 +703,7 @@ ModuleCCSession::removeRemoteConfig(const std::string& module_name) {
it = remote_module_configs_.find(module_name);
if (it != remote_module_configs_.end()) {
remote_module_configs_.erase(it);
+ remote_module_handlers_.erase(module_name);
session_.unsubscribe(module_name);
}
}
@@ -419,6 +733,11 @@ ModuleCCSession::updateRemoteConfig(const std::string& module_name,
if (it != remote_module_configs_.end()) {
ElementPtr rconf = (*it).second.getLocalConfig();
isc::data::merge(rconf, new_config);
+ std::map<std::string, RemoteHandler>::iterator hit =
+ remote_module_handlers_.find(module_name);
+ if (hit != remote_module_handlers_.end()) {
+ hit->second(module_name, new_config, it->second);
+ }
}
}
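
For illustration only (not part of this commit): a minimal sketch of how a "loggers" update is filtered by getRelatedLoggers() before default_logconfig_handler() applies it. The JSON content and the root logger name "b10-resolver" are assumptions for the example.

    #include <cc/data.h>
    #include <config/ccsession.h>
    #include <log/logger_name.h>

    void exampleLoggingUpdate() {
        // Assume this process runs as b10-resolver.
        isc::log::setRootLoggerName("b10-resolver");

        // A "loggers" list as it might arrive from the Logging module.
        isc::data::ConstElementPtr loggers = isc::data::Element::fromJSON(
            "[ { \"name\": \"*\", \"severity\": \"INFO\" },"
            "  { \"name\": \"b10-auth\", \"severity\": \"DEBUG\" } ]");

        // getRelatedLoggers() drops the b10-auth entry (it belongs to a
        // different module) and rewrites "*" to "b10-resolver"; the result
        // is what readLoggersConf() and the LoggerManager then consume.
        isc::data::ConstElementPtr related =
            isc::config::getRelatedLoggers(loggers);
    }
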
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 7364876..50bb65c 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -161,6 +161,7 @@ public:
* configuration of the local module needs to be updated.
* This must refer to a valid object of a concrete derived class of
* AbstractSession without establishing the session.
+ *
* Note: the design decision on who is responsible for establishing the
* session is in flux, and may change in near future.
*
@@ -171,6 +172,14 @@ public:
* @param command_handler A callback function pointer to be called when
* a control command from a remote agent needs to be performed on the
* local module.
+ * @param start_immediately If true (default), start listening to new commands
+ * and configuration changes asynchronously at the end of the constructor;
+ * if false, it will be delayed until the start() method is explicitly
+ * called. (This is a short-term workaround for an initialization issue;
+ * we'll need to develop a cleaner solution, and then remove this knob.)
+ * @param handle_logging If true, the ModuleCCSession will automatically
+ * take care of logging configuration through the virtual Logging config
+ * module. Defaults to true.
*/
ModuleCCSession(const std::string& spec_file_name,
isc::cc::AbstractSession& session,
@@ -178,9 +187,21 @@ public:
isc::data::ConstElementPtr new_config) = NULL,
isc::data::ConstElementPtr(*command_handler)(
const std::string& command,
- isc::data::ConstElementPtr args) = NULL
+ isc::data::ConstElementPtr args) = NULL,
+ bool start_immediately = true,
+ bool handle_logging = true
);
+ /// Start receiving new commands and configuration changes asynchronously.
+ ///
+ /// This method must be called only once, and only when the ModuleCCSession
+ /// was constructed with start_immediately being false. Otherwise
+ /// CCSessionError will be thrown.
+ ///
+ /// As noted in the constructor, this method should be considered a
+ /// short-term workaround and will be removed in the future.
+ void start();
+
/**
* Optional optimization for checkCommand loop; returns true
* if there are unhandled queued messages in the cc session.
@@ -234,24 +255,48 @@ public:
/**
* Gives access to the configuration values of a different module
* Once this function has been called with the name of the specification
- * file of the module you want the configuration of, you can use
+ * file or the module you want the configuration of, you can use
* \c getRemoteConfigValue() to get a specific setting.
- * Changes are automatically updated, but you cannot specify handlers
- * for those changes, must use \c getRemoteConfigValue() to get a value
- * This function will subscribe to the relevant module channel.
+ * Changes are automatically updated, and you can specify handlers
+ * for those changes. This function will subscribe to the relevant module
+ * channel.
+ *
+ * This method must be called before calling the \c start() method on the
+ * ModuleCCSession (it also implies the ModuleCCSession must have been
+ * constructed with start_immediately being false).
*
- * \param spec_file_name The path to the specification file of
- * the module we want to have configuration
- * values from
+ * \param spec_name This specifies the module to add. It is either a
+ * filename of the spec file to use or a module name
+ * (if it is a module name, the spec data is
+ * downloaded from the configuration manager, therefore
+ * the configuration manager must know it). If
+ * spec_is_filename is true (the default), then a
+ * filename is assumed, otherwise a module name.
+ * \param handler The handler function called whenever there's a change.
+ * Called once initially from this function. May be NULL
+ * if you don't want any handler to be called and you're
+ * fine with requesting the data through
+ * getRemoteConfigValue() each time.
+ *
+ * The handler should not throw, or it'll fall through and
+ * the exception will get into strange places, probably
+ * aborting the application.
+ * \param spec_is_filename Says whether spec_name is a filename or a module name.
* \return The name of the module specified in the given specification
* file
*/
- std::string addRemoteConfig(const std::string& spec_file_name);
+ std::string addRemoteConfig(const std::string& spec_name,
+ void (*handler)(const std::string& module_name,
+ isc::data::ConstElementPtr
+ update,
+ const ConfigData& config_data) = NULL,
+ bool spec_is_filename = true);
/**
* Removes the module with the given name from the remote config
* settings. If the module was not added with \c addRemoteConfig(),
- * nothing happens.
+ * nothing happens. If there was a handler for this config, it is
+ * removed as well.
*/
void removeRemoteConfig(const std::string& module_name);
@@ -274,7 +319,8 @@ public:
private:
ModuleSpec readModuleSpecification(const std::string& filename);
void startCheck();
-
+
+ bool started_;
std::string module_name_;
isc::cc::AbstractSession& session_;
ModuleSpec module_specification_;
@@ -296,13 +342,72 @@ private:
const std::string& command,
isc::data::ConstElementPtr args);
+ typedef void (*RemoteHandler)(const std::string&,
+ isc::data::ConstElementPtr,
+ const ConfigData&);
std::map<std::string, ConfigData> remote_module_configs_;
+ std::map<std::string, RemoteHandler> remote_module_handlers_;
+
void updateRemoteConfig(const std::string& module_name,
isc::data::ConstElementPtr new_config);
+
+ ModuleSpec fetchRemoteSpec(const std::string& module, bool is_filename);
};
-}
-}
+/// \brief Default handler for logging config updates
+///
+/// When a ModuleCCSession is initialized with handle_logging set to true,
+/// this callback will be used to update the logger when a configuration
+/// change comes in.
+///
+/// This function updates the (global) loggers by initializing a
+/// LoggerManager and passing the settings as specified in the given
+/// configuration update.
+///
+/// \param module_name The name of the module
+/// \param new_config The modified configuration values
+/// \param config_data The full config data for the (remote) logging
+/// module.
+void
+default_logconfig_handler(const std::string& module_name,
+ isc::data::ConstElementPtr new_config,
+ const ConfigData& config_data);
+
+
+/// \brief Returns the loggers related to this module
+///
+/// This function does two things:
+/// - it drops the configuration parts for loggers for other modules.
+/// - it replaces the '*' in the name of the loggers by the name of
+/// this module, but *only* if the expanded name is not configured
+/// explicitly.
+///
+/// Examples: if this is the module b10-resolver,
+/// For the config names ['*', 'b10-auth']
+/// The '*' is replaced with 'b10-resolver', and this logger is used.
+/// 'b10-auth' is ignored (it will, of course, be used in the b10-auth
+/// module itself).
+///
+/// For ['*', 'b10-resolver']
+/// The '*' is ignored, and only 'b10-resolver' is used.
+///
+/// For ['*.reslib', 'b10-resolver']
+/// Or ['b10-resolver.reslib', '*']
+/// Both are used, where the * will be expanded to b10-resolver
+///
+/// \note This is a public function at this time, but mostly for
+/// the purposes of testing. Once we can directly test what loggers
+/// are running, this function may be moved to the unnamed namespace
+///
+/// \param loggers the original 'loggers' config list
+/// \return ListElement containing only loggers relevant for this
+/// module, where * is replaced by the root logger name
+isc::data::ConstElementPtr
+getRelatedLoggers(isc::data::ConstElementPtr loggers);
+
+} // namespace config
+
+} // namespace isc
#endif // __CCSESSION_H
// Local Variables:
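
For illustration only (not part of this commit): a usage sketch of the extended ModuleCCSession interface declared above. The spec file name "example.spec", the module name "OtherModule" and the handler body are hypothetical.

    #include <cc/session.h>
    #include <config/ccsession.h>

    // Hypothetical handler reacting to remote configuration updates.
    void myRemoteHandler(const std::string& module_name,
                         isc::data::ConstElementPtr update,
                         const isc::config::ConfigData& config_data) {
        // inspect 'update' or query config_data here
    }

    void exampleSetup(isc::cc::AbstractSession& session) {
        // Delay the asynchronous read so that addRemoteConfig() can still
        // use synchronous receives; keep automatic logging configuration.
        isc::config::ModuleCCSession mccs("example.spec", session, NULL, NULL,
                                          false /* start_immediately */,
                                          true  /* handle_logging */);

        // Register for another module's configuration; request the spec
        // from the config manager by module name instead of from a file.
        mccs.addRemoteConfig("OtherModule", myRemoteHandler, false);

        // Now begin receiving commands and configuration updates.
        mccs.start();
    }
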
diff --git a/src/lib/config/config_data.cc b/src/lib/config/config_data.cc
index 1fa37c1..ebe51cc 100644
--- a/src/lib/config/config_data.cc
+++ b/src/lib/config/config_data.cc
@@ -21,6 +21,63 @@
using namespace isc::data;
+namespace {
+
+// Returns the '_spec' part of a list or map specification (recursively,
+// i.e. if it is a list of lists or maps, will return the spec of the
+// inner-most list or map).
+//
+// \param spec_part the list or map specification (part)
+// \return the value of spec_part's "list_item_spec" or "map_item_spec",
+// or the original spec_part, if it is not a MapElement or does
+// not contain "list_item_spec" or "map_item_spec"
+ConstElementPtr findListOrMapSubSpec(ConstElementPtr spec_part) {
+ while (spec_part->getType() == Element::map &&
+ (spec_part->contains("list_item_spec") ||
+ spec_part->contains("map_item_spec"))) {
+ if (spec_part->contains("list_item_spec")) {
+ spec_part = spec_part->get("list_item_spec");
+ } else {
+ spec_part = spec_part->get("map_item_spec");
+ }
+ }
+ return (spec_part);
+}
+
+// Returns a specific Element in a given specification ListElement
+//
+// \exception DataNotFoundError if the given identifier does not
+// point to an existing element. Since we are dealing with the
+// specification here, and not the config data itself, this should
+// not happen; if it does, it indicates a code bug.
+//
+// \param spec_part ListElement to find the element in
+// \param id_part the name of the element to find (must match the value
+// "item_name" in the list item
+// \param id_full the full identifier that id_part is part of; this is
+// used to report any errors more precisely
+ConstElementPtr findItemInSpecList(ConstElementPtr spec_part,
+ const std::string& id_part,
+ const std::string& id_full)
+{
+ bool found = false;
+ BOOST_FOREACH(ConstElementPtr list_el, spec_part->listValue()) {
+ if (list_el->getType() == Element::map &&
+ list_el->contains("item_name") &&
+ list_el->get("item_name")->stringValue() == id_part) {
+ spec_part = list_el;
+ found = true;
+ }
+ }
+ if (!found) {
+ isc_throw(isc::config::DataNotFoundError,
+ id_part + " in " + id_full + " not found");
+ }
+ return (spec_part);
+}
+
+} // anonymous namespace
+
namespace isc {
namespace config {
@@ -36,11 +93,10 @@ namespace config {
// validated and conforms to the specification.
static ConstElementPtr
find_spec_part(ConstElementPtr spec, const std::string& identifier) {
- //std::cout << "[XX] find_spec_part for " << identifier << std::endl;
if (!spec) {
isc_throw(DataNotFoundError, "Empty specification");
}
- //std::cout << "in: " << std::endl << spec << std::endl;
+
ConstElementPtr spec_part = spec;
if (identifier == "") {
isc_throw(DataNotFoundError, "Empty identifier");
@@ -49,59 +105,44 @@ find_spec_part(ConstElementPtr spec, const std::string& identifier) {
size_t sep = id.find('/');
while(sep != std::string::npos) {
std::string part = id.substr(0, sep);
- //std::cout << "[XX] id part: " << part << std::endl;
+
if (spec_part->getType() == Element::list) {
- bool found = false;
- BOOST_FOREACH(ConstElementPtr list_el, spec_part->listValue()) {
- if (list_el->getType() == Element::map &&
- list_el->contains("item_name") &&
- list_el->get("item_name")->stringValue() == part) {
- spec_part = list_el;
- found = true;
- }
- }
- if (!found) {
- isc_throw(DataNotFoundError, identifier);
- }
+ spec_part = findItemInSpecList(spec_part, part, identifier);
+ } else {
+ isc_throw(DataNotFoundError,
+ "Not a list of spec items: " + spec_part->str());
}
id = id.substr(sep + 1);
sep = id.find("/");
+
+ // As long as we are not in the 'final' element as specified
+ // by the identifier, we want to automatically traverse list
+ // and map specifications
+ if (id != "" && id != "/") {
+ spec_part = findListOrMapSubSpec(spec_part);
+ }
}
if (id != "" && id != "/") {
if (spec_part->getType() == Element::list) {
- bool found = false;
- BOOST_FOREACH(ConstElementPtr list_el, spec_part->listValue()) {
- if (list_el->getType() == Element::map &&
- list_el->contains("item_name") &&
- list_el->get("item_name")->stringValue() == id) {
- spec_part = list_el;
- found = true;
- }
- }
- if (!found) {
- isc_throw(DataNotFoundError, identifier);
- }
+ spec_part = findItemInSpecList(spec_part, id, identifier);
} else if (spec_part->getType() == Element::map) {
if (spec_part->contains("map_item_spec")) {
- bool found = false;
- BOOST_FOREACH(ConstElementPtr list_el,
- spec_part->get("map_item_spec")->listValue()) {
- if (list_el->getType() == Element::map &&
- list_el->contains("item_name") &&
- list_el->get("item_name")->stringValue() == id) {
- spec_part = list_el;
- found = true;
- }
- }
- if (!found) {
- isc_throw(DataNotFoundError, identifier);
- }
+ spec_part = findItemInSpecList(
+ spec_part->get("map_item_spec"),
+ id, identifier);
} else {
- isc_throw(DataNotFoundError, identifier);
+ // Either we already have the element we are looking
+ // for, or we are trying to reach something that does
+ // not exist (i.e. the code does not match the spec)
+ if (!spec_part->contains("item_name") ||
+ spec_part->get("item_name")->stringValue() != id) {
+ isc_throw(DataNotFoundError, "Element above " + id +
+ " in " + identifier +
+ " is not a map: " + spec_part->str());
+ }
}
}
}
- //std::cout << "[XX] found spec part: " << std::endl << spec_part << std::endl;
return (spec_part);
}
@@ -164,6 +205,17 @@ ConfigData::getValue(bool& is_default, const std::string& identifier) const {
return (value);
}
+ConstElementPtr
+ConfigData::getDefaultValue(const std::string& identifier) const {
+ ConstElementPtr spec_part =
+ find_spec_part(_module_spec.getConfigSpec(), identifier);
+ if (spec_part->contains("item_default")) {
+ return (spec_part->get("item_default"));
+ } else {
+ isc_throw(DataNotFoundError, "No default for " + identifier);
+ }
+}
+
/// Returns an ElementPtr pointing to a ListElement containing
/// StringElements with the names of the options at the given
/// identifier. If recurse is true, maps will be expanded as well
diff --git a/src/lib/config/config_data.h b/src/lib/config/config_data.h
index 29a5b5f..197d319 100644
--- a/src/lib/config/config_data.h
+++ b/src/lib/config/config_data.h
@@ -57,6 +57,16 @@ public:
/// value that is to be returned
isc::data::ConstElementPtr getValue(const std::string& identifier) const;
+ /// Returns the default value for the given identifier.
+ ///
+ /// \exception DataNotFoundError if the given identifier does not
+ /// exist, or if the given value has no specified default
+ ///
+ /// \param identifier The identifier pointing to the configuration
+ /// value for which the default is to be returned
+ /// \return ElementPtr containing the default value
+ isc::data::ConstElementPtr getDefaultValue(const std::string& identifier) const;
+
/// Returns the value currently set for the given identifier
/// If no value is set, the default value (as specified by the
/// .spec file) is returned. If there is no value and no default,
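
For illustration only (not part of this commit): a sketch of the new getDefaultValue() combined with the list/map traversal added in config_data.cc. The spec fragment is hypothetical; assume config_data is an isc::config::ConfigData built from a spec whose "loggers" list items are maps containing a "severity" string with item_default "INFO".

    // "loggers/severity" now traverses the list_item_spec/map_item_spec
    // levels automatically, so the default can be looked up directly:
    isc::data::ConstElementPtr def =
        config_data.getDefaultValue("loggers/severity");
    // def->stringValue() would be "INFO" for the spec assumed above;
    // DataNotFoundError is thrown if the identifier does not exist or
    // the item has no default.
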
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 22e5a5c..21709fd 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -16,7 +16,7 @@
#define __CONFIG_LOG__H
#include <log/macros.h>
-#include "configdef.h"
+#include "config_messages.h"
namespace isc {
namespace config {
@@ -30,7 +30,10 @@ namespace config {
/// Define the logger used to log messages. We could define it in multiple
/// modules, but defining in a single module and linking to it saves time and
/// space.
-extern isc::log::Logger config_logger; // isc::config::config_logger is the CONFIG logger
+extern isc::log::Logger config_logger;
+
+// Enumerate configuration elements as they are processed.
+const int DBG_CONFIG_PROCESS = DBGLVL_TRACE_BASIC;
} // namespace config
} // namespace isc
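
For reference (not part of this commit): the new constant is used together with the generated message IDs, mirroring the calls in ccsession.cc above; logger_name here is a placeholder variable.

    // Trace-level debug output while individual logger configurations
    // are being processed.
    LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS, CONFIG_LOG_EXPLICIT)
        .arg(logger_name);
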
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
new file mode 100644
index 0000000..c439edd
--- /dev/null
+++ b/src/lib/config/config_messages.mes
@@ -0,0 +1,84 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::config
+
+% CONFIG_CCSESSION_MSG error in CC session message: %1
+There was a problem with an incoming message on the command and control
+channel. The message does not appear to be a valid command, and is
+missing a required element or contains an unknown data format. This
+most likely means that another BIND10 module is sending a bad message.
+The message itself is ignored by this module.
+
+% CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+
+% CONFIG_GET_FAIL error getting configuration from cfgmgr: %1
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+
+% CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+
+% CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+
+% CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+
+% CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+
+% CONFIG_JSON_PARSE JSON parse error in %1: %2
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+
+% CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+
+% CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+
+% CONFIG_OPEN_FAIL error opening %1: %2
+There was an error opening the given file. The reason for the failure
+is included in the message.
diff --git a/src/lib/config/configdef.mes b/src/lib/config/configdef.mes
deleted file mode 100644
index 4c3c991..0000000
--- a/src/lib/config/configdef.mes
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX CONFIG_
-$NAMESPACE isc::config
-
-% FOPEN_ERR error opening %1: %2
-There was an error opening the given file.
-
-% JSON_PARSE JSON parse error in %1: %2
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-
-% MODULE_SPEC module specification error in %1: %2
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
-
-% MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-
-% MANAGER_CONFIG error getting configuration from cfgmgr: %1
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
-
-% CCSESSION_MSG error in CC session message: %1
-There was a problem with an incoming message on the command and control
-channel. The message does not appear to be a valid command, and is
-missing a required element or contains an unknown data format. This
-most likely means that another BIND10 module is sending a bad message.
-The message itself is ignored by this module.
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 1621fe3..bebe695 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -67,10 +67,13 @@ check_config_item(ConstElementPtr spec) {
check_leaf_item(spec, "list_item_spec", Element::map, true);
check_config_item(spec->get("list_item_spec"));
}
- // todo: add stuff for type map
- if (Element::nameToType(spec->get("item_type")->stringValue()) == Element::map) {
+
+ if (spec->get("item_type")->stringValue() == "map") {
check_leaf_item(spec, "map_item_spec", Element::list, true);
check_config_item_list(spec->get("map_item_spec"));
+ } else if (spec->get("item_type")->stringValue() == "named_set") {
+ check_leaf_item(spec, "named_set_item_spec", Element::map, true);
+ check_config_item(spec->get("named_set_item_spec"));
}
}
@@ -84,6 +87,61 @@ check_config_item_list(ConstElementPtr spec) {
}
}
+// checks whether the given value is valid for the given format name
+// (e.g. "date-time"); returns false if it does not match the format
+bool
+check_format(ConstElementPtr value, ConstElementPtr format_name) {
+ typedef std::map<std::string, std::string> format_types;
+ format_types time_formats;
+ // TODO: other format types should be added if necessary
+ time_formats.insert(
+ format_types::value_type("date-time", "%Y-%m-%dT%H:%M:%SZ") );
+ time_formats.insert(
+ format_types::value_type("date", "%Y-%m-%d") );
+ time_formats.insert(
+ format_types::value_type("time", "%H:%M:%S") );
+ BOOST_FOREACH (const format_types::value_type& f, time_formats) {
+ if (format_name->stringValue() == f.first) {
+ struct tm tm;
+ std::vector<char> buf(32);
+ memset(&tm, 0, sizeof(tm));
+ // reverse check
+ return (strptime(value->stringValue().c_str(),
+ f.second.c_str(), &tm) != NULL
+ && strftime(&buf[0], buf.size(),
+ f.second.c_str(), &tm) != 0
+ && strncmp(value->stringValue().c_str(),
+ &buf[0], buf.size()) == 0);
+ }
+ }
+ return (false);
+}
+
+void check_statistics_item_list(ConstElementPtr spec);
+
+void
+check_statistics_item_list(ConstElementPtr spec) {
+ if (spec->getType() != Element::list) {
+ throw ModuleSpecError("statistics is not a list of elements");
+ }
+ BOOST_FOREACH(ConstElementPtr item, spec->listValue()) {
+ check_config_item(item);
+ // additional checks for statistics
+ check_leaf_item(item, "item_title", Element::string, true);
+ check_leaf_item(item, "item_description", Element::string, true);
+ check_leaf_item(item, "item_format", Element::string, false);
+ // check the item_format name and validate item_default against it
+ if (item->contains("item_format")
+ && item->contains("item_default")) {
+ if (!check_format(item->get("item_default"),
+ item->get("item_format"))) {
+ throw ModuleSpecError(
+ "item_default not valid type of item_format");
+ }
+ }
+ }
+}
+
void
check_command(ConstElementPtr spec) {
check_leaf_item(spec, "command_name", Element::string, true);
@@ -113,6 +171,9 @@ check_data_specification(ConstElementPtr spec) {
if (spec->contains("commands")) {
check_command_list(spec->get("commands"));
}
+ if (spec->contains("statistics")) {
+ check_statistics_item_list(spec->get("statistics"));
+ }
}
// checks whether the given element is a valid module specification
@@ -162,6 +223,15 @@ ModuleSpec::getConfigSpec() const {
}
}
+ConstElementPtr
+ModuleSpec::getStatisticsSpec() const {
+ if (module_specification->contains("statistics")) {
+ return (module_specification->get("statistics"));
+ } else {
+ return (ElementPtr());
+ }
+}
+
const std::string
ModuleSpec::getModuleName() const {
return (module_specification->get("module_name")->stringValue());
@@ -183,6 +253,12 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full) const {
}
bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full) const {
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, ElementPtr()));
+}
+
+bool
ModuleSpec::validateCommand(const std::string& command,
ConstElementPtr args,
ElementPtr errors) const
@@ -220,6 +296,14 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full,
return (validateSpecList(spec, data, full, errors));
}
+bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full,
+ ElementPtr errors) const
+{
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, errors));
+}
+
ModuleSpec
moduleSpecFromFile(const std::string& file_name, const bool check)
throw(JSONError, ModuleSpecError)
@@ -286,7 +370,8 @@ check_type(ConstElementPtr spec, ConstElementPtr element) {
return (cur_item_type == "list");
break;
case Element::map:
- return (cur_item_type == "map");
+ return (cur_item_type == "map" ||
+ cur_item_type == "named_set");
break;
}
return (false);
@@ -323,7 +408,27 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
}
}
if (data->getType() == Element::map) {
- if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+ // either a normal 'map' or a 'named set' (determined by which
+ // subspecification it has)
+ if (spec->contains("map_item_spec")) {
+ if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+ return (false);
+ }
+ } else {
+ typedef std::pair<std::string, ConstElementPtr> maptype;
+
+ BOOST_FOREACH(maptype m, data->mapValue()) {
+ if (!validateItem(spec->get("named_set_item_spec"), m.second, full, errors)) {
+ return (false);
+ }
+ }
+ }
+ }
+ if (spec->contains("item_format")) {
+ if (!check_format(data, spec->get("item_format"))) {
+ if (errors) {
+ errors->add(Element::create("Format mismatch"));
+ }
return (false);
}
}
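
For illustration only (not part of this commit): a sketch of validating statistics data against the new "statistics" part of a module spec. The value is made up; it is shaped to match a spec entry with item_name "dummy_time" and item_format "date-time", such as the one added to spec2.spec and exercised by the unit tests below. module_spec is assumed to be a ModuleSpec loaded from such a file.

    // Statistics data keyed by item_name, as a map.
    isc::data::ConstElementPtr stats = isc::data::Element::fromJSON(
        "{ \"dummy_time\": \"2011-11-09T19:29:15Z\" }");

    // With full == true, all non-optional statistics items must be present
    // and each formatted value must round-trip through check_format().
    if (!module_spec.validateStatistics(stats, true)) {
        // value missing or not a valid "date-time" string
    }
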
diff --git a/src/lib/config/module_spec.h b/src/lib/config/module_spec.h
index ab6e273..ce3762f 100644
--- a/src/lib/config/module_spec.h
+++ b/src/lib/config/module_spec.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -71,6 +71,12 @@ namespace isc { namespace config {
/// part of the specification
isc::data::ConstElementPtr getConfigSpec() const;
+ /// Returns the statistics part of the specification as an
+ /// ElementPtr
+ /// \return ElementPtr Shared pointer to the statistics
+ /// part of the specification
+ isc::data::ConstElementPtr getStatisticsSpec() const;
+
/// Returns the full module specification as an ElementPtr
/// \return ElementPtr Shared pointer to the specification
isc::data::ConstElementPtr getFullSpec() const {
@@ -95,6 +101,17 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data,
const bool full = false) const;
+ // returns true if the given statistics data conforms to this
+ // statistics specification
+ /// Validates the given statistics data for this specification.
+ /// \param data The base \c Element of the data to check
+ /// \param full If true, all non-optional statistics parameters
+ /// must be specified.
+ /// \return true if the data conforms to the specification,
+ /// false otherwise.
+ bool validateStatistics(isc::data::ConstElementPtr data,
+ const bool full = false) const;
+
/// Validates the arguments for the given command
///
/// This checks the command and argument against the
@@ -142,6 +159,10 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data, const bool full,
isc::data::ElementPtr errors) const;
+ /// errors must be of type ListElement
+ bool validateStatistics(isc::data::ConstElementPtr data, const bool full,
+ isc::data::ElementPtr errors) const;
+
private:
bool validateItem(isc::data::ConstElementPtr spec,
isc::data::ConstElementPtr data,
diff --git a/src/lib/config/tests/Makefile.am b/src/lib/config/tests/Makefile.am
index 0d2c29b..2f1fc6f 100644
--- a/src/lib/config/tests/Makefile.am
+++ b/src/lib/config/tests/Makefile.am
@@ -11,7 +11,7 @@ endif
CLEANFILES = *.gcno *.gcda
-lib_LTLIBRARIES = libfake_session.la
+noinst_LTLIBRARIES = libfake_session.la
libfake_session_la_SOURCES = fake_session.h fake_session.cc
TESTS =
@@ -22,11 +22,12 @@ run_unittests_SOURCES = ccsession_unittests.cc module_spec_unittests.cc config_d
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += libfake_session.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
-run_unittests_LDADD += libfake_session.la
-run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index f566949..793fa30 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -24,6 +24,8 @@
#include <config/tests/data_def_unittests_config.h>
+#include <log/logger_name.h>
+
using namespace isc::data;
using namespace isc::config;
using namespace isc::cc;
@@ -42,7 +44,9 @@ el(const std::string& str) {
class CCSessionTest : public ::testing::Test {
protected:
- CCSessionTest() : session(el("[]"), el("[]"), el("[]")) {
+ CCSessionTest() : session(el("[]"), el("[]"), el("[]")),
+ root_name(isc::log::getRootLoggerName())
+ {
// upon creation of a ModuleCCSession, the class
// sends its specification to the config manager.
// it expects an ok answer back, so everytime we
@@ -50,8 +54,11 @@ protected:
// ok answer.
session.getMessages()->add(createAnswer());
}
- ~CCSessionTest() {}
+ ~CCSessionTest() {
+ isc::log::setRootLoggerName(root_name);
+ }
FakeSession session;
+ const std::string root_name;
};
TEST_F(CCSessionTest, createAnswer) {
@@ -149,7 +156,8 @@ TEST_F(CCSessionTest, parseCommand) {
TEST_F(CCSessionTest, session1) {
EXPECT_FALSE(session.haveSubscription("Spec1", "*"));
- ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL,
+ true, false);
EXPECT_TRUE(session.haveSubscription("Spec1", "*"));
EXPECT_EQ(1, session.getMsgQueue()->size());
@@ -160,18 +168,23 @@ TEST_F(CCSessionTest, session1) {
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
+
+ // with this argument, the session should not automatically
+ // subscribe to logging config
+ EXPECT_FALSE(session.haveSubscription("Logging", "*"));
}
TEST_F(CCSessionTest, session2) {
EXPECT_FALSE(session.haveSubscription("Spec2", "*"));
- ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL,
+ true, false);
EXPECT_TRUE(session.haveSubscription("Spec2", "*"));
EXPECT_EQ(1, session.getMsgQueue()->size());
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
@@ -211,14 +224,14 @@ TEST_F(CCSessionTest, session3) {
EXPECT_FALSE(session.haveSubscription("Spec2", "*"));
ModuleCCSession mccs(ccspecfile("spec2.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec2", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(1, session.getMsgQueue()->size());
@@ -235,7 +248,7 @@ TEST_F(CCSessionTest, checkCommand) {
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
@@ -312,7 +325,7 @@ TEST_F(CCSessionTest, checkCommand2) {
session.getMessages()->add(createAnswer(0, el("{}")));
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
ConstElementPtr msg;
std::string group, to;
@@ -346,11 +359,26 @@ TEST_F(CCSessionTest, checkCommand2) {
EXPECT_EQ(2, mccs.getValue("item1")->intValue());
}
+std::string remote_module_name;
+int remote_item1(0);
+ConstElementPtr remote_config;
+ModuleCCSession *remote_mccs(NULL);
+
+void remoteHandler(const std::string& module_name,
+ ConstElementPtr config,
+ const ConfigData&) {
+ remote_module_name = module_name;
+ remote_item1 = remote_mccs->getRemoteConfigValue("Spec2", "item1")->
+ intValue();
+ remote_config = config;
+}
+
TEST_F(CCSessionTest, remoteConfig) {
std::string module_name;
int item1;
- ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL,
+ false, false);
EXPECT_TRUE(session.haveSubscription("Spec1", "*"));
// first simply connect, with no config values, and see we get
@@ -392,6 +420,112 @@ TEST_F(CCSessionTest, remoteConfig) {
session.getMessages()->add(createAnswer());
EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")), CCSessionError);
+
+ {
+ SCOPED_TRACE("With module name");
+ // Try adding it with downloading the spec from config manager
+ ModuleSpec spec(moduleSpecFromFile(ccspecfile("spec2.spec")));
+ session.getMessages()->add(createAnswer(0, spec.getFullSpec()));
+ session.getMessages()->add(createAnswer(0, el("{}")));
+
+ EXPECT_NO_THROW(module_name = mccs.addRemoteConfig("Spec2", NULL,
+ false));
+
+ const size_t qsize(session.getMsgQueue()->size());
+ EXPECT_TRUE(session.getMsgQueue()->get(qsize - 2)->equals(*el(
+ "[ \"ConfigManager\", \"*\", { \"command\": ["
+ "\"get_module_spec\", { \"module_name\": \"Spec2\" } ] } ]")));
+ EXPECT_TRUE(session.getMsgQueue()->get(qsize - 1)->equals(*el(
+ "[ \"ConfigManager\", \"*\", { \"command\": [ \"get_config\","
+ "{ \"module_name\": \"Spec2\" } ] } ]")));
+ EXPECT_EQ("Spec2", module_name);
+ // Since we returned an empty local config above, the default value
+ // for "item1", which is 1, should be used.
+ EXPECT_NO_THROW(item1 =
+ mccs.getRemoteConfigValue(module_name,
+ "item1")->intValue());
+ EXPECT_EQ(1, item1);
+
+ mccs.removeRemoteConfig(module_name);
+ }
+
+ {
+ SCOPED_TRACE("With bad module name");
+ // It is almost the same as above, but we supply wrong module name.
+ // It should fail.
+ // Try adding it with downloading the spec from config manager
+ ModuleSpec spec(moduleSpecFromFile(ccspecfile("spec2.spec")));
+ session.getMessages()->add(createAnswer(0, spec.getFullSpec()));
+
+ EXPECT_THROW(module_name = mccs.addRemoteConfig("Spec1", NULL, false),
+ CCSessionError);
+ }
+
+ {
+ // Try adding it with a handler.
+ // Pass non-default value to see the handler is called after
+ // downloading the configuration, not too soon.
+ SCOPED_TRACE("With handler");
+ session.getMessages()->add(createAnswer(0, el("{ \"item1\": 2 }")));
+ remote_mccs = &mccs;
+ module_name = mccs.addRemoteConfig(ccspecfile("spec2.spec"),
+ remoteHandler);
+ {
+ SCOPED_TRACE("Before update");
+ EXPECT_EQ("Spec2", module_name);
+ EXPECT_TRUE(session.haveSubscription("Spec2", "*"));
+ // Now check the parameters the remote handler stored
+ // This also checks it was called
+ EXPECT_EQ("Spec2", remote_module_name);
+ remote_module_name = "";
+ EXPECT_EQ(2, remote_item1);
+ remote_item1 = 0;
+ if (remote_config) {
+ EXPECT_EQ(2, remote_config->get("item1")->intValue());
+ } else {
+ ADD_FAILURE() << "Remote config not set";
+ }
+ remote_config.reset();
+ // Make sure normal way still works
+ item1 = mccs.getRemoteConfigValue(module_name,
+ "item1")->intValue();
+ EXPECT_EQ(2, item1);
+ }
+
+ {
+ SCOPED_TRACE("After update");
+ session.addMessage(el("{ \"command\": [ \"config_update\", "
+ "{ \"item1\": 3 } ] }"), module_name, "*");
+ mccs.checkCommand();
+ EXPECT_EQ("Spec2", remote_module_name);
+ remote_module_name = "";
+ EXPECT_EQ(3, remote_item1);
+ remote_item1 = 0;
+ if (remote_config) {
+ EXPECT_EQ(3, remote_config->get("item1")->intValue());
+ } else {
+ ADD_FAILURE() << "Remote config not set";
+ }
+ remote_config.reset();
+ // Make sure normal way still works
+ item1 = mccs.getRemoteConfigValue(module_name,
+ "item1")->intValue();
+ EXPECT_EQ(3, item1);
+ }
+
+ remote_mccs = NULL;
+ mccs.removeRemoteConfig(module_name);
+
+ {
+ SCOPED_TRACE("When removed");
+ // Make sure nothing is called any more
+ session.addMessage(el("{ \"command\": [ \"config_update\", "
+ "{ \"item1\": 4 } ] }"), module_name, "*");
+ EXPECT_EQ("", remote_module_name);
+ EXPECT_EQ(0, remote_item1);
+ EXPECT_FALSE(remote_config);
+ }
+ }
}
TEST_F(CCSessionTest, ignoreRemoteConfigCommands) {
@@ -399,7 +533,8 @@ TEST_F(CCSessionTest, ignoreRemoteConfigCommands) {
session.getMessages()->add(createAnswer(0, el("{ }")));
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
- ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler, my_command_handler);
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
+ my_command_handler, false, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
@@ -449,4 +584,128 @@ TEST_F(CCSessionTest, initializationFail) {
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
}
+// Test it throws when we try to start it twice (once from the constructor)
+TEST_F(CCSessionTest, doubleStartImplicit) {
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
+ true, false);
+ EXPECT_THROW(mccs.start(), CCSessionError);
+}
+
+// The same, but both starts are explicit
+TEST_F(CCSessionTest, doubleStartExplicit) {
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
+ false, false);
+ mccs.start();
+ EXPECT_THROW(mccs.start(), CCSessionError);
+}
+
+// Test that a synchronous receive is allowed before we start the session,
+// and that the double-read check triggers if we try it afterwards
+TEST_F(CCSessionTest, delayedStart) {
+ ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL,
+ false, false);
+ session.getMessages()->add(createAnswer());
+ ConstElementPtr env, answer;
+ EXPECT_NO_THROW(session.group_recvmsg(env, answer, false, 3));
+ mccs.start();
+ session.getMessages()->add(createAnswer());
+ EXPECT_THROW(session.group_recvmsg(env, answer, false, 3),
+ FakeSession::DoubleRead);
+}
+
+TEST_F(CCSessionTest, loggingStart) {
+ // provide the logging module spec
+ ConstElementPtr log_spec = moduleSpecFromFile(LOG_SPEC_FILE).getFullSpec();
+ session.getMessages()->add(createAnswer(0, log_spec));
+ // just give an empty config
+ session.getMessages()->add(createAnswer(0, el("{}")));
+ ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL,
+ true, true);
+ EXPECT_TRUE(session.haveSubscription("Logging", "*"));
+}
+
+TEST_F(CCSessionTest, loggingStartBadSpec) {
+ // provide a broken (empty) logging module spec
+ session.getMessages()->add(createAnswer(0, el("{}")));
+ // just give an empty config
+ session.getMessages()->add(createAnswer(0, el("{}")));
+ EXPECT_THROW(new ModuleCCSession(ccspecfile("spec2.spec"), session,
+ NULL, NULL), ModuleSpecError);
+ EXPECT_FALSE(session.haveSubscription("Logging", "*"));
+}
+
+// Similar to the above, but triggered implicitly by calling addRemoteConfig().
+// ModuleCCSession should be constructed with start_immediately set to false
+// if addRemoteConfig() needs to be called.
+// The correct cases are covered in the remoteConfig test.
+TEST_F(CCSessionTest, doubleStartWithAddRemoteConfig) {
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
+ true, false);
+ session.getMessages()->add(createAnswer(0, el("{}")));
+ EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")),
+ FakeSession::DoubleRead);
+}
+
+namespace {
+void doRelatedLoggersTest(const char* input, const char* expected) {
+ ConstElementPtr all_conf = isc::data::Element::fromJSON(input);
+ ConstElementPtr expected_conf = isc::data::Element::fromJSON(expected);
+ EXPECT_EQ(*expected_conf, *isc::config::getRelatedLoggers(all_conf));
+}
+} // end anonymous namespace
+
+TEST(LogConfigTest, relatedLoggersTest) {
+ // make sure logger configs for 'other' programs are ignored,
+ // and that * is substituted correctly
+ // We'll use a root logger name of "b10-test".
+ isc::log::setRootLoggerName("b10-test");
+
+ doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"test_other\" }]",
+ "[]");
+ doRelatedLoggersTest("[{ \"name\": \"test_other.somelib\" }]",
+ "[]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
+ " { \"name\": \"test\" }]",
+ "[ { \"name\": \"b10-test\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"test\" }]",
+ "[ { \"name\": \"b10-test\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+ " { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
+ " { \"name\": \"test\" },"
+ " { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test\" },"
+ " { \"name\": \"b10-test.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\" }]",
+ "[ { \"name\": \"b10-test\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
+ "[ { \"name\": \"b10-test.somelib\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+ " { \"name\": \"test\", \"severity\": \"WARN\"}]",
+ "[ { \"name\": \"b10-test\", \"severity\": \"WARN\"} ]");
+ doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
+ " { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
+ "[ { \"name\": \"b10-test\", \"severity\": \"DEBUG\"} ]");
+ doRelatedLoggersTest("[ { \"name\": \"b10-test\" }]",
+ "[]");
+ // Make sure 'bad' names like '*foo.x' or '*lib' are ignored
+ // (cfgmgr should have already caught them in the logconfig plugin
+ // check, and is responsible for reporting the error)
+ doRelatedLoggersTest("[ { \"name\": \"*foo\" }]",
+ "[ ]");
+ doRelatedLoggersTest("[ { \"name\": \"*foo.bar\" }]",
+ "[ ]");
+ doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
+ " { \"name\": \"*foo.lib\" },"
+ " { \"name\": \"test\" } ]",
+ "[ { \"name\": \"b10-test\" } ]");
+}
+
}
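
The relatedLoggersTest cases above pin down how logger entries in a configuration map onto the local root logger: entries belonging to other programs are dropped, a bare module name or "*" becomes the root logger name ("b10-test" here), a "<module>.sublib" or "*.sublib" entry keeps its sub-logger suffix, and malformed wildcards are ignored. The sketch below is illustrative only: it mirrors the name mapping the test expects from getRelatedLoggers(), is not the library implementation, ignores severity merging, and uses plain std::string containers instead of isc::data elements.

    #include <string>
    #include <vector>

    // Illustrative only: name substitution as exercised by relatedLoggersTest.
    std::vector<std::string>
    relatedLoggerNames(const std::vector<std::string>& names,
                       const std::string& root,     // e.g. "b10-test"
                       const std::string& module) { // e.g. "test"
        std::vector<std::string> result;
        for (size_t i = 0; i < names.size(); ++i) {
            const std::string& n = names[i];
            if (n == module || n == "*") {
                // bare module name or plain wildcard -> root logger
                result.push_back(root);
            } else if (n.compare(0, module.size() + 1, module + ".") == 0) {
                // "test.somelib" -> "b10-test.somelib"
                result.push_back(root + n.substr(module.size()));
            } else if (n.compare(0, 2, "*.") == 0) {
                // "*.somelib" -> "b10-test.somelib"
                result.push_back(root + n.substr(1));
            }
            // everything else ("other_module", "*foo", "b10-test") is ignored
        }
        return (result);
    }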
diff --git a/src/lib/config/tests/config_data_unittests.cc b/src/lib/config/tests/config_data_unittests.cc
index 974812d..26a3fc6 100644
--- a/src/lib/config/tests/config_data_unittests.cc
+++ b/src/lib/config/tests/config_data_unittests.cc
@@ -64,21 +64,35 @@ TEST(ConfigData, getValue) {
EXPECT_EQ("{ }", cd.getValue(is_default, "value6/")->str());
EXPECT_TRUE(is_default);
EXPECT_EQ("[ ]", cd.getValue("value8")->str());
+ EXPECT_EQ("[ ]", cd.getDefaultValue("value8")->str());
+ EXPECT_EQ("empty", cd.getValue("value8/a")->stringValue());
EXPECT_THROW(cd.getValue("")->str(), DataNotFoundError);
EXPECT_THROW(cd.getValue("/")->str(), DataNotFoundError);
EXPECT_THROW(cd.getValue("no_such_item")->str(), DataNotFoundError);
EXPECT_THROW(cd.getValue("value6/a")->str(), DataNotFoundError);
EXPECT_THROW(cd.getValue("value6/no_such_item")->str(), DataNotFoundError);
- EXPECT_THROW(cd.getValue("value8/a")->str(), DataNotFoundError);
- EXPECT_THROW(cd.getValue("value8/a")->str(), DataNotFoundError);
- EXPECT_THROW(cd.getValue("value8/a")->str(), DataNotFoundError);
+ EXPECT_THROW(cd.getValue("value8/b")->str(), DataNotFoundError);
ModuleSpec spec1 = moduleSpecFromFile(std::string(TEST_DATA_PATH) + "/spec1.spec");
ConfigData cd1 = ConfigData(spec1);
EXPECT_THROW(cd1.getValue("anything")->str(), DataNotFoundError);
}
+TEST(ConfigData, getDefaultValue) {
+ ModuleSpec spec31 = moduleSpecFromFile(std::string(TEST_DATA_PATH) + "/spec31.spec");
+ ConfigData cd = ConfigData(spec31);
+ EXPECT_EQ("[ ]", cd.getDefaultValue("first_list_items")->str());
+ EXPECT_EQ("\"foo\"", cd.getDefaultValue("first_list_items/foo")->str());
+ EXPECT_EQ("{ }", cd.getDefaultValue("first_list_items/second_list_items/map_element")->str());
+ EXPECT_EQ("[ ]", cd.getDefaultValue("first_list_items/second_list_items/map_element/list1")->str());
+ EXPECT_EQ("1", cd.getDefaultValue("first_list_items/second_list_items/map_element/list1/number")->str());
+
+ EXPECT_THROW(cd.getDefaultValue("doesnotexist")->str(), DataNotFoundError);
+ EXPECT_THROW(cd.getDefaultValue("first_list_items/second_list_items/map_element/list1/doesnotexist")->str(), DataNotFoundError);
+}
+
+
TEST(ConfigData, setLocalConfig) {
ModuleSpec spec2 = moduleSpecFromFile(std::string(TEST_DATA_PATH) + "/spec2.spec");
ConfigData cd = ConfigData(spec2);
diff --git a/src/lib/config/tests/data_def_unittests_config.h.in b/src/lib/config/tests/data_def_unittests_config.h.in
index 80e9cfa..f9662f0 100644
--- a/src/lib/config/tests/data_def_unittests_config.h.in
+++ b/src/lib/config/tests/data_def_unittests_config.h.in
@@ -13,3 +13,4 @@
// PERFORMANCE OF THIS SOFTWARE.
#define TEST_DATA_PATH "@abs_srcdir@/testdata"
+#define LOG_SPEC_FILE "@abs_top_srcdir@/src/bin/cfgmgr/plugins/logging.spec"
diff --git a/src/lib/config/tests/fake_session.cc b/src/lib/config/tests/fake_session.cc
index 5f79d48..2b216e7 100644
--- a/src/lib/config/tests/fake_session.cc
+++ b/src/lib/config/tests/fake_session.cc
@@ -71,7 +71,8 @@ FakeSession::FakeSession(isc::data::ElementPtr initial_messages,
isc::data::ElementPtr msg_queue) :
messages_(initial_messages),
subscriptions_(subscriptions),
- msg_queue_(msg_queue)
+ msg_queue_(msg_queue),
+ started_(false)
{
}
@@ -84,6 +85,7 @@ FakeSession::disconnect() {
void
FakeSession::startRead(boost::function<void()>) {
+ started_ = true;
}
void
@@ -91,7 +93,13 @@ FakeSession::establish(const char*) {
}
bool
-FakeSession::recvmsg(ConstElementPtr& msg, bool, int) {
+FakeSession::recvmsg(ConstElementPtr& msg, bool nonblock, int) {
+ if (started_ && !nonblock) {
+ // This would schedule another read for length, leading to
+ // corputed data
+ isc_throw(DoubleRead, "Second read scheduled from recvmsg");
+ }
+
//cout << "[XX] client asks for message " << endl;
if (messages_ &&
messages_->getType() == Element::list &&
@@ -105,7 +113,15 @@ FakeSession::recvmsg(ConstElementPtr& msg, bool, int) {
}
bool
-FakeSession::recvmsg(ConstElementPtr& env, ConstElementPtr& msg, bool, int) {
+FakeSession::recvmsg(ConstElementPtr& env, ConstElementPtr& msg, bool nonblock,
+ int)
+{
+ if (started_ && !nonblock) {
+ // This would schedule another read for length, leading to
+ // corrupted data
+ isc_throw(DoubleRead, "Second read scheduled from recvmsg");
+ }
+
//cout << "[XX] client asks for message and env" << endl;
env = ElementPtr();
if (messages_ &&
diff --git a/src/lib/config/tests/fake_session.h b/src/lib/config/tests/fake_session.h
index ac8e291..85e47d5 100644
--- a/src/lib/config/tests/fake_session.h
+++ b/src/lib/config/tests/fake_session.h
@@ -42,6 +42,14 @@ public:
isc::data::ElementPtr msg_queue);
virtual ~FakeSession();
+ // This is thrown if two reads for length are scheduled at once.
+ // Doing so currently causes problems (see discussion in ticket #931).
+ class DoubleRead : public Exception {
+ public:
+ DoubleRead(const char* file, size_t line, const char* what) :
+ Exception(file, line, what) {}
+ };
+
virtual void startRead(boost::function<void()> read_callback);
virtual void establish(const char* socket_file = NULL);
@@ -89,6 +97,7 @@ private:
const isc::data::ElementPtr messages_;
isc::data::ElementPtr subscriptions_;
isc::data::ElementPtr msg_queue_;
+ bool started_;
};
} // namespace cc
} // namespace isc
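
The started_ flag and DoubleRead exception added above form a small guard: once an asynchronous read has been started, a further blocking receive would schedule a second length read on the same stream and corrupt the framing, so the fake session fails fast instead. A condensed, self-contained sketch of the same pattern follows (the class and method names are illustrative, not the FakeSession API):

    #include <stdexcept>

    // Illustrative guard: refuse a blocking receive once an async read is pending.
    class ReadGuard {
    public:
        ReadGuard() : started_(false) {}
        void startAsyncRead() { started_ = true; }
        void blockingReceive() {
            if (started_) {
                // A second length read would interleave with the pending one
                // and corrupt the message framing, so fail fast instead.
                throw std::runtime_error("second read scheduled");
            }
            // ... perform the synchronous read here ...
        }
    private:
        bool started_;
    };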
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index 1b43350..b2ca7b4 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2009, 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,8 @@
#include <fstream>
+#include <boost/foreach.hpp>
+
#include <config/tests/data_def_unittests_config.h>
using namespace isc::data;
@@ -57,6 +59,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
dd = moduleSpecFromFile(specfile("spec2.spec"));
EXPECT_EQ("[ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ]", dd.getCommandsSpec()->str());
+ EXPECT_EQ("[ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ]", dd.getStatisticsSpec()->str());
EXPECT_EQ("Spec2", dd.getModuleName());
EXPECT_EQ("", dd.getModuleDescription());
@@ -64,6 +67,11 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ("Spec25", dd.getModuleName());
EXPECT_EQ("Just an empty module", dd.getModuleDescription());
EXPECT_THROW(moduleSpecFromFile(specfile("spec26.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec34.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec35.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec36.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec37.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec38.spec")), ModuleSpecError);
std::ifstream file;
file.open(specfile("spec1.spec").c_str());
@@ -71,6 +79,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ(dd.getFullSpec()->get("module_name")
->stringValue(), "Spec1");
EXPECT_TRUE(isNull(dd.getCommandsSpec()));
+ EXPECT_TRUE(isNull(dd.getStatisticsSpec()));
std::ifstream file2;
file2.open(specfile("spec8.spec").c_str());
@@ -114,6 +123,12 @@ TEST(ModuleSpec, SpecfileConfigData) {
"commands is not a list of elements");
}
+TEST(ModuleSpec, SpecfileStatistics) {
+ moduleSpecError("spec36.spec", "item_default not valid type of item_format");
+ moduleSpecError("spec37.spec", "statistics is not a list of elements");
+ moduleSpecError("spec38.spec", "item_default not valid type of item_format");
+}
+
TEST(ModuleSpec, SpecfileCommands) {
moduleSpecError("spec17.spec",
"command_name missing in { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\" }");
@@ -137,6 +152,17 @@ dataTest(const ModuleSpec& dd, const std::string& data_file_name) {
}
bool
+statisticsTest(const ModuleSpec& dd, const std::string& data_file_name) {
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data));
+}
+
+bool
dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
ElementPtr errors)
{
@@ -149,6 +175,19 @@ dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
return (dd.validateConfig(data, true, errors));
}
+bool
+statisticsTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
+ ElementPtr errors)
+{
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data, true, errors));
+}
+
TEST(ModuleSpec, DataValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec22.spec"));
@@ -175,6 +214,17 @@ TEST(ModuleSpec, DataValidation) {
EXPECT_EQ("[ \"Unknown item value_does_not_exist\" ]", errors->str());
}
+TEST(ModuleSpec, StatisticsValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec33.spec"));
+
+ EXPECT_TRUE(statisticsTest(dd, "data33_1.data"));
+ EXPECT_FALSE(statisticsTest(dd, "data33_2.data"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_FALSE(statisticsTestWithErrors(dd, "data33_2.data", errors));
+ EXPECT_EQ("[ \"Format mismatch\", \"Format mismatch\", \"Format mismatch\" ]", errors->str());
+}
+
TEST(ModuleSpec, CommandValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec2.spec"));
ConstElementPtr arg = Element::fromJSON("{}");
@@ -211,3 +261,118 @@ TEST(ModuleSpec, CommandValidation) {
EXPECT_EQ(errors->get(0)->stringValue(), "Type mismatch");
}
+
+TEST(ModuleSpec, NamedSetValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec32.spec"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_TRUE(dataTestWithErrors(dd, "data32_1.data", errors));
+ EXPECT_FALSE(dataTest(dd, "data32_2.data"));
+ EXPECT_FALSE(dataTest(dd, "data32_3.data"));
+}
+
+TEST(ModuleSpec, CheckFormat) {
+
+ const std::string json_begin = "{ \"module_spec\": { \"module_name\": \"Foo\", \"statistics\": [ { \"item_name\": \"dummy_time\", \"item_type\": \"string\", \"item_optional\": true, \"item_title\": \"Dummy Time\", \"item_description\": \"A dummy date time\"";
+ const std::string json_end = " } ] } }";
+ std::string item_default;
+ std::string item_format;
+ std::vector<std::string> specs;
+ ConstElementPtr el;
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_format);
+
+ item_default = "\"item_default\": \"a\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"b\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"c\"";
+ specs.push_back("," + item_default);
+
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_format);
+
+ specs.push_back("");
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_NO_THROW(ModuleSpec(el, true));
+ }
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"2011-13-99T99:99:99Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-13-99\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"99:99:99Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ // wrong date-time-type format not ending with "Z"
+ item_default = "\"item_default\": \"2011-05-27T19:42:57\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong date-type format ending with "T"
+ item_default = "\"item_default\": \"2011-05-27T\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong time-type format ending with "Z"
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_THROW(ModuleSpec(el, true), ModuleSpecError);
+ }
+}
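
For reference, validating a statistics payload outside the test harness follows the same shape as statisticsTestWithErrors() above: load the spec, parse the candidate data, and ask the spec to check it. This is a minimal sketch; the include paths and the literal file name are assumptions, while moduleSpecFromFile, Element::fromJSON, Element::createList and validateStatistics are the calls exercised by the tests.

    #include <config/module_spec.h>
    #include <cc/data.h>

    bool checkStats() {
        using namespace isc::config;
        using namespace isc::data;

        // A spec that declares "statistics" items, as spec33.spec does.
        ModuleSpec spec = moduleSpecFromFile("spec33.spec");
        // Candidate statistics data, e.g. as reported by a module.
        ConstElementPtr stats = Element::fromJSON(
            "{ \"dummy_str\": \"Dummy String\", \"dummy_int\": 118,"
            "  \"dummy_datetime\": \"2011-05-27T19:42:57Z\","
            "  \"dummy_date\": \"2011-05-27\", \"dummy_time\": \"19:42:57\" }");
        // Collects per-item problems such as "Format mismatch".
        ElementPtr errors = Element::createList();
        return (spec.validateStatistics(stats, true, errors));
    }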
diff --git a/src/lib/config/tests/run_unittests.cc b/src/lib/config/tests/run_unittests.cc
index fab90f5..19d2be1 100644
--- a/src/lib/config/tests/run_unittests.cc
+++ b/src/lib/config/tests/run_unittests.cc
@@ -13,16 +13,12 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
#include <log/logger_support.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
-
- // TODO: UNCOMMENT ON MERGE
- // (this is the call we want in master, but branch point does not
- // have this yet)
- //isc::log::initLogger();
-
- return (RUN_ALL_TESTS());
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 94c087d..0d8b92e 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -22,6 +22,11 @@ EXTRA_DIST += data22_7.data
EXTRA_DIST += data22_8.data
EXTRA_DIST += data22_9.data
EXTRA_DIST += data22_10.data
+EXTRA_DIST += data32_1.data
+EXTRA_DIST += data32_2.data
+EXTRA_DIST += data32_3.data
+EXTRA_DIST += data33_1.data
+EXTRA_DIST += data33_2.data
EXTRA_DIST += spec1.spec
EXTRA_DIST += spec2.spec
EXTRA_DIST += spec3.spec
@@ -51,3 +56,12 @@ EXTRA_DIST += spec26.spec
EXTRA_DIST += spec27.spec
EXTRA_DIST += spec28.spec
EXTRA_DIST += spec29.spec
+EXTRA_DIST += spec30.spec
+EXTRA_DIST += spec31.spec
+EXTRA_DIST += spec32.spec
+EXTRA_DIST += spec33.spec
+EXTRA_DIST += spec34.spec
+EXTRA_DIST += spec35.spec
+EXTRA_DIST += spec36.spec
+EXTRA_DIST += spec37.spec
+EXTRA_DIST += spec38.spec
diff --git a/src/lib/config/tests/testdata/data32_1.data b/src/lib/config/tests/testdata/data32_1.data
new file mode 100644
index 0000000..5695b52
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_1.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": 1, "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_2.data b/src/lib/config/tests/testdata/data32_2.data
new file mode 100644
index 0000000..d5b9765
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_2.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": "wrongtype", "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_3.data b/src/lib/config/tests/testdata/data32_3.data
new file mode 100644
index 0000000..85f32fe
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_3.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": []
+}
diff --git a/src/lib/config/tests/testdata/data33_1.data b/src/lib/config/tests/testdata/data33_1.data
new file mode 100644
index 0000000..429852c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_1.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "2011-05-27T19:42:57Z",
+ "dummy_date": "2011-05-27",
+ "dummy_time": "19:42:57"
+}
diff --git a/src/lib/config/tests/testdata/data33_2.data b/src/lib/config/tests/testdata/data33_2.data
new file mode 100644
index 0000000..eb0615c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_2.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "xxxx",
+ "dummy_date": "xxxx",
+ "dummy_time": "xxxx"
+}
diff --git a/src/lib/config/tests/testdata/spec2.spec b/src/lib/config/tests/testdata/spec2.spec
index 59b8ebc..4352422 100644
--- a/src/lib/config/tests/testdata/spec2.spec
+++ b/src/lib/config/tests/testdata/spec2.spec
@@ -66,6 +66,17 @@
"command_description": "Shut down BIND 10",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy date time",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/lib/config/tests/testdata/spec30.spec b/src/lib/config/tests/testdata/spec30.spec
new file mode 100644
index 0000000..a9e00ad
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec30.spec
@@ -0,0 +1,45 @@
+{
+ "module_spec": {
+ "module_name": "lists",
+ "module_description": "Logging options",
+ "config_data": [
+ {
+ "item_name": "first_list_items",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "first_list_item",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "foo",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "foo"
+ },
+ { "item_name": "second_list_items",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "second_list_item",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "final_element",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "hello"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec31.spec b/src/lib/config/tests/testdata/spec31.spec
new file mode 100644
index 0000000..9eebfd1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec31.spec
@@ -0,0 +1,63 @@
+{
+ "module_spec": {
+ "module_name": "lists",
+ "module_description": "Logging options",
+ "config_data": [
+ {
+ "item_name": "first_list_items",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "first_list_item",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "foo",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "foo"
+ },
+ { "item_name": "second_list_items",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec": {
+ "item_name": "second_list_item",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "map_element",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "list1",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec":
+ { "item_name": "list2",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [],
+ "list_item_spec":
+ { "item_name": "number",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 1
+ }
+ }
+ }]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec32.spec b/src/lib/config/tests/testdata/spec32.spec
new file mode 100644
index 0000000..0d8cf7c
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec32.spec
@@ -0,0 +1,40 @@
+{
+ "module_spec": {
+ "module_name": "Spec32",
+ "config_data": [
+ { "item_name": "named_set_item",
+ "item_type": "named_set",
+ "item_optional": false,
+ "item_default": { "a": 1, "b": 2 },
+ "named_set_item_spec": {
+ "item_name": "named_set_element",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 3
+ }
+ },
+ { "item_name": "named_set_item2",
+ "item_type": "named_set",
+ "item_optional": true,
+ "item_default": { },
+ "named_set_item_spec": {
+ "item_name": "named_set_element",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "map_item_spec": [
+ { "item_name": "first",
+ "item_type": "integer",
+ "item_optional": true
+ },
+ { "item_name": "second",
+ "item_type": "string",
+ "item_optional": true
+ }
+ ]
+ }
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec33.spec b/src/lib/config/tests/testdata/spec33.spec
new file mode 100644
index 0000000..3002488
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec33.spec
@@ -0,0 +1,50 @@
+{
+ "module_spec": {
+ "module_name": "Spec33",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string"
+ },
+ {
+ "item_name": "dummy_int",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Dummy Integer",
+ "item_description": "A dummy integer"
+ },
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "dummy_date",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01",
+ "item_title": "Dummy Date",
+ "item_description": "A dummy date",
+ "item_format": "date"
+ },
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "00:00:00",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy time",
+ "item_format": "time"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec34.spec b/src/lib/config/tests/testdata/spec34.spec
new file mode 100644
index 0000000..dd1f3ca
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec34.spec
@@ -0,0 +1,14 @@
+{
+ "module_spec": {
+ "module_name": "Spec34",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_description": "A dummy string"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec35.spec b/src/lib/config/tests/testdata/spec35.spec
new file mode 100644
index 0000000..86aaf14
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec35.spec
@@ -0,0 +1,15 @@
+{
+ "module_spec": {
+ "module_name": "Spec35",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec36.spec b/src/lib/config/tests/testdata/spec36.spec
new file mode 100644
index 0000000..fb9ce26
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec36.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec36",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string",
+ "item_format": "dummy"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec37.spec b/src/lib/config/tests/testdata/spec37.spec
new file mode 100644
index 0000000..bc444d1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec37.spec
@@ -0,0 +1,7 @@
+{
+ "module_spec": {
+ "module_name": "Spec37",
+ "statistics": 8
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec38.spec b/src/lib/config/tests/testdata/spec38.spec
new file mode 100644
index 0000000..1892e88
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec38.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec38",
+ "statistics": [
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "11",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/cryptolink/crypto_hmac.cc b/src/lib/cryptolink/crypto_hmac.cc
index d20c85b..277b036 100644
--- a/src/lib/cryptolink/crypto_hmac.cc
+++ b/src/lib/cryptolink/crypto_hmac.cc
@@ -17,6 +17,7 @@
#include <boost/scoped_ptr.hpp>
+#include <botan/version.h>
#include <botan/botan.h>
#include <botan/hmac.h>
#include <botan/hash.h>
@@ -35,6 +36,15 @@ getBotanHashAlgorithmName(isc::cryptolink::HashAlgorithm algorithm) {
case isc::cryptolink::SHA256:
return ("SHA-256");
break;
+ case isc::cryptolink::SHA224:
+ return ("SHA-224");
+ break;
+ case isc::cryptolink::SHA384:
+ return ("SHA-384");
+ break;
+ case isc::cryptolink::SHA512:
+ return ("SHA-512");
+ break;
case isc::cryptolink::UNKNOWN_HASH:
return ("Unknown");
break;
@@ -60,7 +70,8 @@ public:
getBotanHashAlgorithmName(hash_algorithm));
} catch (const Botan::Algorithm_Not_Found&) {
isc_throw(isc::cryptolink::UnsupportedAlgorithm,
- "Unknown hash algorithm: " + hash_algorithm);
+ "Unknown hash algorithm: " <<
+ static_cast<int>(hash_algorithm));
} catch (const Botan::Exception& exc) {
isc_throw(isc::cryptolink::LibraryError, exc.what());
}
@@ -70,12 +81,28 @@ public:
// If the key length is larger than the block size, we hash the
// key itself first.
try {
- if (secret_len > hash->HASH_BLOCK_SIZE) {
+ // use a temp var so we don't have blocks spanning
+ // preprocessor directives
+#if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,9,0)
+ size_t block_length = hash->hash_block_size();
+#elif BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,8,0)
+ size_t block_length = hash->HASH_BLOCK_SIZE;
+#else
+#error "Unsupported Botan version (need 1.8 or higher)"
+ // added to suppress irrelevant compiler errors
+ size_t block_length = 0;
+#endif
+ if (secret_len > block_length) {
Botan::SecureVector<Botan::byte> hashed_key =
hash->process(static_cast<const Botan::byte*>(secret),
secret_len);
hmac_->set_key(hashed_key.begin(), hashed_key.size());
} else {
+ // Botan 1.8 considers a zero-length key a bad key; 1.9 does not,
+ // but we won't accept it anyway, and fail early
+ if (secret_len == 0) {
+ isc_throw(BadKey, "Bad HMAC secret length: 0");
+ }
hmac_->set_key(static_cast<const Botan::byte*>(secret),
secret_len);
}
@@ -89,7 +116,15 @@ public:
~HMACImpl() { }
size_t getOutputLength() const {
+#if BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,9,0)
+ return (hmac_->output_length());
+#elif BOTAN_VERSION_CODE >= BOTAN_VERSION_CODE_FOR(1,8,0)
return (hmac_->OUTPUT_LENGTH);
+#else
+#error "Unsupported Botan version (need 1.8 or higher)"
+ // added to suppress irrelevant compiler errors
+ return 0;
+#endif
}
void update(const void* data, const size_t len) {
@@ -148,9 +183,9 @@ public:
try {
Botan::SecureVector<Botan::byte> our_mac = hmac_->final();
if (len < getOutputLength()) {
- // Currently we don't support truncated signature. To avoid
- // validating too short signature accidently, we enforce the
- // standard signature size for the moment.
+ // Currently we don't support truncated signatures in TSIG (see
+ // #920). To avoid accidentally validating a too-short signature,
+ // we enforce the standard signature size for the moment.
// Once we support truncation correctly, this if-clause should
// (and the capitalized comment above) be removed.
return (false);
@@ -211,7 +246,7 @@ HMAC::verify(const void* sig, const size_t len) {
}
void
-signHMAC(const void* data, size_t data_len, const void* secret,
+signHMAC(const void* data, const size_t data_len, const void* secret,
size_t secret_len, const HashAlgorithm hash_algorithm,
isc::util::OutputBuffer& result, size_t len)
{
diff --git a/src/lib/cryptolink/cryptolink.h b/src/lib/cryptolink/cryptolink.h
index 1583136..d0f7d38 100644
--- a/src/lib/cryptolink/cryptolink.h
+++ b/src/lib/cryptolink/cryptolink.h
@@ -29,15 +29,19 @@ namespace cryptolink {
/// \brief Hash algorithm identifiers
enum HashAlgorithm {
- MD5 = 0, ///< MD5
- SHA1 = 1, ///< SHA-1
- SHA256 = 2, ///< SHA-256
- UNKNOWN_HASH = 3 ///< This value can be used in conversion
+ UNKNOWN_HASH = 0, ///< This value can be used in conversion
/// functions, to be returned when the
/// input is unknown (but a value MUST be
/// returned), for instance when the input
/// is a Name or a string, and the return
/// value is a HashAlgorithm.
+ MD5 = 1, ///< MD5
+ SHA1 = 2, ///< SHA-1
+ SHA256 = 3, ///< SHA-256
+ SHA224 = 4, ///< SHA-224
+ SHA384 = 5, ///< SHA-384
+ SHA512 = 6 ///< SHA-512
+
};
// Forward declaration for createHMAC()
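
With the enum extended, the existing free-function interface (see the signHMAC() declaration in the crypto_hmac.cc hunk above) can be driven with any of the new identifiers. The following is a hedged usage sketch: the OutputBuffer construction and the include paths are assumptions, while signHMAC() and the SHA224 identifier come from this patch.

    #include <cryptolink/cryptolink.h>
    #include <cryptolink/crypto_hmac.h>
    #include <util/buffer.h>

    #include <string>

    void signExample() {
        using namespace isc::cryptolink;

        const std::string data("Hi There");
        const std::string secret(20, 0x0b);   // RFC 4231 test case 1 key
        // Room for a full SHA-224 HMAC (28 bytes); the last argument asks
        // signHMAC() for that many bytes of signature.
        isc::util::OutputBuffer result(28);
        signHMAC(data.c_str(), data.size(),
                 secret.c_str(), secret.size(),
                 SHA224, result, 28);
        // result.getData() / result.getLength() now hold the MAC.
    }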
diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am
index c8b5e26..fbdd13f 100644
--- a/src/lib/cryptolink/tests/Makefile.am
+++ b/src/lib/cryptolink/tests/Makefile.am
@@ -16,10 +16,11 @@ TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += crypto_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/cryptolink/tests/crypto_unittests.cc b/src/lib/cryptolink/tests/crypto_unittests.cc
index a1ffaab..4abeb87 100644
--- a/src/lib/cryptolink/tests/crypto_unittests.cc
+++ b/src/lib/cryptolink/tests/crypto_unittests.cc
@@ -13,8 +13,16 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <config.h>
+
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
#include <gtest/gtest.h>
+#include <util/encode/hex.h>
+
#include <cryptolink/cryptolink.h>
#include <cryptolink/crypto_hmac.h>
@@ -23,7 +31,9 @@
#include <boost/shared_ptr.hpp>
+using namespace boost;
using namespace isc::util;
+using namespace isc::util::encode;
using namespace isc::cryptolink;
namespace {
@@ -340,77 +350,158 @@ TEST(CryptoLinkTest, DISABLED_HMAC_SHA1_RFC2202_SIGN_TRUNCATED) {
//
// Test values taken from RFC 4231
//
-TEST(CryptoLinkTest, HMAC_SHA256_RFC2202_SIGN) {
- const uint8_t secret[] = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b };
- const uint8_t hmac_expected[] = { 0xb0, 0x34, 0x4c, 0x61, 0xd8,
- 0xdb, 0x38, 0x53, 0x5c, 0xa8,
- 0xaf, 0xce, 0xaf, 0x0b, 0xf1,
- 0x2b, 0x88, 0x1d, 0xc2, 0x00,
- 0xc9, 0x83, 0x3d, 0xa7, 0x26,
- 0xe9, 0x37, 0x6c, 0x2e, 0x32,
- 0xcf, 0xf7 };
- doHMACTest("Hi There", secret, 20, SHA256, hmac_expected, 32);
-
- const uint8_t hmac_expected2[] = { 0x5b, 0xdc, 0xc1, 0x46, 0xbf,
- 0x60, 0x75, 0x4e, 0x6a, 0x04,
- 0x24, 0x26, 0x08, 0x95, 0x75,
- 0xc7, 0x5a, 0x00, 0x3f, 0x08,
- 0x9d, 0x27, 0x39, 0x83, 0x9d,
- 0xec, 0x58, 0xb9, 0x64, 0xec,
- 0x38, 0x43 };
- doHMACTest("what do ya want for nothing?", "Jefe", 4, SHA256,
- hmac_expected2, 32);
+// The test data from RFC 4231, including the secret keys and source data,
+// are common to SHA-224/256/384/512, so they are put together in this
+// separate function.
+void
+doRFC4231Tests(HashAlgorithm hash_algorithm,
+ const std::vector<std::vector<uint8_t> >& hmac_list)
+{
+ std::vector<std::string> data_list;
+ std::vector<std::string> secret_list;
+
+ data_list.push_back("Hi There");
+ data_list.push_back("what do ya want for nothing?");
+ data_list.push_back(std::string(50, 0xdd));
+ data_list.push_back(std::string(50, 0xcd));
+ data_list.push_back("Test With Truncation");
+ data_list.push_back("Test Using Larger Than Block-Size Key - "
+ "Hash Key First");
+ data_list.push_back("This is a test using a larger than block-size "
+ "key and a larger than block-size data. The key "
+ "needs to be hashed before being used by the HMAC "
+ "algorithm.");
+
+ secret_list.push_back(std::string(20, 0x0b));
+ secret_list.push_back("Jefe");
+ secret_list.push_back(std::string(20, 0xaa));
+ const uint8_t secret_array[] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
+ 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12,
+ 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19
+ };
+ secret_list.push_back(std::string(secret_array,
+ secret_array + sizeof(secret_array)));
+ secret_list.push_back(std::string(20, 0x0c));
+ secret_list.push_back(std::string(131, 0xaa));
+ secret_list.push_back(std::string(131, 0xaa));
+
+ // Make sure we provide a consistent size of test data
+ ASSERT_EQ(secret_list.size(), data_list.size());
+ ASSERT_EQ(secret_list.size(), hmac_list.size());
+
+ for (int i = 0; i < data_list.size(); ++i) {
+ SCOPED_TRACE("RFC4231 HMAC test for algorithm ID: " +
+ lexical_cast<std::string>(hash_algorithm) +
+ ", data ID: " + lexical_cast<std::string>(i));
+ // Until #920 is resolved we have to skip truncation cases.
+ if (data_list[i] == "Test With Truncation") {
+ continue;
+ }
+ doHMACTest(data_list[i], secret_list[i].c_str(), secret_list[i].size(),
+ hash_algorithm, &hmac_list[i][0], hmac_list[i].size());
+ }
+}
- const uint8_t secret3[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xaa, 0xaa };
- const uint8_t hmac_expected3[] = { 0x77, 0x3e, 0xa9, 0x1e, 0x36,
- 0x80, 0x0e, 0x46, 0x85, 0x4d,
- 0xb8, 0xeb, 0xd0, 0x91, 0x81,
- 0xa7, 0x29, 0x59, 0x09, 0x8b,
- 0x3e, 0xf8, 0xc1, 0x22, 0xd9,
- 0x63, 0x55, 0x14, 0xce, 0xd5,
- 0x65, 0xfe };
- doHMACTest(std::string(50, 0xdd), secret3, 20, SHA256, hmac_expected3, 32);
+TEST(CryptoLinkTest, HMAC_SHA256_RFC4231_SIGN) {
+ std::vector<std::vector<uint8_t> > hmac_expected_list(7);
+
+ int i = 0;
+ decodeHex(
+ "b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7",
+ hmac_expected_list[i++]);
+ decodeHex(
+ "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843",
+ hmac_expected_list[i++]);
+ decodeHex(
+ "773ea91e36800e46854db8ebd09181a72959098b3ef8c122d9635514ced565fe",
+ hmac_expected_list[i++]);
+ decodeHex(
+ "82558a389a443c0ea4cc819899f2083a85f0faa3e578f8077a2e3ff46729665b",
+ hmac_expected_list[i++]);
+ decodeHex("a3b6167473100ee06e0c796c2955552b", hmac_expected_list[i++]);
+ decodeHex(
+ "60e431591ee0b67f0d8a26aacbf5b77f8e0bc6213728c5140546040f0ee37f54",
+ hmac_expected_list[i++]);
+ decodeHex(
+ "9b09ffa71b942fcb27635fbcd5b0e944bfdc63644f0713938a7f51535c3a35e2",
+ hmac_expected_list[i++]);
+
+ doRFC4231Tests(SHA256, hmac_expected_list);
+}
- const uint8_t secret4[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
- 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
- 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12,
- 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
- 0x19 };
- const uint8_t hmac_expected4[] = { 0x82, 0x55, 0x8a, 0x38, 0x9a,
- 0x44, 0x3c, 0x0e, 0xa4, 0xcc,
- 0x81, 0x98, 0x99, 0xf2, 0x08,
- 0x3a, 0x85, 0xf0, 0xfa, 0xa3,
- 0xe5, 0x78, 0xf8, 0x07, 0x7a,
- 0x2e, 0x3f, 0xf4, 0x67, 0x29,
- 0x66, 0x5b };
- doHMACTest(std::string(50, 0xcd), secret4, 25, SHA256, hmac_expected4, 32);
-
- const uint8_t hmac_expected6[] = { 0x60, 0xe4, 0x31, 0x59, 0x1e,
- 0xe0, 0xb6, 0x7f, 0x0d, 0x8a,
- 0x26, 0xaa, 0xcb, 0xf5, 0xb7,
- 0x7f, 0x8e, 0x0b, 0xc6, 0x21,
- 0x37, 0x28, 0xc5, 0x14, 0x05,
- 0x46, 0x04, 0x0f, 0x0e, 0xe3,
- 0x7f, 0x54 };
- doHMACTest("Test Using Larger Than Block-Size Key - Hash Key First",
- std::string(131, 0xaa).c_str(), 131, SHA256, hmac_expected6, 32);
-
- const uint8_t hmac_expected7[] = { 0x9b, 0x09, 0xff, 0xa7, 0x1b,
- 0x94, 0x2f, 0xcb, 0x27, 0x63,
- 0x5f, 0xbc, 0xd5, 0xb0, 0xe9,
- 0x44, 0xbf, 0xdc, 0x63, 0x64,
- 0x4f, 0x07, 0x13, 0x93, 0x8a,
- 0x7f, 0x51, 0x53, 0x5c, 0x3a,
- 0x35, 0xe2 };
- doHMACTest("This is a test using a larger than block-size key and a"
- " larger than block-size data. The key needs to be hashe"
- "d before being used by the HMAC algorithm.",
- std::string(131, 0xaa).c_str(), 131, SHA256, hmac_expected7, 32);
+//
+// Test values taken from RFC 4231 for the optional algorithms
+// SHA-224, SHA-384 and SHA-512
+//
+TEST(CryptoLinkTest, HMAC_SHA224_RFC4231_SIGN) {
+ std::vector<std::vector<uint8_t> > hmac_expected_list(7);
+
+ int i = 0;
+ decodeHex("896fb1128abbdf196832107cd49df33f47b4b1169912ba4f53684b22",
+ hmac_expected_list[i++]);
+ decodeHex("a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44",
+ hmac_expected_list[i++]);
+ decodeHex("7fb3cb3588c6c1f6ffa9694d7d6ad2649365b0c1f65d69d1ec8333ea",
+ hmac_expected_list[i++]);
+ decodeHex("6c11506874013cac6a2abc1bb382627cec6a90d86efc012de7afec5a",
+ hmac_expected_list[i++]);
+ decodeHex("0e2aea68a90c8d37c988bcdb9fca6fa8", hmac_expected_list[i++]);
+ decodeHex("95e9a0db962095adaebe9b2d6f0dbce2d499f112f2d2b7273fa6870e",
+ hmac_expected_list[i++]);
+ decodeHex("3a854166ac5d9f023f54d517d0b39dbd946770db9c2b95c9f6f565d1",
+ hmac_expected_list[i++]);
+
+ doRFC4231Tests(SHA224, hmac_expected_list);
+}
+
+TEST(CryptoLinkTest, HMAC_SHA384_RFC4231_SIGN) {
+ std::vector<std::vector<uint8_t> > hmac_expected_list(7);
+
+ int i = 0;
+ decodeHex("afd03944d84895626b0825f4ab46907f15f9dadbe4101ec682aa034c7cebc5"
+ "9cfaea9ea9076ede7f4af152e8b2fa9cb6", hmac_expected_list[i++]);
+ decodeHex("af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec373632244"
+ "5e8e2240ca5e69e2c78b3239ecfab21649", hmac_expected_list[i++]);
+ decodeHex("88062608d3e6ad8a0aa2ace014c8a86f0aa635d947ac9febe83ef4e5596614"
+ "4b2a5ab39dc13814b94e3ab6e101a34f27", hmac_expected_list[i++]);
+ decodeHex("3e8a69b7783c25851933ab6290af6ca77a9981480850009cc5577c6e1f573b"
+ "4e6801dd23c4a7d679ccf8a386c674cffb", hmac_expected_list[i++]);
+ decodeHex("3abf34c3503b2a23a46efc619baef897", hmac_expected_list[i++]);
+ decodeHex("4ece084485813e9088d2c63a041bc5b44f9ef1012a2b588f3cd11f05033ac4"
+ "c60c2ef6ab4030fe8296248df163f44952", hmac_expected_list[i++]);
+ decodeHex("6617178e941f020d351e2f254e8fd32c602420feb0b8fb9adccebb82461e99"
+ "c5a678cc31e799176d3860e6110c46523e", hmac_expected_list[i++]);
+
+ doRFC4231Tests(SHA384, hmac_expected_list);
+}
+
+TEST(CryptoLinkTest, HMAC_SHA512_RFC4231_SIGN) {
+ std::vector<std::vector<uint8_t> > hmac_expected_list(7);
+
+ int i = 0;
+ decodeHex("87aa7cdea5ef619d4ff0b4241a1d6cb02379f4e2ce4ec2787ad0b30545e17c"
+ "dedaa833b7d6b8a702038b274eaea3f4e4be9d914eeb61f1702e696c203a12"
+ "6854", hmac_expected_list[i++]);
+ decodeHex("164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505"
+ "549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bc"
+ "e737", hmac_expected_list[i++]);
+ decodeHex("fa73b0089d56a284efb0f0756c890be9b1b5dbdd8ee81a3655f83e33b2279d"
+ "39bf3e848279a722c806b485a47e67c807b946a337bee8942674278859e132"
+ "92fb", hmac_expected_list[i++]);
+ decodeHex("b0ba465637458c6990e5a8c5f61d4af7e576d97ff94b872de76f8050361ee3"
+ "dba91ca5c11aa25eb4d679275cc5788063a5f19741120c4f2de2adebeb10a2"
+ "98dd", hmac_expected_list[i++]);
+ decodeHex("415fad6271580a531d4179bc891d87a6", hmac_expected_list[i++]);
+ decodeHex("80b24263c7c1a3ebb71493c1dd7be8b49b46d1f41b4aeec1121b013783f8f3"
+ "526b56d037e05f2598bd0fd2215d6a1e5295e64f73f63f0aec8b915a985d78"
+ "6598", hmac_expected_list[i++]);
+ decodeHex("e37b6a775dc87dbaa4dfa9f96e5e3ffddebd71f8867289865df5a32d20cdc9"
+ "44b6022cac3c4982b10d5eeb55c3e4de15134676fb6de0446065c97440fa8c"
+ "6a58", hmac_expected_list[i++]);
+
+ doRFC4231Tests(SHA512, hmac_expected_list);
}
TEST(CryptoLinkTest, DISABLED_HMAC_SHA256_RFC2202_SIGN_TRUNCATED) {
diff --git a/src/lib/cryptolink/tests/run_unittests.cc b/src/lib/cryptolink/tests/run_unittests.cc
index d16327e..a2181cf 100644
--- a/src/lib/cryptolink/tests/run_unittests.cc
+++ b/src/lib/cryptolink/tests/run_unittests.cc
@@ -13,10 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index e028186..bf1171e 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,9 +7,9 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-CLEANFILES = *.gcno *.gcda messagedef.h messagedef.cc
+CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
-lib_LTLIBRARIES = libdatasrc.la
+lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -17,19 +17,33 @@ libdatasrc_la_SOURCES += query.h query.cc
libdatasrc_la_SOURCES += cache.h cache.cc
libdatasrc_la_SOURCES += rbtree.h
libdatasrc_la_SOURCES += zonetable.h zonetable.cc
-libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
-nodist_libdatasrc_la_SOURCES = messagedef.h messagedef.cc
+libdatasrc_la_SOURCES += client.h iterator.h
+libdatasrc_la_SOURCES += database.h database.cc
+libdatasrc_la_SOURCES += factory.h factory.cc
+nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+
+sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
+sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+sqlite3_ds_la_LIBADD += libdatasrc.la
+sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
+
+memory_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+memory_ds_la_LDFLAGS = -module
+memory_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+memory_ds_la_LIBADD += libdatasrc.la
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libdatasrc_la_LIBADD += $(SQLITE_LIBS)
-BUILT_SOURCES = messagedef.h messagedef.cc
-messagedef.h messagedef.cc: Makefile messagedef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/messagedef.mes
+BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
-EXTRA_DIST = messagedef.mes
+EXTRA_DIST = datasrc_messages.mes
diff --git a/src/lib/datasrc/cache.cc b/src/lib/datasrc/cache.cc
index 8e9487d..d88e649 100644
--- a/src/lib/datasrc/cache.cc
+++ b/src/lib/datasrc/cache.cc
@@ -100,6 +100,19 @@ public:
/// \return \c RRsetPtr
RRsetPtr getRRset() const { return (entry->rrset); }
+ /// \brief Returns the name associated with the cached node
+ ///
+ /// This is the name associated with the RRset if it is a positive
+ /// entry, and the associated question name if the RRset is NULL
+ /// and this is a negative entry (together with an indication that
+ /// this is a negative entry).
+ string getNodeName() const {
+ if (getRRset()) {
+ return (getRRset()->getName().toText());
+ }
+ return (std::string("negative entry for ") + question.toText());
+ }
+
/// \brief Returns the query response flags associated with the data.
///
/// \return \c uint32_t
@@ -213,13 +226,14 @@ HotCacheImpl::HotCacheImpl(int slots, bool enabled) :
inline void
HotCacheImpl::insert(const CacheNodePtr node) {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_INSERT).
- arg(node->getRRset()->getName());
+ arg(node->getNodeName());
std::map<Question, CacheNodePtr>::const_iterator iter;
iter = map_.find(node->question);
if (iter != map_.end()) {
CacheNodePtr old = iter->second;
if (old && old->isValid()) {
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_OLD_FOUND);
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_OLD_FOUND)
+ .arg(node->getNodeName());
remove(old);
}
}
@@ -253,7 +267,7 @@ HotCacheImpl::promote(CacheNodePtr node) {
void
HotCacheImpl::remove(ConstCacheNodePtr node) {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_REMOVE).
- arg(node->getRRset()->getName());
+ arg(node->getNodeName());
lru_.erase(node->lru_entry_);
map_.erase(node->question);
--count_;
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
new file mode 100644
index 0000000..40b7a3f
--- /dev/null
+++ b/src/lib/datasrc/client.h
@@ -0,0 +1,292 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_CLIENT_H
+#define __DATA_SOURCE_CLIENT_H 1
+
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <datasrc/zone.h>
+
+/// \file
+/// Datasource clients
+///
+/// The data source client API is specified in client.h, and provides the
+/// functionality to query and modify data in the data sources. There are
+/// multiple datasource implementations, and by subclassing DataSourceClient or
+/// DatabaseClient, more can be added.
+///
+/// All datasources are implemented as loadable modules, with a name of the
+/// form "<type>_ds.so". This has been chosen intentionally, to minimize
+/// confusion and potential mistakes.
+///
+/// In order to use a datasource client backend, the class
+/// DataSourceClientContainer is provided in factory.h; this will load the
+/// library, set up the instance, and clean everything up once it is destroyed.
+///
+/// Access to the actual instance is provided with the getInstance() method
+/// in DataSourceClientContainer
+///
+/// \note Depending on actual usage, we might consider making the container
+/// a transparent abstraction layer, so it can be used as a DataSourceClient
+/// directly. This has some other implications, though, so for now the only
+/// access provided is through getInstance().
+///
+/// For datasource backends, we use a dynamically loaded library system (with
+/// dlopen()). This library must contain the following things:
+/// - A subclass of DataSourceClient or DatabaseClient (which itself is a
+/// subclass of DataSourceClient)
+/// - A creator function for an instance of that subclass, of the form:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+/// \endcode
+/// - A destructor for said instance, of the form:
+/// \code
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+///
+/// See the documentation for the \link DataSourceClient \endlink class for
+/// more information on implementing subclasses of it.
+///
+
+namespace isc {
+namespace datasrc {
+
+// iterator.h is not included on purpose; most applications won't need it
+class ZoneIterator;
+typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
+
+/// \brief The base class of data source clients.
+///
+/// This is an abstract base class that defines the common interface for
+/// various types of data source clients. A data source client is a top level
+/// access point to a data source, allowing various operations on the data
+/// source such as lookups, traversing or updates. The client class itself
+/// has limited focus and delegates the responsibility for these specific
+/// operations to other classes; in general methods of this class act as
+/// factories of these other classes.
+///
+/// See \link datasrc/client.h datasrc/client.h \endlink for more information
+/// on adding datasource implementations.
+///
+/// The following derived classes are currently (expected to be) provided:
+/// - \c InMemoryClient: A client of a conceptual data source that stores
+/// all necessary data in memory for faster lookups
+/// - \c DatabaseClient: A client that uses a real database backend (such as
+/// an SQL database). It would internally hold a connection to the underlying
+/// database system.
+///
+/// \note It is intentional that the names of these derived classes don't
+/// contain "DataSource", unlike their base class. It's also noteworthy
+/// that the naming of the base class is somewhat redundant because the
+/// namespace \c datasrc would indicate that it's related to a data source.
+/// The redundant naming comes from the observation that namespaces are
+/// often omitted with \c using directives, in which case "Client"
+/// would be too generic. On the other hand, concrete derived classes are
+/// generally not expected to be referenced directly from other modules and
+/// applications, so we'll give them more concise names such as InMemoryClient.
+///
+/// A single \c DataSourceClient object is expected to handle only a single
+/// RR class even if the underlying data source contains records for multiple
+/// RR classes. Likewise, (when we support views) a \c DataSourceClient
+/// object is expected to handle only a single view.
+///
+/// If the application uses multiple threads, each thread will need to
+/// create and use a separate DataSourceClient. This is because some
+/// database backends don't allow multiple threads to share the same
+/// connection to the database.
+///
+/// \note For a client using an in memory backend, this may result in
+/// having multiple copies of the same data in memory, increasing the
+/// memory footprint substantially. Depending on how to support multiple
+/// CPU cores for concurrent lookups on the same single data source (which
+/// is not fully fixed yet, and for which multiple threads may be used),
+/// this design may have to be revisited.
+///
+/// This class (and therefore its derived classes) is not copyable.
+/// This is because the derived classes would generally contain attributes
+/// that are not easy to copy (such as a large amount of in-memory data or a
+/// network connection to a database server). In order to avoid a surprising
+/// disruption with a naive copy, it's prohibited explicitly. For the expected
+/// usage of the client classes the restriction should be acceptable.
+///
+/// \todo This class is still not complete. It will need more factory methods,
+/// e.g. for (re)loading a zone.
+class DataSourceClient : boost::noncopyable {
+public:
+ /// \brief A helper structure to represent the search result of
+ /// \c find().
+ ///
+ /// This is a straightforward pair of the result code and a shared pointer
+ /// to the found zone to represent the result of \c find().
+ /// We use this in order to avoid overloading the return value for both
+ /// the result code ("success" or "not found") and the found object,
+ /// i.e., avoid using \c NULL to mean "not found", etc.
+ ///
+ /// This is a simple value class with no internal state, so for
+ /// convenience we allow the applications to refer to the members
+ /// directly.
+ ///
+ /// See the description of \c find() for the semantics of the member
+ /// variables.
+ struct FindResult {
+ FindResult(result::Result param_code,
+ const ZoneFinderPtr param_zone_finder) :
+ code(param_code), zone_finder(param_zone_finder)
+ {}
+ const result::Result code;
+ const ZoneFinderPtr zone_finder;
+ };
+
+ ///
+ /// \name Constructors and Destructor.
+ ///
+protected:
+ /// Default constructor.
+ ///
+ /// This is intentionally defined as protected as this base class
+ /// should never be instantiated directly.
+ ///
+ /// The constructor of a concrete derived class may throw an exception.
+ /// This interface does not specify which exceptions can happen (at least
+ /// at this moment), and the caller should expect any type of exception
+ /// and react accordingly.
+ DataSourceClient() {}
+
+public:
+ /// The destructor.
+ virtual ~DataSourceClient() {}
+ //@}
+
+ /// Returns a \c ZoneFinder for a zone that best matches the given name.
+ ///
+ /// A concrete derived version of this method gets access to its backend
+ /// data source to search for a zone whose origin gives the longest match
+ /// against \c name. It returns the search result in the form of a
+ /// \c FindResult object as follows:
+ /// - \c code: The result code of the operation.
+ /// - \c result::SUCCESS: A zone that gives an exact match is found
+ /// - \c result::PARTIALMATCH: A zone whose origin is a
+ /// super domain of \c name is found (but there is no exact match)
+ /// - \c result::NOTFOUND: For all other cases.
+ /// - \c zone_finder: Pointer to a \c ZoneFinder object for the found zone
+ /// if one is found; otherwise \c NULL.
+ ///
+ /// A specific derived version of this method may throw an exception.
+ /// This interface does not specify which exceptions can happen (at least
+ /// at this moment), and the caller should expect any type of exception
+ /// and react accordingly.
+ ///
+ /// \param name A domain name for which the search is performed.
+ /// \return A \c FindResult object enclosing the search result (see above).
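+ ///
+ /// The following is a minimal usage sketch (for illustration only;
+ /// \c client is assumed to be a reference to some concrete
+ /// \c DataSourceClient implementation):
+ /// \code
+ /// const DataSourceClient::FindResult result =
+ ///     client.findZone(isc::dns::Name("www.example.org"));
+ /// if (result.code == result::SUCCESS ||
+ ///     result.code == result::PARTIALMATCH) {
+ ///     // result.zone_finder now points to a ZoneFinder for the best
+ ///     // matching zone
+ /// }
+ /// \endcode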
+ virtual FindResult findZone(const isc::dns::Name& name) const = 0;
+
+ /// \brief Returns an iterator to the given zone
+ ///
+ /// This allows for traversing the whole zone. The returned object can
+ /// provide the RRsets one by one.
+ ///
+ /// This throws DataSourceError when the zone does not exist in the
+ /// datasource.
+ ///
+ /// The default implementation throws isc::NotImplemented. This allows
+ /// for easy and fast deployment of minimal custom data sources, where
+ /// the user/implementor doesn't have to care about anything else but
+ /// the actual queries. Also, in some cases, it isn't possible to traverse
+ /// the zone from a logical point of view (e.g. dynamically generated zone
+ /// data).
+ ///
+ /// It is not specified whether a concrete implementation of this method
+ /// may throw other exceptions.
+ ///
+ /// \param name The name of the zone apex to be traversed. Unlike
+ /// \c findZone(), it does not perform nearest (longest) matching.
+ /// \return Pointer to the iterator.
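+ ///
+ /// A minimal usage sketch (for illustration; \c client is assumed to be
+ /// a concrete \c DataSourceClient and the zone is assumed to exist):
+ /// \code
+ /// ZoneIteratorPtr it(client.getIterator(isc::dns::Name("example.org")));
+ /// for (isc::dns::ConstRRsetPtr rrset = it->getNextRRset(); rrset;
+ ///      rrset = it->getNextRRset()) {
+ ///     // process rrset
+ /// }
+ /// \endcode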
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const {
+ // This is here to both document the parameter in doxygen (therefore it
+ // needs a name) and avoid unused parameter warning.
+ static_cast<void>(name);
+
+ isc_throw(isc::NotImplemented,
+ "Data source doesn't support iteration");
+ }
+
+ /// Return an updater to make updates to a specific zone.
+ ///
+ /// The RR class of the zone is the one that the client is expected to
+ /// handle (see the detailed description of this class).
+ ///
+ /// If the specified zone is not found via the client, a NULL pointer
+ /// will be returned; in other words a completely new zone cannot be
+ /// created using an updater. It must be created beforehand (even if
+ /// it's an empty placeholder) in a way specific to the underlying data
+ /// source.
+ ///
+ /// Conceptually, the updater will trigger a separate transaction for
+ /// subsequent updates to the zone within the context of the updater
+ /// (the actual implementation of the "transaction" may vary for the
+ /// specific underlying data source). Until \c commit() is performed
+ /// on the updater, the intermediate updates won't affect the results
+ /// of other methods (and the result of the object's methods created
+ /// by other factory methods). Likewise, if the updater is destructed
+ /// without performing \c commit(), the intermediate updates will be
+ /// effectively canceled and will never affect other methods.
+ ///
+ /// If the underlying data source allows concurrent updates, this method
+ /// can be called multiple times while the previously returned updater(s)
+ /// are still active. In this case each updater triggers a different
+ /// "transaction". Normally it would be for different zones for such a
+ /// case as handling multiple incoming AXFR streams concurrently, but
+ /// this interface does not even prohibit an attempt of getting more than
+ /// one updater for the same zone, as long as the underlying data source
+ /// allows such an operation (and any conflict resolution is left to the
+ /// specific derived class implementation).
+ ///
+ /// If \c replace is true, any existing RRs of the zone will be
+ /// deleted on successful completion of updates (after \c commit() on
+ /// the updater); if it's false, the existing RRs will be
+ /// intact unless explicitly deleted by \c deleteRRset() on the updater.
+ ///
+ /// A data source can be "read only" or can prohibit partial updates.
+ /// In such cases this method will result in an \c isc::NotImplemented
+ /// exception (unconditionally for a read-only data source, or when
+ /// \c replace is false for one that prohibits partial updates).
+ ///
+ /// \note To avoid throwing the exception accidentally with a lazy
+ /// implementation, we still keep this method pure virtual without
+ /// an implementation. All derived classes must explicitly define this
+ /// method, even if it simply throws the NotImplemented exception.
+ ///
+ /// \exception NotImplemented The underlying data source does not support
+ /// updates.
+ /// \exception DataSourceError Internal error in the underlying data
+ /// source.
+ /// \exception std::bad_alloc Resource allocation failure.
+ ///
+ /// \param name The zone name to be updated
+ /// \param replace Whether to delete existing RRs before making updates
+ ///
+ /// \return A pointer to the updater; it will be NULL if the specified
+ /// zone isn't found.
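+ ///
+ /// The expected usage pattern is roughly as follows (a sketch only;
+ /// error handling and the construction of \c rrset are omitted, and
+ /// \c client is assumed to be a concrete \c DataSourceClient):
+ /// \code
+ /// ZoneUpdaterPtr updater = client.getUpdater(isc::dns::Name("example.org"),
+ ///                                            false);
+ /// if (updater) {
+ ///     updater->addRRset(*rrset);    // and/or deleteRRset()
+ ///     updater->commit();            // make the changes effective
+ /// }
+ /// \endcode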
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const = 0;
+};
+}
+}
+#endif // DATA_SOURCE_CLIENT_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/data_source.cc b/src/lib/datasrc/data_source.cc
index 4e1fcde..94dec89 100644
--- a/src/lib/datasrc/data_source.cc
+++ b/src/lib/datasrc/data_source.cc
@@ -903,7 +903,7 @@ tryWildcard(Query& q, QueryTaskPtr task, ZoneInfo& zoneinfo, bool& found) {
result = proveNX(q, task, zoneinfo, true);
if (result != DataSrc::SUCCESS) {
m.setRcode(Rcode::SERVFAIL());
- logger.error(DATASRC_QUERY_WILDCARD_PROVENX_FAIL).
+ logger.error(DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL).
arg(task->qname).arg(result);
return (DataSrc::ERROR);
}
@@ -945,7 +945,7 @@ tryWildcard(Query& q, QueryTaskPtr task, ZoneInfo& zoneinfo, bool& found) {
void
DataSrc::doQuery(Query& q) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_QUERY_PROCESS).arg(q.qname()).
- arg(q.qclass());
+ arg(q.qtype()).arg(q.qclass());
Message& m = q.message();
vector<RRsetPtr> additional;
@@ -1162,7 +1162,7 @@ DataSrc::doQuery(Query& q) {
result = proveNX(q, task, zoneinfo, false);
if (result != DataSrc::SUCCESS) {
m.setRcode(Rcode::SERVFAIL());
- logger.error(DATASRC_QUERY_PROVENX_FAIL).arg(task->qname);
+ logger.error(DATASRC_QUERY_PROVE_NX_FAIL).arg(task->qname);
return;
}
}
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index ff695da..a7a15a9 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -184,9 +184,9 @@ public:
void setClass(isc::dns::RRClass& c) { rrclass = c; }
void setClass(const isc::dns::RRClass& c) { rrclass = c; }
- Result init() { return (NOT_IMPLEMENTED); }
- Result init(isc::data::ConstElementPtr config);
- Result close() { return (NOT_IMPLEMENTED); }
+ virtual Result init() { return (NOT_IMPLEMENTED); }
+ virtual Result init(isc::data::ConstElementPtr config);
+ virtual Result close() { return (NOT_IMPLEMENTED); }
virtual Result findRRset(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass,
@@ -351,7 +351,7 @@ public:
/// \brief Returns the best enclosing zone name found for the given
// name and RR class so far.
- ///
+ ///
/// \return A pointer to the zone apex \c Name, NULL if none found yet.
///
/// This method never throws an exception.
@@ -413,6 +413,6 @@ private:
#endif
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..3b079c6
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,990 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <vector>
+
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <datasrc/logger.h>
+
+#include <boost/foreach.hpp>
+
+using namespace isc::dns;
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace datasrc {
+
+DatabaseClient::DatabaseClient(RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor>
+ accessor) :
+ rrclass_(rrclass), accessor_(accessor)
+{
+ if (!accessor_) {
+ isc_throw(isc::InvalidParameter,
+ "No database provided to DatabaseClient");
+ }
+}
+
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+ std::pair<bool, int> zone(accessor_->getZone(name.toText()));
+ // Try exact first
+ if (zone.first) {
+ return (FindResult(result::SUCCESS,
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second, name))));
+ }
+ // Then super domains
+ // Start from 1, as 0 is covered above
+ for (size_t i(1); i < name.getLabelCount(); ++i) {
+ isc::dns::Name superdomain(name.split(i));
+ zone = accessor_->getZone(superdomain.toText());
+ if (zone.first) {
+ return (FindResult(result::PARTIALMATCH,
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second,
+ superdomain))));
+ }
+ }
+ // No, really nothing
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor> accessor,
+ int zone_id, const isc::dns::Name& origin) :
+ accessor_(accessor),
+ zone_id_(zone_id),
+ origin_(origin)
+{ }
+
+namespace {
+// Adds the given Rdata to the given RRset
+// If the rrset is an empty pointer, a new one is
+// created with the given name, class, type and ttl
+// The type is checked if the rrset exists, but the
+// name is not.
+//
+// Then adds the given rdata to the set
+//
+// Raises a DataSourceError if the type does not
+// match, or if the given rdata string does not
+// parse correctly for the given type and class
+//
+// The DatabaseAccessor is passed to print the
+// database name in the log message if the TTL is
+// modified
+void addOrCreate(isc::dns::RRsetPtr& rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& cls,
+ const isc::dns::RRType& type,
+ const isc::dns::RRTTL& ttl,
+ const std::string& rdata_str,
+ const DatabaseAccessor& db
+ )
+{
+ if (!rrset) {
+ rrset.reset(new isc::dns::RRset(name, cls, type, ttl));
+ } else {
+ // This is a check to make sure find() is not messing things up
+ assert(type == rrset->getType());
+ if (ttl != rrset->getTTL()) {
+ if (ttl < rrset->getTTL()) {
+ rrset->setTTL(ttl);
+ }
+ logger.warn(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+ .arg(db.getDBName()).arg(name).arg(cls)
+ .arg(type).arg(rrset->getTTL());
+ }
+ }
+ try {
+ rrset->addRdata(isc::dns::rdata::createRdata(type, cls, rdata_str));
+ } catch (const isc::dns::rdata::InvalidRdataText& ivrt) {
+ // at this point, rrset may have been initialised for no reason,
+ // and won't be used. But the caller would drop the shared_ptr
+ // on such an error anyway, so we don't care.
+ isc_throw(DataSourceError,
+ "bad rdata in database for " << name << " "
+ << type << ": " << ivrt.what());
+ }
+}
+
+// This class keeps a short-lived store of RRSIG records encountered
+// during a call to find(). If the backend happens to return signatures
+// before the actual data, we might not know which signatures we will need.
+// So if they may be relevant, we store them in this class.
+//
+// (If this class seems useful in other places, we might want to move
+// it to util. That would also provide an opportunity to add unit tests)
+class RRsigStore {
+public:
+ // Adds the given signature Rdata to the store
+ // The signature rdata MUST be of the RRSIG rdata type
+ // (the caller must make sure of this).
+ // NOTE: if we move this class to a public namespace,
+ // we should add a type_covered argument, so as not
+ // to have to do this cast here.
+ void addSig(isc::dns::rdata::RdataPtr sig_rdata) {
+ const isc::dns::RRType& type_covered =
+ static_cast<isc::dns::rdata::generic::RRSIG*>(
+ sig_rdata.get())->typeCovered();
+ sigs[type_covered].push_back(sig_rdata);
+ }
+
+ // If the store contains signatures for the type of the given
+ // rrset, they are appended to it.
+ void appendSignatures(isc::dns::RRsetPtr& rrset) const {
+ std::map<isc::dns::RRType,
+ std::vector<isc::dns::rdata::RdataPtr> >::const_iterator
+ found = sigs.find(rrset->getType());
+ if (found != sigs.end()) {
+ BOOST_FOREACH(isc::dns::rdata::RdataPtr sig, found->second) {
+ rrset->addRRsig(sig);
+ }
+ }
+ }
+
+private:
+ std::map<isc::dns::RRType, std::vector<isc::dns::rdata::RdataPtr> > sigs;
+};
+}
+
+DatabaseClient::Finder::FoundRRsets
+DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
+ bool check_ns, const string* construct_name)
+{
+ RRsigStore sig_store;
+ bool records_found = false;
+ std::map<RRType, RRsetPtr> result;
+
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_));
+ // It must not return NULL; that would be a bug in the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ if (construct_name == NULL) {
+ construct_name = &name;
+ }
+
+ const Name construct_name_object(*construct_name);
+
+ bool seen_cname(false);
+ bool seen_ds(false);
+ bool seen_other(false);
+ bool seen_ns(false);
+
+ while (context->getNext(columns)) {
+ // The domain is not empty
+ records_found = true;
+
+ try {
+ const RRType cur_type(columns[DatabaseAccessor::TYPE_COLUMN]);
+
+ if (cur_type == RRType::RRSIG()) {
+ // If we get signatures before we get the actual data, we
+ // can't know which ones to keep and which to drop...
+ // So we keep a separate store of any signature that may be
+ // relevant and add them to the final RRset when we are
+ // done.
+ // A possible optimization here is to not store them for
+ // types we are certain we don't need
+ sig_store.addSig(rdata::createRdata(cur_type, getClass(),
+ columns[DatabaseAccessor::RDATA_COLUMN]));
+ }
+
+ if (types.find(cur_type) != types.end()) {
+ // This type is requested, so put it into result
+ const RRTTL cur_ttl(columns[DatabaseAccessor::TTL_COLUMN]);
+ // The sigtype column was an optimization for finding the
+ // relevant RRSIG RRs for a lookup. Currently this column is
+ // not used in this revised datasource implementation. We
+ // should either start using it again, or remove it from use
+ // completely (i.e. also remove it from the schema and the
+ // backend implementation).
+ // Note that because we don't use it now, we also won't notice
+ // it if the value is wrong (i.e. if the sigtype column
+ // contains an rrtype that is different from the actual value
+ // of the 'type covered' field in the RRSIG Rdata).
+ //cur_sigtype(columns[SIGTYPE_COLUMN]);
+ addOrCreate(result[cur_type], construct_name_object,
+ getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *accessor_);
+ }
+
+ if (cur_type == RRType::CNAME()) {
+ seen_cname = true;
+ } else if (cur_type == RRType::NS()) {
+ seen_ns = true;
+ } else if (cur_type == RRType::DS()) {
+ seen_ds = true;
+ } else if (cur_type != RRType::RRSIG() &&
+ cur_type != RRType::NSEC3() &&
+ cur_type != RRType::NSEC()) {
+ // NSEC and RRSIG can coexist with anything, otherwise
+ // we've seen something that can't live together with potential
+ // CNAME or NS
+ //
+ // NSEC3 lives in a separate namespace from everything, therefore
+ // we just ignore it here for these checks as well.
+ seen_other = true;
+ }
+ } catch (const InvalidRRType&) {
+ isc_throw(DataSourceError, "Invalid RRType in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ } catch (const InvalidRRTTL&) {
+ isc_throw(DataSourceError, "Invalid TTL in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TTL_COLUMN]);
+ } catch (const rdata::InvalidRdataText&) {
+ isc_throw(DataSourceError, "Invalid rdata in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ RDATA_COLUMN]);
+ }
+ }
+ if (seen_cname && (seen_other || seen_ns || seen_ds)) {
+ isc_throw(DataSourceError, "CNAME shares domain " << name <<
+ " with something else");
+ }
+ if (check_ns && seen_ns && seen_other) {
+ isc_throw(DataSourceError, "NS shares domain " << name <<
+ " with something else");
+ }
+ // Add signatures to all found RRsets
+ for (std::map<RRType, RRsetPtr>::iterator i(result.begin());
+ i != result.end(); ++ i) {
+ sig_store.appendSignatures(i->second);
+ }
+
+ return (FoundRRsets(records_found, result));
+}
+
+bool
+DatabaseClient::Finder::hasSubdomains(const std::string& name) {
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_, true));
+ // It must not return NULL; that would be a bug in the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ return (context->getNext(columns));
+}
+
+// Some manipulation with RRType sets
+namespace {
+
+// Bunch of functions to construct specific sets of RRTypes we will
+// ask from it.
+typedef std::set<RRType> WantedTypes;
+
+const WantedTypes&
+NSEC_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+DELEGATION_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::DNAME());
+ result.insert(RRType::NS());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+FINAL_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::CNAME());
+ result.insert(RRType::NS());
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+}
+
+RRsetPtr
+DatabaseClient::Finder::findNSECCover(const Name& name) {
+ try {
+ // Which one should contain the NSEC record?
+ const Name coverName(findPreviousName(name));
+ // Get the record and copy it out
+ const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
+ coverName != getOrigin());
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ return (nci->second);
+ } else {
+ // The previous doesn't contain NSEC.
+ // Badly signed zone or a bug?
+
+ // FIXME: Currently, if the zone is not signed, we could get
+ // here. In that case we can't really throw, but for now, we can't
+ // recognize it. So we don't throw at all; enable it once
+ // we have an is_signed flag or something.
+#if 0
+ isc_throw(DataSourceError, "No NSEC in " +
+ coverName.toText() + ", but it was "
+ "returned as previous - "
+ "accessor error? Badly signed zone?");
+#endif
+ }
+ }
+ catch (const isc::NotImplemented&) {
+ // Well, they want DNSSEC, but it is not available.
+ // So we don't provide anything.
+ LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+ arg(accessor_->getDBName()).arg(name);
+ }
+ // We didn't find it, return nothing
+ return (RRsetPtr());
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ // This variable is used to determine the difference between
+ // NXDOMAIN and NXRRSET
+ bool records_found = false;
+ bool glue_ok((options & FIND_GLUE_OK) != 0);
+ const bool dnssec_data((options & FIND_DNSSEC) != 0);
+ bool get_cover(false);
+ isc::dns::RRsetPtr result_rrset;
+ ZoneFinder::Result result_status = SUCCESS;
+ FoundRRsets found;
+ logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(accessor_->getDBName()).arg(name).arg(type);
+ // In case we are in GLUE_OK mode and start matching wildcards,
+ // we can't do it under NS, so we store it here to check
+ isc::dns::RRsetPtr first_ns;
+
+ // First, do we have any kind of delegation (NS/DNAME) here?
+ const Name origin(getOrigin());
+ const size_t origin_label_count(origin.getLabelCount());
+ // Number of labels in the last known non-empty domain
+ size_t last_known(origin_label_count);
+ const size_t current_label_count(name.getLabelCount());
+ // This is how many labels we remove to get origin
+ const size_t remove_labels(current_label_count - origin_label_count);
+
+ // Now go through all superdomains, from the origin down
+ for (int i(remove_labels); i > 0; --i) {
+ Name superdomain(name.split(i));
+ // Look if there's NS or DNAME (but ignore the NS in origin)
+ found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
+ i != remove_labels);
+ if (found.first) {
+ // It contains some RRs, so it exists.
+ last_known = superdomain.getLabelCount();
+
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator dni(found.second.find(RRType::DNAME()));
+ // In case we are in GLUE_OK mode, we want to store the
+ // highest encountered NS (but not apex)
+ if (glue_ok && !first_ns && i != remove_labels &&
+ nsi != found.second.end()) {
+ first_ns = nsi->second;
+ } else if (!glue_ok && i != remove_labels &&
+ nsi != found.second.end()) {
+ // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
+ // delegation in apex
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ // No need to go lower, found
+ break;
+ } else if (dni != found.second.end()) {
+ // Very similar for DNAME
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DNAME).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = dni->second;
+ result_status = DNAME;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "DNAME at " << superdomain <<
+ " has " << result_rrset->getRdataCount() <<
+ " rdata, 1 expected");
+ }
+ break;
+ }
+ }
+ }
+
+ if (!result_rrset) { // Only if we didn't find a redirect already
+ // Try getting the final result and extract it
+ // It is special if there's a CNAME or NS, DNAME is ignored here
+ // And we don't consider the NS in origin
+
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ found = getRRsets(name.toText(), final_types, name != origin);
+ records_found = found.first;
+
+ // NS records, CNAME record and Wanted Type records
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+ if (name != origin && !glue_ok && nsi != found.second.end()) {
+ // There's a delegation at the exact node.
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+ arg(accessor_->getDBName()).arg(name);
+ result_status = DELEGATION;
+ result_rrset = nsi->second;
+ } else if (type != isc::dns::RRType::CNAME() &&
+ cni != found.second.end()) {
+ // A CNAME here
+ result_status = CNAME;
+ result_rrset = cni->second;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ result_rrset->getRdataCount() <<
+ " rdata at " << name << ", expected 1");
+ }
+ } else if (wti != found.second.end()) {
+ // Just get the answer
+ result_rrset = wti->second;
+ } else if (!records_found) {
+ // Nothing lives here.
+ // But check if something lives below this
+ // domain and if so, pretend something is here as well.
+ if (hasSubdomains(name.toText())) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+ arg(accessor_->getDBName()).arg(name);
+ records_found = true;
+ get_cover = dnssec_data;
+ } else if ((options & NO_WILDCARD) != 0) {
+ // If wildcard check is disabled, the search will ultimately
+ // terminate with NXDOMAIN. If DNSSEC is enabled, flag that
+ // we need to get the NSEC records to prove this.
+ if (dnssec_data) {
+ get_cover = true;
+ }
+ } else {
+ // It's not empty non-terminal. So check for wildcards.
+ // We remove labels one by one and look for the wildcard there.
+ // Go up to first non-empty domain.
+ for (size_t i(1); i <= current_label_count - last_known; ++i) {
+ // Construct the name with *
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
+ // TODO What do we do about DNAME here?
+ // The types are the same as with original query
+ found = getRRsets(wildcard, final_types, true,
+ &construct_name);
+ if (found.first) {
+ if (first_ns) {
+ // In case we are under NS, we don't
+ // wildcard-match, but return delegation
+ result_rrset = first_ns;
+ result_status = DELEGATION;
+ records_found = true;
+ // We pretend to switch to non-glue_ok mode
+ glue_ok = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(first_ns->getName());
+ } else if (!hasSubdomains(name.split(i - 1).toText()))
+ {
+ // Nothing we added as part of the * can exist
+ // directly, as we go up only to first existing
+ // domain, but it could be empty non-terminal. In
+ // that case, we need to cancel the match.
+ records_found = true;
+ const FoundIterator
+ cni(found.second.find(RRType::CNAME()));
+ const FoundIterator
+ nsi(found.second.find(RRType::NS()));
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ const FoundIterator wti(found.second.find(type));
+ if (cni != found.second.end() &&
+ type != RRType::CNAME()) {
+ result_rrset = cni->second;
+ result_status = WILDCARD_CNAME;
+ } else if (nsi != found.second.end()) {
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ } else if (wti != found.second.end()) {
+ result_rrset = wti->second;
+ result_status = WILDCARD;
+ } else {
+ // NXRRSET case in the wildcard
+ result_status = WILDCARD_NXRRSET;
+ if (dnssec_data &&
+ nci != found.second.end()) {
+ // User wants a proof the wildcard doesn't
+ // contain it
+ //
+ // However, we need to get the RRset in the
+ // name of the wildcard, not the constructed
+ // one, so we walk it again
+ found = getRRsets(wildcard, NSEC_TYPES(),
+ true);
+ result_rrset =
+ found.second.find(RRType::NSEC())->
+ second;
+ }
+ }
+
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name).arg(superdomain);
+ }
+ break;
+ } else if (hasSubdomains(wildcard)) {
+ // Empty non-terminal asterisk
+ records_found = true;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_EMPTY).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ if (dnssec_data) {
+ result_rrset = findNSECCover(Name(wildcard));
+ if (result_rrset) {
+ result_status = WILDCARD_NXRRSET;
+ }
+ }
+ break;
+ }
+ }
+ // This is the NXDOMAIN case (nothing found anywhere). If
+ // they want DNSSEC data, try getting the NSEC record
+ if (dnssec_data && !records_found) {
+ get_cover = true;
+ }
+ }
+ } else if (dnssec_data) {
+ // This is the "usual" NXRRSET case
+ // So in case they want DNSSEC, provide the NSEC
+ // (which should be available already here)
+ result_status = NXRRSET;
+ const FoundIterator nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ result_rrset = nci->second;
+ }
+ }
+ }
+
+ if (!result_rrset) {
+ if (result_status == SUCCESS) {
+ // Should we look for NSEC covering the name?
+ if (get_cover) {
+ result_rrset = findNSECCover(name);
+ if (result_rrset) {
+ result_status = NXDOMAIN;
+ }
+ }
+ // Something is not here and we didn't decide yet what
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
+ }
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_RRSET)
+ .arg(accessor_->getDBName()).arg(*result_rrset);
+ }
+ return (FindResult(result_status, result_rrset));
+}
+
+Name
+DatabaseClient::Finder::findPreviousName(const Name& name) const {
+ const string str(accessor_->findPreviousName(zone_id_,
+ name.reverse().toText()));
+ try {
+ return (Name(str));
+ }
+ /*
+ * To avoid having the same code many times, we just catch all the
+ * exceptions and handle them in a common code below
+ */
+ catch (const isc::dns::EmptyLabel&) {}
+ catch (const isc::dns::TooLongLabel&) {}
+ catch (const isc::dns::BadLabelType&) {}
+ catch (const isc::dns::BadEscape&) {}
+ catch (const isc::dns::TooLongName&) {}
+ catch (const isc::dns::IncompleteName&) {}
+ isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+}
+
+Name
+DatabaseClient::Finder::getOrigin() const {
+ return (origin_);
+}
+
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+ // TODO Implement
+ return isc::dns::RRClass::IN();
+}
+
+namespace {
+
+/*
+ * Besides converting all data from their textual representation, this needs
+ * to group together rdata belonging to the same RRset. To do this, we hold
+ * one row of data ahead of the iteration. When we get a request to provide
+ * data, we create an RRset from the stored row and then load a new one. If
+ * the new row belongs to the same RRset, we add it; otherwise we return what
+ * we have and keep the new row as the look-ahead for next time.
+ */
+class DatabaseIterator : public ZoneIterator {
+public:
+ DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
+ const Name& zone_name,
+ const RRClass& rrclass) :
+ accessor_(accessor),
+ class_(rrclass),
+ ready_(true)
+ {
+ // Get the zone
+ const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
+ if (!zone.first) {
+ // No such zone, can't continue
+ isc_throw(DataSourceError, "Zone " + zone_name.toText() +
+ " can not be iterated, because it doesn't exist "
+ "in this data source");
+ }
+
+ // Start a separate transaction.
+ accessor_->startTransaction();
+
+ // Find the SOA of the zone (may or may not succeed). Note that
+ // this must be done before starting the iteration context.
+ soa_ = DatabaseClient::Finder(accessor_, zone.second, zone_name).
+ find(zone_name, RRType::SOA(), NULL).rrset;
+
+ // Request the context
+ context_ = accessor_->getAllRecords(zone.second);
+ // It must not return NULL; that would be a bug in the implementation
+ if (!context_) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ zone_name.toText());
+ }
+
+ // Prepare data for the next time
+ getData();
+ }
+
+ virtual ~DatabaseIterator() {
+ if (ready_) {
+ accessor_->commit();
+ }
+ }
+
+ virtual ConstRRsetPtr getSOA() const {
+ return (soa_);
+ }
+
+ virtual isc::dns::ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(isc::Unexpected, "Iterating past the zone end");
+ }
+ if (!data_ready_) {
+ // At the end of zone
+ accessor_->commit();
+ ready_ = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_ITERATE_END);
+ return (ConstRRsetPtr());
+ }
+ const string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+ const Name name(name_str);
+ const RRType rtype(rtype_str);
+ RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
+ while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
+ if (ttl_ != ttl) {
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ }
+ rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+ getData();
+ }
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
+ arg(rrset->getName()).arg(rrset->getType());
+ return (rrset);
+ }
+
+private:
+ // Load next row of data
+ void getData() {
+ string data[DatabaseAccessor::COLUMN_COUNT];
+ data_ready_ = context_->getNext(data);
+ name_ = data[DatabaseAccessor::NAME_COLUMN];
+ rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
+ ttl_ = data[DatabaseAccessor::TTL_COLUMN];
+ rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+ }
+
+ // The dedicated accessor
+ shared_ptr<DatabaseAccessor> accessor_;
+ // The context
+ DatabaseAccessor::IteratorContextPtr context_;
+ // Class of the zone
+ const RRClass class_;
+ // SOA of the zone, if any (it should normally exist)
+ ConstRRsetPtr soa_;
+ // Status
+ bool ready_, data_ready_;
+ // Data of the next row
+ string name_, rtype_, rdata_, ttl_;
+};
+
+}
+
+ZoneIteratorPtr
+DatabaseClient::getIterator(const isc::dns::Name& name) const {
+ ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
+ accessor_->clone(), name,
+ rrclass_));
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
+ arg(name);
+
+ return (iterator);
+}
+
+//
+// Zone updater using some database system as the underlying data source.
+//
+class DatabaseUpdater : public ZoneUpdater {
+public:
+ DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
+ const Name& zone_name, const RRClass& zone_class) :
+ committed_(false), accessor_(accessor), zone_id_(zone_id),
+ db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
+ zone_class_(zone_class),
+ finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
+ {
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ virtual ~DatabaseUpdater() {
+ if (!committed_) {
+ try {
+ accessor_->rollback();
+ logger.info(DATASRC_DATABASE_UPDATER_ROLLBACK)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ } catch (const DataSourceError& e) {
+ // We generally expect that rollback always succeeds, and
+ // it should in fact succeed in the way we use it here. But
+ // as the public API allows rollback() to fail and
+ // throw, we should expect it. Obviously we cannot re-throw
+ // it. The best we can do is to log it as a critical error.
+ logger.error(DATASRC_DATABASE_UPDATER_ROLLBACKFAIL)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_)
+ .arg(e.what());
+ }
+ }
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_DESTROYED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ virtual ZoneFinder& getFinder() { return (*finder_); }
+
+ virtual void addRRset(const RRset& rrset);
+ virtual void deleteRRset(const RRset& rrset);
+ virtual void commit();
+
+private:
+ bool committed_;
+ shared_ptr<DatabaseAccessor> accessor_;
+ const int zone_id_;
+ const string db_name_;
+ const string zone_name_;
+ const RRClass zone_class_;
+ boost::scoped_ptr<DatabaseClient::Finder> finder_;
+};
+
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Add attempt after commit to zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "added to " << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being added for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
+ columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
+ columns[DatabaseAccessor::ADD_REV_NAME] =
+ rrset.getName().reverse().toText();
+ columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
+ columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+ for (; !it->isLast(); it->next()) {
+ if (rrset.getType() == RRType::RRSIG()) {
+ // XXX: the current interface (based on the current sqlite3
+ // data source schema) requires a separate "sigtype" column,
+ // even though it won't be used in a newer implementation.
+ // We should eventually clean up the schema design and simplify
+ // the interface, but until then we have to conform to the schema.
+ const generic::RRSIG& rrsig_rdata =
+ dynamic_cast<const generic::RRSIG&>(it->getCurrent());
+ columns[DatabaseAccessor::ADD_SIGTYPE] =
+ rrsig_rdata.typeCovered().toText();
+ }
+ columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+ accessor_->addRecordToZone(columns);
+ }
+}
+
+void
+DatabaseUpdater::deleteRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Delete attempt after commit on zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "deleted from " << zone_name_ << "/" << zone_class_
+ << ": " << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being deleted for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
+ params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
+ params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+ for (; !it->isLast(); it->next()) {
+ params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+ accessor_->deleteRecordInZone(params);
+ }
+}
+
+void
+DatabaseUpdater::commit() {
+ if (committed_) {
+ isc_throw(DataSourceError, "Duplicate commit attempt for "
+ << zone_name_ << "/" << zone_class_ << " on "
+ << db_name_);
+ }
+ accessor_->commit();
+ committed_ = true; // make sure the destructor won't trigger rollback
+
+ // We release the accessor immediately after commit is completed so that
+ // we don't hold the possible internal resource any longer.
+ accessor_.reset();
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_COMMIT)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+}
+
+// The updater factory
+ZoneUpdaterPtr
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+ shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
+ const std::pair<bool, int> zone(update_accessor->startUpdateZone(
+ name.toText(), replace));
+ if (!zone.first) {
+ return (ZoneUpdaterPtr());
+ }
+
+ return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
+ name, rrclass_)));
+}
+}
+}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..b9379b7
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,892 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <dns/rrclass.h>
+#include <dns/rrset.h>
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+#include <map>
+#include <set>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of lowlevel database with DNS data
+ *
+ * This class defines the interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, without much or any knowledge
+ * about DNS, and it should be possible to translate them directly to queries.
+ *
+ * On the other hand, how the communication with the database is done and what
+ * schema is used (in case of a relational/SQL database) is up to the concrete
+ * classes.
+ *
+ * This class is non-copyable, as copying connections to database makes little
+ * sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ * iterator might need its own copy. But a virtual clone() method might
+ * be better for that than copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ * database, having multiple instances of this class. If the database
+ * allows having multiple open queries at one connection, the connection
+ * class may share it.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+ /**
+ * Definitions of the fields as they are required to be filled in
+ * by IteratorContext::getNext()
+ *
+ * When implementing getNext(), the columns array should
+ * be filled with the values as described in this enumeration,
+ * in this order, i.e. TYPE_COLUMN should be the first element
+ * (index 0) of the array, TTL_COLUMN should be the second element
+ * (index 1), etc.
+ */
+ enum RecordColumns {
+ TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
+ TTL_COLUMN = 1, ///< The TTL of the record (in numeric form)
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
+ ///< this field is ignored.
+ RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
+ NAME_COLUMN = 4, ///< The domain name of this RR
+ COLUMN_COUNT = 5 ///< The total number of columns, MUST be value of
+ ///< the largest other element in this enum plus 1.
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordToZone().
+ *
+ * Each derived implementation of addRecordToZone() should expect
+ * the "columns" array to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum AddRecordColumns {
+ ADD_NAME = 0, ///< The owner name of the record (a domain name)
+ ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+ ADD_TTL = 2, ///< The TTL of the record (in numeric form)
+ ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
+ ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers.
+ ADD_RDATA = 5, ///< Full text representation of the record's RDATA
+ ADD_COLUMN_COUNT = 6 ///< Number of columns
+ };
+
+ /**
+ * Definitions of the fields to be passed to deleteRecordInZone().
+ *
+ * Each derived implementation of deleteRecordInZone() should expect
+ * the "params" array to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DeleteRecordParams {
+ DEL_NAME = 0, ///< The owner name of the record (a domain name)
+ DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DEL_RDATA = 2, ///< Full text representation of the record's RDATA
+ DEL_PARAM_COUNT = 3 ///< Number of parameters
+ };
+
+ /**
+ * Operation mode when adding a record diff.
+ *
+ * This is used as the "operation" parameter value of addRecordDiff().
+ */
+ enum DiffOperation {
+ DIFF_ADD = 0, ///< This diff is for adding an RR
+ DIFF_DELETE = 1 ///< This diff is for deleting an RR
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordDiff().
+ *
+ * Each derived implementation of addRecordDiff() should expect
+ * the "params" array to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DiffRecordParams {
+ DIFF_NAME = 0, ///< The owner name of the record (a domain name)
+ DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DIFF_TTL = 2, ///< The TTL of the record (in numeric form)
+ DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+ DIFF_PARAM_COUNT = 4 ///< Number of parameters
+ };
+
+ /**
+ * \brief Destructor
+ *
+ * It is empty, but needs a virtual one, since we will use the derived
+ * classes in polymorphic way.
+ */
+ virtual ~DatabaseAccessor() { }
+
+ /**
+ * \brief Retrieve a zone identifier
+ *
+ * This method looks up a zone for the given name in the database. It
+ * should match only the exact zone name (i.e. the name is equal to the zone's
+ * apex), as the DatabaseClient will loop through the labels itself and
+ * find the most suitable zone.
+ *
+ * It is not specified if and what an implementation of this method may
+ * throw, so the caller should expect any kind of exception.
+ *
+ * \param name The (fully qualified) domain name of the zone's apex to be
+ * looked up.
+ * \return The first part of the result indicates whether a matching zone
+ * was found. If it was, the second part is the internal zone ID, which
+ * will be passed to the methods that look up data in the zone. The
+ * implementation is not required to keep track of these IDs in any
+ * particular way; the ID is only passed back to the database as an
+ * opaque handle.
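+ *
+ * For illustration, the result is typically used as follows (this mirrors
+ * what \c DatabaseClient::findZone() does; \c accessor is assumed to be a
+ * pointer to some concrete accessor implementation):
+ * \code
+ * const std::pair<bool, int> zone = accessor->getZone("example.org.");
+ * if (zone.first) {
+ *     // zone.second is the opaque zone ID to pass to getRecords() etc.
+ * }
+ * \endcode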
+ */
+ virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
+
+ /**
+ * \brief This holds the internal context of ZoneIterator for databases
+ *
+ * While the ZoneIterator implementation from DatabaseClient does all the
+ * translation from strings to DNS classes and validation, this class
+ * holds the pointer to where the database is at reading the data.
+ *
+ * It can either hold a shared pointer to the connection which created it
+ * and have some kind of statement inside (in case a single database
+ * connection can handle multiple concurrent SQL statements) or it can
+ * create a new connection (or, if it is more convenient, the connection
+ * itself can inherit both from DatabaseConnection and IteratorContext
+ * and just clone itself).
+ */
+ class IteratorContext : public boost::noncopyable {
+ public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor, so any descendant class is destroyed correctly.
+ */
+ virtual ~IteratorContext() { }
+
+ /**
+ * \brief Function to provide next resource record
+ *
+ * This function should provide data about the next resource record
+ * from the data that is searched. The data is not converted yet.
+ *
+ * Depending on how the iterator was constructed, there is a difference
+ * in behaviour; for a 'full zone iterator', created with
+ * getAllRecords(), all COLUMN_COUNT elements of the array are
+ * overwritten.
+ * For a 'name iterator', created with getRecords(), the column
+ * NAME_COLUMN is untouched, since what would be added here is by
+ * definition already known to the caller (it already passes it as
+ * an argument to getRecords()).
+ *
+ * Once this function returns false, any subsequent call to it should
+ * result in false. The implementation of a derived class must ensure
+ * that this doesn't cause any disruption, such as a crash or an
+ * unexpected exception.
+ *
+ * \note The order of RRs is not strictly set, but the RRs for a single
+ * RRset must not be interleaved with any other RRs (i.e. RRsets must be
+ * "together").
+ *
+ * \param columns The data will be returned through here. The order
+ * is specified by the RecordColumns enum, and the size must be
+ * COLUMN_COUNT
+ * \todo Do we consider databases where it is stored in binary blob
+ * format?
+ * \throw DataSourceError if there's database-related error. If the
+ * exception (or any other in case of derived class) is thrown,
+ * the iterator can't be safely used any more.
+ * \return true if a record was found, and the columns array was
+ * updated. false if there was no more data, in which case
+ * the columns array is untouched.
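+ *
+ * A typical caller-side loop looks like the following sketch (\c context
+ * is assumed to be an IteratorContextPtr obtained from getRecords() or
+ * getAllRecords()):
+ * \code
+ * std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ * while (context->getNext(columns)) {
+ *     // columns[TYPE_COLUMN], columns[TTL_COLUMN], etc. are now filled in
+ * }
+ * \endcode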
+ */
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
+ };
+
+ typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
+
+ /**
+ * \brief Creates an iterator context for a specific name.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * given name from the given zone.
+ *
+ * The implementation of the iterator that is returned may leave the
+ * NAME_COLUMN column of the array passed to getNext() untouched, as that
+ * data is already known (it is the same as the name argument here)
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param name The name to search for. This should be a FQDN.
+ * \param id The ID of the zone, returned from getZone().
+ * \param subdomains If set to true, match subdomains of name instead
+ * of name itself. It is used to find empty domains and match
+ * wildcards.
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id,
+ bool subdomains = false) const = 0;
+
+ /**
+ * \brief Creates an iterator context for the whole zone.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * zone with the given zone id.
+ *
+ * Each call to getNext() on the returned iterator should copy all
+ * column fields of the array that is passed, as defined in the
+ * RecordColumns enum.
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getAllRecords(int id) const = 0;
+
+ /// Start a transaction for updating a zone.
+ ///
+ /// Each derived class version of this method starts a database
+ /// transaction to make updates to the zone of the given name (whose class
+ /// was specified at the construction of this class).
+ ///
+ /// If \c replace is true, any existing records of the zone will be
+ /// deleted on successful completion of updates (after
+ /// \c commitUpdateZone()); if it's false, the existing records will be
+ /// intact unless explicitly deleted by \c deleteRecordInZone().
+ ///
+ /// A single \c DatabaseAccessor instance can perform at most one
+ /// transaction; a duplicate call to this method before
+ /// \c commitUpdateZone() or \c rollbackUpdateZone(), or a call to this
+ /// method within another transaction started by \c startTransaction()
+ /// will result in a \c DataSourceError exception.
+ /// If multiple update attempts need to be performed concurrently (and
+ /// if the underlying database allows such an operation), separate
+ /// \c DatabaseAccessor instances must be created.
+ ///
+ /// \note The underlying database may not allow concurrent updates to
+ /// the same database instance even if different "connections" (or
+ /// something similar specific to the database implementation) are used
+ /// for different sets of updates. For example, it doesn't seem to be
+ /// possible for SQLite3 unless different databases are used. MySQL
+ /// allows concurrent updates to different tables of the same database,
+ /// but a specific operation may block others. As such, this interface
+ /// doesn't require derived classes to allow concurrent updates with
+ /// multiple \c DatabaseAccessor instances; however, the implementation
+ /// is encouraged to do the best for making it more likely to succeed
+ /// as long as the underlying database system allows concurrent updates.
+ ///
+ /// This method returns a pair of \c bool and \c int. Its first element
+ /// indicates whether the given name of zone is found. If it's false,
+ /// the transaction isn't considered to be started; a subsequent call to
+ /// this method with an existing zone name should succeed. Likewise,
+ /// if a call to this method results in an exception, the transaction
+ /// isn't considered to be started. Note also that if the zone is not
+ /// found this method doesn't try to create a new one in the database.
+ /// It must have been created by some other means beforehand.
+ ///
+ /// The second element is the internal zone ID used for subsequent
+ /// updates. Depending on implementation details of the actual derived
+ /// class method, it may be different from the one returned by
+ /// \c getZone(); for example, a specific implementation may use a
+ /// completely new zone ID when \c replace is true.
+ ///
+ /// \exception DataSourceError Duplicate call to this method, call to
+ /// this method within another transaction, or some internal database
+ /// related error.
+ ///
+ /// \param zone_name A string representation of the zone name to be updated
+ /// \param replace Whether to replace the entire zone (see above)
+ ///
+ /// \return A pair of bool and int, indicating whether the specified zone
+ /// exists and (if so) the zone ID to be used for the update, respectively.
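+ ///
+ /// The following is a rough sketch of a caller-side sequence (for
+ /// illustration only; \c accessor is assumed to be a concrete
+ /// \c DatabaseAccessor and error handling is omitted):
+ /// \code
+ /// const std::pair<bool, int> zone =
+ ///     accessor->startUpdateZone("example.org.", true);
+ /// if (zone.first) {
+ ///     // the zone exists; records can now be added with addRecordToZone()
+ ///     // or removed with deleteRecordInZone() within this transaction
+ /// }
+ /// \endcode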
+ virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace) = 0;
+
+ /// Add a single record to the zone to be updated.
+ ///
+ /// This method provides a simple interface to insert a new record
+ /// (a database "row") to the zone in the update context started by
+ /// \c startUpdateZone(). The zone to which the record is to be added
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The row is defined as a vector of strings that has exactly
+ /// ADD_COLUMN_COUNT number of elements. See AddRecordColumns for
+ /// the semantics of each element.
+ ///
+ /// Derived class methods are not required to check whether the given
+ /// values in \c columns are valid in terms of the expected semantics;
+ /// in general, it's the caller's responsibility.
+ /// For example, TTLs would normally be expected to be a textual
+ /// representation of decimal numbers, but this interface doesn't require
+ /// the implementation to perform this level of validation. It may check
+ /// the values, however, and in that case if it detects an error it
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// Likewise, derived class methods are not required to detect any
+ /// duplicate record that is already in the zone.
+ ///
+ /// \note The underlying database schema may not have a trivial mapping
+ /// from this style of definition of rows to actual database records.
+ /// It's the implementation's responsibility to implement the mapping
+ /// in the actual derived method.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param columns An array of strings that defines a record to be added
+ /// to the zone.
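+ ///
+ /// A minimal sketch of adding one record, assuming \c accessor is a
+ /// \c DatabaseAccessor for which \c startUpdateZone() has already been
+ /// called successfully:
+ /// \code
+ /// std::string columns[DatabaseAccessor::ADD_COLUMN_COUNT];
+ /// // fill in each element as defined by AddRecordColumns, e.g. the
+ /// // owner name, TTL, RR type and RDATA of the record in textual form
+ /// accessor->addRecordToZone(columns);
+ /// \endcode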
+ virtual void addRecordToZone(
+ const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
+
+ /// Delete a single record from the zone to be updated.
+ ///
+ /// This method provides a simple interface to delete a record
+ /// (a database "row") from the zone in the update context started by
+ /// \c startUpdateZone(). The zone from which the record is to be deleted
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The record to be deleted is specified by an array of strings that has
+ /// exactly DEL_PARAM_COUNT elements. See DeleteRecordParams
+ /// for the semantics of each element.
+ ///
+ /// \note In IXFR, TTL may also be specified, but we intentionally
+ /// ignore that in this interface, because it's not guaranteed
+ /// that all records have the same TTL (unlike the RRset
+ /// assumption) and there can even be multiple records for the
+ /// same name, type and rdata with different TTLs. If we only
+ /// delete one of them, subsequent lookup will still return a
+ /// positive answer, which would be confusing. It's a higher
+ /// layer's responsibility to check if there is at least one
+ /// record in the database that has the given TTL.
+ ///
+ /// Like \c addRecordToZone(), derived class methods are not required to
+ /// validate the semantics of the given parameters or to check whether
+ /// a record matching the specified parameters exists; if there is no
+ /// such record, the call simply has no effect.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param params An array of strings that defines a record to be deleted
+ /// from the zone.
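+ ///
+ /// A minimal sketch of deleting one record, again assuming an update
+ /// transaction has already been started on \c accessor:
+ /// \code
+ /// std::string params[DatabaseAccessor::DEL_PARAM_COUNT];
+ /// // fill in the elements as defined by DeleteRecordParams (the name,
+ /// // type and RDATA identifying the record, in textual form)
+ /// accessor->deleteRecordInZone(params);
+ /// \endcode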
+ virtual void deleteRecordInZone(
+ const std::string (¶ms)[DEL_PARAM_COUNT]) = 0;
+
+ /// Start a general transaction.
+ ///
+ /// Each derived class version of this method starts a database
+ /// transaction in a way specific to the database details. Any subsequent
+ /// operations on the accessor are guaranteed not to be affected by
+ /// any update attempts made by others during the transaction. The transaction
+ /// must be terminated by either \c commit() or \c rollback().
+ ///
+ /// In practice, this transaction is intended to be used to perform
+ /// a set of atomic reads and work as a read-only lock. So, in many
+ /// cases \c commit() and \c rollback() will have the same effect.
+ ///
+ /// This transaction cannot coexist with an update transaction started
+ /// by \c startUpdateZone(). Such an attempt will result in
+ /// \c DataSourceError.
+ ///
+ /// \exception DataSourceError An attempt of nested transaction, or some
+ /// internal database related error.
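+ ///
+ /// A minimal sketch of the intended read-only usage, assuming \c accessor
+ /// is a \c DatabaseAccessor with no other transaction in progress:
+ /// \code
+ /// accessor->startTransaction();
+ /// try {
+ ///     // perform the set of atomic reads here
+ ///     accessor->commit();
+ /// } catch (...) {
+ ///     accessor->rollback();
+ ///     throw;
+ /// }
+ /// \endcode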
+ virtual void startTransaction() = 0;
+
+ /// Commit a transaction.
+ ///
+ /// This method completes a transaction started by \c startTransaction
+ /// or \c startUpdateZone.
+ ///
+ /// A successful call to one of the "start" methods must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// If some internal database error happens, a \c DataSourceError
+ /// exception must be thrown. In that case the transaction is still
+ /// considered to be valid; the caller must explicitly roll it back
+ /// or (if it's confident that the error is temporary) try to commit it
+ /// again.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void commit() = 0;
+
+ /// Rollback any changes in a transaction made so far.
+ ///
+ /// This method rolls back a transaction started by \c startTransaction or
+ /// \c startUpdateZone. When it succeeds (it normally should, but see
+ /// below), the underlying database should be reverted to the point
+ /// before performing the corresponding "start" method.
+ ///
+ /// A successful call to one of the "start" methods must have preceded
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// Normally this method should not fail. But it may not always be
+ /// possible to guarantee it depending on the characteristics of the
+ /// underlying database system. So this interface doesn't require the
+ /// actual implementation to be error free. But if a specific
+ /// implementation of this method can fail, it is encouraged to document
+ /// when that can happen and what the implications are.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void rollback() = 0;
+
+ /// Install a single RR diff in difference sequences for zone update.
+ ///
+ /// This method inserts parameters of an update operation for a single RR
+ /// (either adding or deleting one) in the underlying database.
+ /// (These parameters would normally be stored in a separate database
+ /// table, but the actual realization can differ between implementations.)
+ /// The information given via this method generally corresponds to either
+ /// a single call to \c addRecordToZone() or \c deleteRecordInZone(),
+ /// and this method is expected to be called immediately after (or before)
+ /// a call to either of those methods.
+ ///
+ /// Note, however, that this method passes more detailed information
+ /// than those update methods: it passes "serial", even if the diff
+ /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
+ /// while in \c deleteRecordInZone() it's omitted. This is because
+ /// the stored diffs are expected to be retrieved in the form that
+ /// \c getRecordDiffs() is expected to return. This means if the caller
+ /// wants to use this method with other update operations, it must
+ /// ensure the additional information is ready when this method is called.
+ ///
+ /// \note \c getRecordDiffs() is not yet implemented.
+ ///
+ /// The caller of this method must ensure that the diffs added via
+ /// this method in a single transaction form IXFR-style difference
+ /// sequences: Each difference sequence is a sequence of RRs:
+ /// an older version of SOA (to be deleted), zero or more other deleted
+ /// RRs, the post-transaction SOA (to be added), and zero or more other
+ /// added RRs. So, for example, the first call to this method in a
+ /// transaction must always be deleting an SOA. Also, the \c serial
+ /// parameter must be equal to the value of the serial field of the
+ /// SOA that was last added or deleted (if the call is to add or delete
+ /// an SOA RR, \c serial must be identical to the serial of that SOA).
+ /// The underlying derived class implementation may or may not check
+ /// this condition, but if the caller doesn't meet the condition
+ /// a subsequent call to \c getRecordDiffs() will not work as expected.
+ ///
+ /// Any call to this method must be in a transaction, and, for now,
+ /// it must be a transaction triggered by \c startUpdateZone() (that is,
+ /// it cannot be a transaction started by \c startTransaction()).
+ /// All calls to this method are considered to be part of an atomic
+ /// transaction: Until \c commit() is performed, the added diffs are
+ /// not visible outside the transaction; if \c rollback() is performed,
+ /// all added diffs are canceled; and the added sequences are not
+ /// affected by any concurrent attempt of adding diffs (conflict resolution
+ /// is up to the database implementation).
+ ///
+ /// Also for now, all diffs are assumed to be for the zone that is
+ /// being updated in the context of \c startUpdateZone(). So the
+ /// \c zone_id parameter must be identical to the zone ID returned by
+ /// \c startUpdateZone().
+ ///
+ /// In a future version we may loosen this condition so that diffs can be
+ /// added in a generic transaction and may not even have to belong to
+ /// a single zone. For this possible extension the \c zone_id parameter is
+ /// included even if it's redundant under the current restriction.
+ ///
+ /// The support for adding (or retrieving) diffs is optional; if it's
+ /// not supported in a specific data source, this method for the
+ /// corresponding derived class will throw a \c NotImplemented exception.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// zone ID doesn't match the zone being updated, or other internal
+ /// database error.
+ /// \exception NotImplemented Adding diffs is not supported in the
+ /// data source.
+ /// \exception Other The concrete derived method may throw other
+ /// data source specific exceptions.
+ ///
+ /// \param zone_id The zone for the diff to be added.
+ /// \param serial The SOA serial to which the diff belongs.
+ /// \param operation Either \c DIFF_ADD or \c DIFF_DELETE.
+ /// \param params An array of strings that defines a record for the diff.
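+ ///
+ /// The following sketch illustrates the expected ordering of calls for a
+ /// single difference sequence. The \c old_soa_params and \c new_soa_params
+ /// variables are hypothetical arrays of DIFF_PARAM_COUNT strings describing
+ /// the pre- and post-transaction SOAs, and \c old_serial and \c new_serial
+ /// are their respective serial values:
+ /// \code
+ /// // delete the old SOA first
+ /// accessor->addRecordDiff(zone_id, old_serial,
+ ///                         DatabaseAccessor::DIFF_DELETE, old_soa_params);
+ /// // ... zero or more other deletions, also passing old_serial ...
+ /// // then add the new SOA
+ /// accessor->addRecordDiff(zone_id, new_serial,
+ ///                         DatabaseAccessor::DIFF_ADD, new_soa_params);
+ /// // ... zero or more other additions, also passing new_serial ...
+ /// \endcode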
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT]) = 0;
+
+ /// Clone the accessor with the same configuration.
+ ///
+ /// Each derived class implementation of this method will create a new
+ /// accessor of the same derived class with the same configuration
+ /// (such as the database server address) as that of the caller object
+ /// and return it.
+ ///
+ /// Note that other internal states won't be copied to the new accessor
+ /// even though the name of "clone" may indicate so. For example, even
+ /// if the calling accessor is in the middle of an update transaction,
+ /// the new accessor will not start a transaction to track the same
+ /// updates.
+ ///
+ /// The intended use case of cloning is to create a separate context
+ /// where a specific set of database operations can be performed
+ /// independently from the original accessor. The updater will use it
+ /// so that multiple updaters can be created concurrently even if the
+ /// underlying database system doesn't allow running multiple transactions
+ /// in a single database connection.
+ ///
+ /// The underlying database system may not support the functionality
+ /// that would be needed to implement this method. For example, it
+ /// may not allow a single thread (or process) to have more than one
+ /// database connection. In such a case the derived class implementation
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// \return A shared pointer to the cloned accessor.
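+ ///
+ /// A minimal sketch of the intended use described above (error handling
+ /// omitted):
+ /// \code
+ /// boost::shared_ptr<DatabaseAccessor> separate_accessor(accessor->clone());
+ /// // The clone shares the configuration but not the internal state, so
+ /// // it can start its own update transaction independently:
+ /// separate_accessor->startUpdateZone("example.org.", true);
+ /// \endcode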
+ virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
+
+ /**
+ * \brief Returns a string identifying this database backend
+ *
+ * The returned string is mainly intended to be used for
+ * debugging/logging purposes.
+ *
+ * Any implementation is free to choose the exact string content,
+ * but it is advisable to make it a name that is distinguishable
+ * from the others.
+ *
+ * \return the name of the database
+ */
+ virtual const std::string& getDBName() const = 0;
+
+ /**
+ * \brief It returns the previous name in DNSSEC order.
+ *
+ * This is used in DatabaseClient::findPreviousName and does more
+ * or less the real work, except for working on strings.
+ *
+ * \param rname The name to find the previous name of, in reversed form.
+ * We use the reversed form (see isc::dns::Name::reverse),
+ * because then the case insensitive order of string representation
+ * and the DNSSEC order correspond (eg. org.example.a is followed
+ * by org.example.a.b which is followed by org.example.b, etc).
+ * \param zone_id The zone to look through.
+ * \return The previous name.
+ * \note This function must return the previous name even if
+ * the queried rname does not exist in the zone.
+ * \note This method must skip under-the-zone-cut data (glue data).
+ * This might be implemented by looking for NSEC records (as glue
+ * data don't have them) in the zone or in some other way.
+ *
+ * \throw DataSourceError if there's a problem with the database.
+ * \throw NotImplemented if this database doesn't support DNSSEC
+ * or there's no previous name for the queried one (the NSECs
+ * might be missing or the queried name is less than or equal to the
+ * apex of the zone).
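+ *
+ * A minimal sketch of the intended call pattern, assuming \c accessor is
+ * a \c DatabaseAccessor and \c zone_id was previously obtained from
+ * \c getZone():
+ * \code
+ * const isc::dns::Name qname("a.example.org");
+ * const std::string prev =
+ *     accessor->findPreviousName(zone_id, qname.reverse().toText());
+ * \endcode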
+ */
+ virtual std::string findPreviousName(int zone_id,
+ const std::string& rname) const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It calls multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to be just simple translation to SQL/other
+ * queries to database.
+ *
+ * While it is possible to subclass it for a specific database in case
+ * of special needs, that is not expected to be necessary. It should just
+ * work as is with any DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+ /**
+ * \brief Constructor
+ *
+ * It initializes the client with a database via the given accessor.
+ *
+ * \exception isc::InvalidParameter if accessor is NULL. It might throw
+ * a standard allocation exception as well, but doesn't throw anything else.
+ *
+ * \param rrclass The RR class of the zones that this client will handle.
+ * \param accessor The accessor to the database to use to get data.
+ * As the parameter type suggests, the client takes shared ownership of the
+ * accessor and will keep it alive as long as the client itself exists.
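+ *
+ * A minimal construction sketch, assuming a hypothetical concrete accessor
+ * class named SomeConcreteAccessor:
+ * \code
+ * boost::shared_ptr<DatabaseAccessor> accessor(new SomeConcreteAccessor());
+ * DatabaseClient client(isc::dns::RRClass::IN(), accessor);
+ * \endcode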
+ */
+ DatabaseClient(isc::dns::RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor> accessor);
+
+ /**
+ * \brief Corresponding ZoneFinder implementation
+ *
+ * The zone finder implementation for database data sources. Similarly
+ * to the DatabaseClient, it translates the queries to methods of the
+ * database.
+ *
+ * Applications should not come directly in contact with this class
+ * (they should handle it through a generic ZoneFinder pointer), therefore
+ * it could be completely hidden in the .cc file. But it is provided
+ * to allow testing and for rare cases when a database needs slightly
+ * different handling, so it can be subclassed.
+ *
+ * Methods directly correspond to the ones in ZoneFinder.
+ */
+ class Finder : public ZoneFinder {
+ public:
+ /**
+ * \brief Constructor
+ *
+ * \param database The database (shared with DatabaseClient) to
+ * be used for queries (the one previously asked for the zone ID).
+ * \param zone_id The zone ID which was returned from
+ * DatabaseAccessor::getZone and which will be passed to further
+ * calls to the database.
+ * \param origin The name of the origin of this zone. It could be queried
+ * from the database, but as the DatabaseClient has just searched for
+ * the zone using this name, it should already have it.
+ */
+ Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+ const isc::dns::Name& origin);
+ // The following three methods are just implementations of inherited
+ // ZoneFinder's pure virtual methods.
+ virtual isc::dns::Name getOrigin() const;
+ virtual isc::dns::RRClass getClass() const;
+
+ /**
+ * \brief Find an RRset in the datasource
+ *
+ * Searches the datasource for an RRset of the given name and
+ * type. If there is a CNAME at the given name, the CNAME rrset
+ * is returned.
+ * (This implementation is not complete, and currently only
+ * handles full matches, CNAMEs, and the signatures for matches and
+ * CNAMEs.)
+ * \note target was used in the original design to handle ANY
+ * queries. This is not implemented yet, and may use
+ * target again for that, but it might also use something
+ * different. It is left in for compatibility at the moment.
+ * \note options are ignored at this moment
+ *
+ * \note Perhaps counter-intuitively, this method is not a const member
+ * function. This is intentional; some of the underlying implementations
+ * are expected to use a database backend, and would internally contain
+ * some abstraction of "database connection". In the most strict sense
+ * any (even read only) operation might change the internal state of
+ * such a connection, and in that sense the operation cannot be considered
+ * "const". In order to avoid giving a false sense of safety to the
+ * caller, we indicate a call to this method may have a surprising
+ * side effect. That said, this view may be too strict and it may
+ * make sense to say the internal database connection doesn't affect
+ * external behavior in terms of the interface of this method. As
+ * we gain more experience with various kinds of backends we may
+ * revisit the constness.
+ *
+ * \exception DataSourceError when there is a problem reading
+ * the data from the database backend.
+ * This can be a connection, code, or
+ * data (parse) error.
+ *
+ * \param name The name to find
+ * \param type The RRType to find
+ * \param target Unused at this moment
+ * \param options Options about how to search.
+ * See ZoneFinder::FindOptions.
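+ *
+ * A minimal call sketch, assuming \c finder was obtained from
+ * \c DatabaseClient::findZone() and that the result members follow the
+ * generic ZoneFinder interface:
+ * \code
+ * ZoneFinder::FindResult result = finder->find(
+ *     isc::dns::Name("www.example.org"), isc::dns::RRType::A());
+ * if (result.code == ZoneFinder::SUCCESS) {
+ *     // result.rrset holds the found RRset
+ * }
+ * \endcode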
+ */
+ virtual FindResult find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList* target = NULL,
+ const FindOptions options = FIND_DEFAULT);
+
+ /**
+ * \brief Implementation of ZoneFinder::findPreviousName method.
+ */
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const;
+
+ /**
+ * \brief The zone ID
+ *
+ * This function provides the stored zone ID as passed to the
+ * constructor. This is meant for testing purposes and normal
+ * applications shouldn't need it.
+ */
+ int zone_id() const { return (zone_id_); }
+
+ /**
+ * \brief The database accessor.
+ *
+ * This function provides the database accessor stored inside as
+ * passed to the constructor. This is meant for testing purposes and
+ * normal applications shouldn't need it.
+ */
+ const DatabaseAccessor& getAccessor() const {
+ return (*accessor_);
+ }
+ private:
+ boost::shared_ptr<DatabaseAccessor> accessor_;
+ const int zone_id_;
+ const isc::dns::Name origin_;
+ //
+ /// \brief Shortcut name for the result of getRRsets
+ typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
+ FoundRRsets;
+ /// \brief Just shortcut for set of types
+ typedef std::set<dns::RRType> WantedTypes;
+ /**
+ * \brief Searches database for RRsets of one domain.
+ *
+ * This method scans RRs of a single domain specified by name and
+ * extracts any RRsets found and requested by parameters.
+ *
+ * It is used internally by find(), because it is called multiple
+ * times (usually with different domains).
+ *
+ * \param name Which domain name should be scanned.
+ * \param types List of types the caller is interested in.
+ * \param check_ns If this is set to true, it checks that nothing lives
+ * together with an NS record (with a few exceptions, like RRSIG
+ * or NSEC). This check is meant for non-apex NS records.
+ * \param construct_name If this is NULL, the resulting RRsets have
+ * their name set to name. If it is not NULL, it overrides the name
+ * and uses this one (this can be used for wildcard synthesized
+ * records).
+ * \return A pair, where the first element indicates if the domain
+ * contains any RRs at all (not only the requested ones; it may happen
+ * that this is set to true while the second part is empty). The second
+ * part is a map from RRTypes to RRsets of the corresponding types.
+ * If an RRset is not present in the DB, its RRType is not in the map
+ * at all (so you will not find a NULL pointer in the result).
+ * \throw DataSourceError If there's a low-level error with the
+ * database or the database contains bad data.
+ */
+ FoundRRsets getRRsets(const std::string& name,
+ const WantedTypes& types, bool check_ns,
+ const std::string* construct_name = NULL);
+ /**
+ * \brief Checks if something lives below this domain.
+ *
+ * This checks whether there's any subdomain of the given name. It can be
+ * used to test if a domain is an empty non-terminal.
+ *
+ * \param name The domain to check.
+ */
+ bool hasSubdomains(const std::string& name);
+
+ /**
+ * \brief Get the NSEC covering a name.
+ *
+ * This one calls findPreviousName on the given name and extracts an NSEC
+ * record from the result. It handles various error cases. The method exists
+ * to share code used in more than one location.
+ */
+ dns::RRsetPtr findNSECCover(const dns::Name& name);
+
+ /**
+ * \brief Convenience type shortcut.
+ *
+ * To find stuff in the result of getRRsets.
+ */
+ typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
+ FoundIterator;
+ };
+
+ /**
+ * \brief Find a zone in the database
+ *
+ * This queries the database's getZone() to find the best matching zone.
+ * It will propagate whatever exceptions are thrown from that method
+ * (which is not restricted in any way).
+ *
+ * \param name Name of the zone or data contained there.
+ * \return FindResult containing the code and an instance of Finder, if
+ * anything is found. However, the application should not rely on the
+ * ZoneFinder being an instance of Finder (a possible subclass of this
+ * class may return something else and it may change in future versions);
+ * it should use it as a ZoneFinder only.
+ */
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+ /**
+ * \brief Get the zone iterator
+ *
+ * The iterator allows going through the whole zone content. If the
+ * underlying DatabaseAccessor is implemented correctly, it should
+ * be possible to have multiple ZoneIterators at once and query data
+ * at the same time.
+ *
+ * \exception DataSourceError if the zone doesn't exist.
+ * \exception isc::NotImplemented if the underlying DatabaseAccessor
+ * doesn't implement iteration. But in case it is not implemented
+ * and the zone doesn't exist, DataSourceError is thrown.
+ * \exception Anything else the underlying DatabaseAccessor might
+ * want to throw.
+ * \param name The origin of the zone to iterate.
+ * \return Shared pointer to the iterator (it will never be NULL)
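+ *
+ * A minimal iteration sketch, assuming \c client is a DatabaseClient and
+ * that the generic ZoneIterator interface provides getNext(), returning a
+ * null RRset pointer at the end of the zone:
+ * \code
+ * ZoneIteratorPtr it = client.getIterator(isc::dns::Name("example.org"));
+ * while (isc::dns::ConstRRsetPtr rrset = it->getNext()) {
+ *     // process rrset
+ * }
+ * \endcode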
+ */
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// This implementation internally clones the accessor from the one
+ /// used in the client and starts a separate transaction using the cloned
+ /// accessor. The returned updater will be able to work separately from
+ /// the original client.
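+ ///
+ /// A minimal usage sketch, assuming the generic ZoneUpdater interface
+ /// provides addRRset() and commit(), and that \c some_rrset is a prepared
+ /// isc::dns::RRsetPtr (both assumptions; neither is defined in this header):
+ /// \code
+ /// ZoneUpdaterPtr updater = client.getUpdater(isc::dns::Name("example.org"),
+ ///                                            false);
+ /// updater->addRRset(*some_rrset);
+ /// updater->commit();
+ /// \endcode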
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
+
+private:
+ /// \brief The RR class that this client handles.
+ const isc::dns::RRClass rrclass_;
+
+ /// \brief The accessor to our database.
+ const boost::shared_ptr<DatabaseAccessor> accessor_;
+};
+
+}
+}
+
+#endif // __DATABASE_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
new file mode 100644
index 0000000..04ad610
--- /dev/null
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -0,0 +1,632 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::datasrc
+
+# \brief Messages for the data source library
+
+% DATASRC_CACHE_CREATE creating the hotspot cache
+This is a debug message issued during startup when the hotspot cache
+is created.
+
+% DATASRC_CACHE_DESTROY destroying the hotspot cache
+Debug information. The hotspot cache is being destroyed.
+
+% DATASRC_CACHE_DISABLE disabling the hotspot cache
+A debug message issued when the hotspot cache is disabled.
+
+% DATASRC_CACHE_ENABLE enabling the hotspot cache
+A debug message issued when the hotspot cache is enabled.
+
+% DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
+
+% DATASRC_CACHE_FOUND the item '%1' was found
+Debug information. An item was successfully located in the hotspot cache.
+
+% DATASRC_CACHE_FULL hotspot cache is full, dropping oldest
+Debug information. After inserting an item into the hotspot cache, the
+maximum number of items was exceeded, so the least recently used item will
+be dropped. This should be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache
+A debug message indicating that a new item is being inserted into the hotspot
+cache.
+
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
+
+% DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing
+Debug information. While inserting an item into the hotspot cache, an older
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+
+% DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache
+Debug information. An item is being removed from the hotspot cache.
+
+% DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items
+The maximum allowed number of items of the hotspot cache is set to the given
+number. If there are too many, some of them will be dropped. The size of 0
+means no limit.
+
+% DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+
+% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+
+% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+
+% DATASRC_DATABASE_ITERATE iterating zone %1
+The program is reading the whole zone, eg. not searching for data, but going
+through each of the RRsets there.
+
+% DATASRC_DATABASE_ITERATE_END iterating zone finished
+While iterating through the zone, the program reached end of the data.
+
+% DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2
+While iterating through the zone, the program extracted the next RRset from
+it. The name and RRtype of the RRset are indicated in the message.
+
+% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
+While iterating through the zone, the TTL values for RRs of the given RRset
+were found to differ. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
+
+% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+The database was queried to provide glue data and it didn't find a direct match.
+It could create it from the given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+
+% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
+The given wildcard exists implicitly in the domainspace, as empty nonterminal
+(eg. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
+
+% DATASRC_DO_QUERY handling query for '%1/%2'
+A debug message indicating that a query for the given name and RR type is being
+processed.
+
+% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
+Debug information. An RRset is being added to the in-memory data source.
+
+% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
+
+% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
+Debug information. A zone is being added into the in-memory data source.
+
+% DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful
+Debug information. The domain was found and an ANY type query is being answered
+by providing everything found inside the domain.
+
+% DATASRC_MEM_CNAME CNAME at the domain '%1'
+Debug information. The requested domain is an alias to a different domain,
+returning the CNAME instead.
+
+% DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
+This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
+other way around -- adding some other data to CNAME.
+
+% DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
+Someone or something tried to add a CNAME into a domain that already contains
+some other data. But the protocol forbids coexistence of CNAME with anything
+(RFC 1034, section 3.6.2). This indicates a problem with provided data.
+
+% DATASRC_MEM_CREATE creating zone '%1' in '%2' class
+Debug information. A representation of a zone for the in-memory data source is
+being created.
+
+% DATASRC_MEM_DELEG_FOUND delegation found at '%1'
+Debug information. A delegation point was found above the requested record.
+
+% DATASRC_MEM_DESTROY destroying zone '%1' in '%2' class
+Debug information. A zone from in-memory data source is being destroyed.
+
+% DATASRC_MEM_DNAME_ENCOUNTERED encountered a DNAME
+Debug information. While searching for the requested domain, a DNAME was
+encountered on the way. This may lead to redirection to a different domain and
+stop the search.
+
+% DATASRC_MEM_DNAME_FOUND DNAME found at '%1'
+Debug information. A DNAME was found instead of the requested information.
+
+% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
+
+% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
+Debug information. The requested domain exists in the tree of domains, but
+it is empty. Therefore it doesn't contain the requested resource type.
+
+% DATASRC_MEM_DUP_RRSET duplicate RRset '%1/%2'
+An RRset is being inserted into in-memory data source for a second time. The
+original version must be removed first. Note that loading master files where an
+RRset is split into multiple locations is not supported yet.
+
+% DATASRC_MEM_EXACT_DELEGATION delegation at the exact domain '%1'
+Debug information. There's a NS record at the requested domain. This means
+this zone is not authoritative for the requested domain, but a delegation
+should be followed. The requested domain is an apex of some zone.
+
+% DATASRC_MEM_FIND find '%1/%2'
+Debug information. A search for the requested RRset is being started.
+
+% DATASRC_MEM_FIND_ZONE looking for zone '%1'
+Debug information. A zone object for this zone is being searched for in the
+in-memory data source.
+
+% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
+Debug information. The content of master file is being loaded into the memory.
+
+% DATASRC_MEM_NOT_FOUND requested domain '%1' not found
+Debug information. The requested domain does not exist.
+
+% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
+Debug information. While searching for the requested domain, an NS was
+encountered on the way (a delegation). This may lead to the search stopping.
+
+% DATASRC_MEM_NXRRSET no such type '%1' at '%2'
+Debug information. The domain exists, but it doesn't hold any record of the
+requested type.
+
+% DATASRC_MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
+An attempt was made to add the domain into a zone that shouldn't have it
+(eg. the domain is not a subdomain of the zone origin). This indicates a
+problem with provided data.
+
+% DATASRC_MEM_RENAME renaming RRset from '%1' to '%2'
+Debug information. An RRset is being generated from a different RRset (most
+probably a wildcard). So it must be renamed to whatever the user asked for. In
+fact, it's impossible to rename RRsets with our libraries, so a new one is
+created and all resource records are copied over.
+
+% DATASRC_MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
+Some resource types are singletons -- only one is allowed in a domain
+(for example CNAME or SOA). This indicates a problem with provided data.
+
+% DATASRC_MEM_SUCCESS query for '%1/%2' successful
+Debug information. The requested record was found.
+
+% DATASRC_MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
+Debug information. The search stopped at a superdomain of the requested
+domain. The domain is an empty nonterminal, therefore it is treated as an NXRRSET
+case (eg. the domain exists, but it doesn't have the requested record type).
+
+% DATASRC_MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
+Debug information. The contents of two in-memory zones are being exchanged.
+This is the usual practice for doing manipulation in an exception-safe manner -- the
+new data are prepared in a different zone object and when it works, they are
+swapped. The old one contains the new data and the other one can be safely
+destroyed.
+
+% DATASRC_MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
+Debug information. A domain above wildcard was reached, but there's something
+below the requested domain. Therefore the wildcard doesn't apply here. This
+behaviour is specified by RFC 1034, section 4.3.3
+
+% DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'
+The software refuses to load DNAME records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'
+The software refuses to load NS records into a wildcard domain. It isn't
+explicitly forbidden, but the protocol is ambiguous about how this should
+behave and BIND 9 refuses that as well. Please describe your intention using
+different tools.
+
+% DATASRC_META_ADD adding a data source into meta data source
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
+
+% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
+It was attempted to add a data source into a meta data source, but their
+classes do not match.
+
+% DATASRC_META_REMOVE removing data source from meta data source
+Debug information. A data source is being removed from meta data source.
+
+% DATASRC_QUERY_ADD_NSEC adding NSEC record for '%1'
+Debug information. A NSEC record covering this zone is being added.
+
+% DATASRC_QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
+Debug information. A NSEC3 record for the given zone is being added to the
+response message.
+
+% DATASRC_QUERY_ADD_RRSET adding RRset '%1/%2' to message
+Debug information. An RRset is being added to the response message.
+
+% DATASRC_QUERY_ADD_SOA adding SOA of '%1'
+Debug information. A SOA record of the given zone is being added to the
+authority section of the response message.
+
+% DATASRC_QUERY_AUTH_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the authoritative query. 1 means
+some error, 2 is not implemented. The data source should have logged the
+specific error already.
+
+% DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'
+The domain lives in another zone. But it is not possible to generate referral
+information for it.
+
+% DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache
+Debug information. The requested data were found in the hotspot cache, so
+no query is sent to the real data source.
+
+% DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'
+Debug information. While processing a query, lookup to the hotspot cache
+is being made.
+
+% DATASRC_QUERY_COPY_AUTH copying authoritative section into message
+Debug information. The whole referral information is being copied into the
+response message.
+
+% DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'
+Debug information. The software is trying to identify delegation points on the
+way down to the given domain.
+
+% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
+
+% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
+During an attempt to synthesize CNAME from this DNAME it was discovered the
+DNAME is empty (it has no records). This indicates a problem with supplied data.
+
+% DATASRC_QUERY_FAIL query failed
+Some subtask of query processing failed. The reason should have been reported
+already and a SERVFAIL will be returned to the querying system.
+
+% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
+
+% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
+Debug information. While processing a query, an MX record was encountered. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
+Debug information. While processing a query, an NS record was encountered. It
+references the mentioned address, so A/AAAA records for it are looked up
+and put into the additional section.
+
+% DATASRC_QUERY_GLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the glue query. 1 means some error,
+2 is not implemented. The data source should have logged the specific error
+already.
+
+% DATASRC_QUERY_INVALID_OP invalid query operation requested
+This indicates a programmer error. The DO_QUERY was called with an unknown
+operation code.
+
+% DATASRC_QUERY_IS_AUTH auth query (%1/%2)
+Debug information. The last DO_QUERY is an auth query.
+
+% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
+Debug information. The last DO_QUERY is a query for glue addresses.
+
+% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
+Debug information. The last DO_QUERY is a query for addresses that are not
+glue.
+
+% DATASRC_QUERY_IS_REF query for referral (%1/%2)
+Debug information. The last DO_QUERY is a query for referral information.
+
+% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
+Debug information. The last DO_QUERY is a simple query.
+
+% DATASRC_QUERY_MISPLACED_TASK task of this type should not be here
+This indicates a programming error. A task was found in the internal task
+queue, but this kind of task wasn't designed to be inside the queue (it should
+be handled right away, not queued).
+
+% DATASRC_QUERY_MISSING_NS missing NS records for '%1'
+NS records should have been put into the authority section. However, this zone
+has none. This indicates a problem with provided data.
+
+% DATASRC_QUERY_MISSING_SOA the zone '%1' has no SOA
+The answer should have been a negative one (eg. of nonexistence of something).
+To do so, a SOA record should be put into the authority section, but the zone
+does not have one. This indicates a problem with provided data.
+
+% DATASRC_QUERY_NOGLUE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the no-glue query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for authoritative ANY queries
+for consistency reasons.
+
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)
+Debug information. The hotspot cache is ignored for ANY queries for consistency
+reasons.
+
+% DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
+An attempt to add a NSEC record into the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
+An attempt to add a NSEC3 record into the message failed, because the zone does
+not have any DS record. This indicates a problem with the provided data.
+
+% DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'
+Lookup of the domain failed because the data source has no zone that contains
+the domain. Maybe someone sent a query to the wrong server for some reason.
+
+% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
+Debug information. A query is now being processed.
+
+% DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'
+The user wants DNSSEC and we discovered the entity doesn't exist (either
+domain or the record). But there was an error getting NSEC/NSEC3 record
+to prove the nonexistence.
+
+% DATASRC_QUERY_REF_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the query for referral information.
+1 means some error, 2 is not implemented. The data source should have logged
+the specific error already.
+
+% DATASRC_QUERY_RRSIG unable to answer RRSIG query
+The server is unable to answer a direct query for RRSIG type, but was asked
+to do so.
+
+% DATASRC_QUERY_SIMPLE_FAIL the underlying data source failed with %1
+The underlying data source failed to answer the simple query. 1 means some
+error, 2 is not implemented. The data source should have logged the specific
+error already.
+
+% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
+
+% DATASRC_QUERY_TASK_FAIL task failed with %1
+The query subtask failed. The reason should have been reported by the subtask
+already. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'
+A CNAME led to another CNAME and it led to another, and so on. After 16
+CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
+might possibly be a loop as well. Note that some of the CNAMEs might have
+been synthesized from DNAMEs. This indicates a problem with supplied data.
+
+% DATASRC_QUERY_UNKNOWN_RESULT unknown result of subtask
+This indicates a programmer error. The answer of subtask doesn't look like
+anything known.
+
+% DATASRC_QUERY_WILDCARD looking for a wildcard covering '%1'
+Debug information. A direct match wasn't found, so a wildcard covering the
+domain is being looked for now.
+
+% DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'
+During an attempt to cover the domain by a wildcard an error happened. The
+exact kind was hopefully already reported.
+
+% DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)
+While processing a wildcard, it wasn't possible to prove nonexistence of the
+given domain or record. The code is 1 for error and 2 for not implemented.
+
+% DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
+While processing a wildcard, a referral was met. But it wasn't possible to get
+enough information for it. The code is 1 for error, 2 for not implemented.
+
+% DATASRC_SQLITE_CLOSE closing SQLite database
+Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
+% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
+The database file is no longer needed and is being closed.
+
+% DATASRC_SQLITE_CREATE SQLite data source created
+Debug information. An instance of SQLite data source is being created.
+
+% DATASRC_SQLITE_DESTROY SQLite data source destroyed
+Debug information. An instance of SQLite data source is being destroyed.
+
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+The object around a database connection is being destroyed.
+
+% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
+Debug information. The SQLite data source is trying to identify which zone
+should hold this domain.
+
+% DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
+no such zone in our data.
+
+% DATASRC_SQLITE_FIND looking for RRset '%1/%2'
+Debug information. The SQLite data source is looking up a resource record
+set.
+
+% DATASRC_SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
+Debug information. The data source is looking up the addresses for given
+domain name.
+
+% DATASRC_SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
+The SQLite data source was looking up A/AAAA addresses, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDEXACT looking for exact RRset '%1/%2'
+Debug information. The SQLite data source is looking up an exact resource
+record.
+
+% DATASRC_SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an exact RRset, but the data source
+contains a different class than the query was for.
+
+% DATASRC_SQLITE_FINDREC looking for record '%1/%2'
+Debug information. The SQLite data source is looking up records of given name
+and type in the database.
+
+% DATASRC_SQLITE_FINDREF looking for referral at '%1'
+Debug information. The SQLite data source is identifying if this domain is
+a referral and where it goes.
+
+% DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
+The SQLite data source was trying to identify if there's a referral. But
+it contains a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
+The SQLite data source was looking up an RRset, but the data source contains
+a different class than the query was for.
+
+% DATASRC_SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
+Debug information. We're trying to look up a NSEC3 record in the SQLite data
+source.
+
+% DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
+The SQLite data source was asked to provide an NSEC3 record for the given zone.
+But it doesn't contain that zone.
+
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+A wrapper object to hold database connection is being initialized.
+
+% DATASRC_SQLITE_OPEN opening SQLite database '%1'
+Debug information. The SQLite data source is loading an SQLite database in
+the provided file.
+
+% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
+
+% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
+
+% DATASRC_SQLITE_SETUP setting up SQLite database
+The database for the SQLite data source was found empty. It is assumed this is
+the first run and it is being initialized with the current schema. It'll
+still contain no data, but it will be ready for use.
+
+% DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
+
+% DATASRC_STATIC_CREATE creating the static datasource
+Debug information. The static data source (the one holding stuff like
+version.bind) is being created.
+
+% DATASRC_STATIC_FIND looking for '%1/%2'
+Debug information. This resource record set is being looked up in the static
+data source.
+
+% DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
+This indicates a programming error. An internal task of unknown type was
+generated.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates rolled back for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but it may also be a bug in the application that forgot to commit
+the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to roll back incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+a software bug in the underlying data source implementation. In either
+case, if this message is logged, the administrator should carefully
+examine the underlying data source to see exactly what happened and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
new file mode 100644
index 0000000..1818c70
--- /dev/null
+++ b/src/lib/datasrc/factory.cc
@@ -0,0 +1,95 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "factory.h"
+
+#include "data_source.h"
+#include "database.h"
+#include "sqlite3_accessor.h"
+#include "memory_datasrc.h"
+
+#include <datasrc/logger.h>
+
+#include <dlfcn.h>
+
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace isc {
+namespace datasrc {
+
+LibraryContainer::LibraryContainer(const std::string& name) {
+ // use RTLD_GLOBAL so that shared symbols (e.g. exceptions)
+ // are recognized as such
+ ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
+ if (ds_lib_ == NULL) {
+ isc_throw(DataSourceLibraryError, dlerror());
+ }
+}
+
+LibraryContainer::~LibraryContainer() {
+ dlclose(ds_lib_);
+}
+
+void*
+LibraryContainer::getSym(const char* name) {
+ // Since dlsym can return NULL on success, we check for errors by
+ // first clearing any existing errors with dlerror(), then calling dlsym,
+ // and finally checking for errors with dlerror()
+ dlerror();
+
+ void *sym = dlsym(ds_lib_, name);
+
+ const char* dlsym_error = dlerror();
+ if (dlsym_error != NULL) {
+ isc_throw(DataSourceLibrarySymbolError, dlsym_error);
+ }
+
+ return (sym);
+}
+
+DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
+ ConstElementPtr config)
+: ds_lib_(type + "_ds.so")
+{
+ // We are casting from a data pointer to a function pointer here.
+ // Some compilers (rightfully) complain about that, but a
+ // C-style cast is the most widely accepted way to do it. If we run
+ // into any compiler that doesn't like this either, we might need to
+ // use some form of union cast or memory copy to get
+ // from the void* to the function pointer.
+ ds_creator* ds_create = (ds_creator*)ds_lib_.getSym("createInstance");
+ destructor_ = (ds_destructor*)ds_lib_.getSym("destroyInstance");
+
+ std::string error;
+ try {
+ instance_ = ds_create(config, error);
+ if (instance_ == NULL) {
+ isc_throw(DataSourceError, error);
+ }
+ } catch (const std::exception& exc) {
+ isc_throw(DataSourceError, "Uncaught exception from " + type +
+ " createInstance: " + exc.what());
+ } catch (...) {
+ isc_throw(DataSourceError, "Unknown uncaught exception from " + type);
+ }
+}
+
+DataSourceClientContainer::~DataSourceClientContainer() {
+ destructor_(instance_);
+}
+
+} // end namespace datasrc
+} // end namespace isc
+
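
A minimal usage sketch of the container defined above (not part of this
commit; the "sqlite3" type and its "database_file" setting are assumptions
about that backend's configuration):

    #include <datasrc/factory.h>
    #include <cc/data.h>

    using namespace isc::datasrc;
    using namespace isc::data;

    void loadSqlite3Client() {
        ConstElementPtr config = Element::fromJSON(
            "{\"database_file\": \"/tmp/example.sqlite3\"}");
        // Loads "sqlite3_ds.so", resolves createInstance/destroyInstance and
        // constructs the client; throws DataSourceLibraryError,
        // DataSourceLibrarySymbolError or DataSourceError on failure.
        DataSourceClientContainer container("sqlite3", config);
        DataSourceClient& client = container.getInstance();
        // ... use client.findZone(), client.getIterator(), etc.; the
        // instance is destroyed together with the container.
    }
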
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
new file mode 100644
index 0000000..0284067
--- /dev/null
+++ b/src/lib/datasrc/factory.h
@@ -0,0 +1,170 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_FACTORY_H
+#define __DATA_SOURCE_FACTORY_H 1
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <exceptions/exceptions.h>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace datasrc {
+
+
+/// \brief Raised if there is an error loading the datasource implementation
+/// library
+class DataSourceLibraryError : public DataSourceError {
+public:
+ DataSourceLibraryError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if there is an error reading a symbol from the datasource
+/// implementation library
+class DataSourceLibrarySymbolError : public DataSourceError {
+public:
+ DataSourceLibrarySymbolError(const char* file, size_t line,
+ const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+typedef DataSourceClient* ds_creator(isc::data::ConstElementPtr config,
+ std::string& error);
+typedef void ds_destructor(DataSourceClient* instance);
+
+/// \brief Container class for dynamically loaded libraries
+///
+/// This class is used to dlopen() a library, provides access to dlsym(),
+/// and cleans up the dlopened library when the instance of this class is
+/// destroyed.
+///
+/// Its main function is to provide RAII-style access to dlopen'ed libraries.
+///
+/// \note Currently it is datasource-backend specific. If we need this
+/// anywhere other than for dynamically loading datasources, then, apart
+/// from moving it to another location, we also need to make the
+/// exceptions raised more general.
+class LibraryContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \param name The name of the library (.so) file. This file must be in
+ /// the library path.
+ ///
+ /// \exception DataSourceLibraryError If the library cannot be found or
+ /// cannot be loaded.
+ LibraryContainer(const std::string& name);
+
+ /// \brief Destructor
+ ///
+ /// Cleans up the library by calling dlclose()
+ ~LibraryContainer();
+
+ /// \brief Retrieve a symbol
+ ///
+ /// This retrieves a symbol from the loaded library.
+ ///
+ /// \exception DataSourceLibrarySymbolError if the symbol cannot be found,
+ /// or if another error (as reported by dlerror()) occurs.
+ ///
+ /// \param name The name of the symbol to retrieve
+ /// \return A pointer to the symbol. This may be NULL; if so, it indicates
+ /// that the symbol does exist but has the value NULL itself.
+ /// If the symbol does not exist, a DataSourceLibrarySymbolError is
+ /// raised.
+ ///
+ /// \note The argument is a const char* (and not a std::string like the
+ /// argument in the constructor). This argument is always a fixed
+ /// string in the code, while the other can be read from
+ /// configuration, and needs modification
+ void* getSym(const char* name);
+private:
+ /// Pointer to the dynamically loaded library structure
+ void *ds_lib_;
+};
+
+
+/// \brief Container for a specific instance of a dynamically loaded
+/// DataSourceClient implementation
+///
+/// Given a datasource type and a type-specific set of configuration data,
+/// the corresponding dynamic library is loaded (if it hadn't been already),
+/// and an instance is created. This instance is stored within this structure,
+/// and can be accessed through getInstance(). Upon destruction of this
+/// container, the stored instance of the DataSourceClient is deleted with
+/// the destructor function provided by the loaded library.
+///
+/// The 'type' is actually the name of the library, minus the '_ds.so' postfix.
+/// Datasource implementation libraries therefore have a fixed name, both for
+/// easy recognition and to reduce potential mistakes.
+/// For example, the sqlite3 implementation has the type 'sqlite3', and the
+/// derived filename 'sqlite3_ds.so'.
+///
+/// There are of course some requirements on an implementation, not all of
+/// which can be verified at compile time. It must provide creator and
+/// destructor functions. The creator function must return an instance of a
+/// subclass of DataSourceClient. The prototypes of these functions are as
+/// follows:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg,
+///                                             std::string& error);
+///
+/// extern "C" void destroyInstance(DataSourceClient* instance);
+/// \endcode
+class DataSourceClientContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \exception DataSourceLibraryError if there is an error loading the
+ /// backend library
+ /// \exception DataSourceLibrarySymbolError if the library does not have
+ /// the needed symbols, or if there is an error reading them
+ /// \exception DataSourceError if the given config is not correct
+ /// for the given type, or if there was a problem during
+ /// initialization
+ ///
+ /// \param type The type of the datasource client. Based on the value of
+ /// type, a specific backend library is used, by appending the
+ /// string '_ds.so' to the given type, and loading that as the
+ /// implementation library
+ /// \param config Type-specific configuration data, see the documentation
+ /// of the datasource backend type for information on what
+ /// configuration data to pass.
+ DataSourceClientContainer(const std::string& type,
+ isc::data::ConstElementPtr config);
+
+ /// \brief Destructor
+ ~DataSourceClientContainer();
+
+ /// \brief Accessor to the instance
+ ///
+ /// \return Reference to the DataSourceClient instance contained in this
+ /// container.
+ DataSourceClient& getInstance() { return *instance_; }
+
+private:
+ DataSourceClient* instance_;
+ ds_destructor* destructor_;
+ LibraryContainer ds_lib_;
+};
+
+} // end namespace datasrc
+} // end namespace isc
+#endif // __DATA_SOURCE_FACTORY_H
+// Local Variables:
+// mode: c++
+// End:
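
For reference, a loadable backend only needs to export the two entry points
documented above; a hedged minimal sketch (the ExampleClient class is
hypothetical, and the in-memory backend later in this diff follows the same
pattern):

    extern "C" isc::datasrc::DataSourceClient*
    createInstance(isc::data::ConstElementPtr config, std::string& error) {
        try {
            // ExampleClient stands for some concrete DataSourceClient subclass
            return (new ExampleClient(config));
        } catch (const std::exception& exc) {
            error = exc.what();
            return (NULL);
        }
    }

    extern "C" void
    destroyInstance(isc::datasrc::DataSourceClient* instance) {
        delete instance;
    }
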
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
new file mode 100644
index 0000000..99d3331
--- /dev/null
+++ b/src/lib/datasrc/iterator.h
@@ -0,0 +1,105 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATASRC_ZONE_ITERATOR_H
+#define __DATASRC_ZONE_ITERATOR_H 1
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (a descendant of) ZoneIterator from the
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There's no way to start iterating from the beginning again or to go back.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor. It is empty, but ensures the right destructor of the
+ * descendant class is called.
+ */
+ virtual ~ZoneIterator() { }
+
+ /**
+ * \brief Get next RRset from the zone.
+ *
+ * This returns the next RRset in the zone as a shared pointer. The
+ * shared pointer is used to allow both accessing in-memory data and
+ * automatic memory management.
+ *
+ * No particular order of the RRsets is guaranteed.
+ *
+ * While this can potentially throw anything (including standard allocation
+ * errors), it should be rare.
+ *
+ * \return Pointer to the next RRset or NULL pointer when the iteration
+ * gets to the end of the zone.
+ */
+ virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+
+ /**
+ * \brief Return the SOA record of the zone in the iterator context.
+ *
+ * This method returns the zone's SOA record (if any, and a valid zone
+ * should have it) in the form of an RRset object. This SOA is identical
+ * to that (again, if any) contained in the sequence of RRsets returned
+ * by the iterator. In that sense this method is redundant, but is
+ * provided as a convenient utility for the application of the
+ * iterator; the application may need to know the SOA serial or the
+ * SOA RR itself for the purpose of protocol handling or skipping the
+ * expensive iteration processing.
+ *
+ * If the zone doesn't have an SOA (which is broken, but some data source
+ * may allow that situation), this method returns NULL. Also, in the
+ * normal and valid case, the SOA should have exactly one RDATA, but
+ * this API does not guarantee it as some data source may accept such an
+ * abnormal condition. It's up to the caller whether to check the number
+ * of RDATA and how to react to the unexpected case.
+ *
+ * Each concrete derived method must ensure that the SOA returned by this
+ * method is identical to the zone's SOA returned via the iteration.
+ * For example, even if another thread or process updates the SOA while
+ * the iterator is working, the result of this method must not be
+ * affected by the update. For database based data sources, this can
+ * be done by making the entire iterator operation a single database
+ * transaction, but the actual implementation can differ.
+ *
+ * \exception None
+ *
+ * \return A shared pointer to an SOA RRset that would be returned
+ * from the iteration. It will be NULL if the zone doesn't have an SOA.
+ */
+ virtual isc::dns::ConstRRsetPtr getSOA() const = 0;
+};
+
+}
+}
+#endif // __DATASRC_ZONE_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
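
A short usage sketch of the interface above (not part of this commit;
'client' is assumed to be a DataSourceClient serving the zone):

    ZoneIteratorPtr it = client->getIterator(Name("example.org"));
    // getSOA() may return NULL for a broken zone, and some backends may
    // not implement it at all.
    ConstRRsetPtr soa = it->getSOA();
    for (ConstRRsetPtr rrset = it->getNextRRset(); rrset;
         rrset = it->getNextRRset()) {
        std::cout << rrset->toText();
    }
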
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index 7c2828d..db4e5cb 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -16,9 +16,9 @@
#define __DATASRC_LOGGER_H
#include <log/macros.h>
-#include <datasrc/messagedef.h>
+#include <datasrc/datasrc_messages.h>
-/// \file logger.h
+/// \file datasrc/logger.h
/// \brief Data Source library global logger
///
/// This holds the logger for the data source library. It is a private header
@@ -31,14 +31,14 @@ namespace datasrc {
/// \brief The logger for this library
extern isc::log::Logger logger;
-enum {
- /// \brief Trace basic operations
- DBG_TRACE_BASIC = 10,
- /// \brief Trace data changes and lookups as well
- DBG_TRACE_DATA = 20,
- /// \brief Detailed even about how the lookups happen
- DBG_TRACE_DETAILED = 50
-};
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Trace data changes and lookups as well
+const int DBG_TRACE_DATA = DBGLVL_TRACE_BASIC_DATA;
+
+/// \brief Detailed trace, even about how the lookups happen
+const int DBG_TRACE_DETAILED = DBGLVL_TRACE_DETAIL;
}
}
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 3c57d1b..8da43d0 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -16,6 +16,9 @@
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
#include <dns/name.h>
#include <dns/rrclass.h>
@@ -25,17 +28,44 @@
#include <datasrc/memory_datasrc.h>
#include <datasrc/rbtree.h>
#include <datasrc/logger.h>
+#include <datasrc/iterator.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+
+#include <cc/data.h>
using namespace std;
using namespace isc::dns;
+using namespace isc::data;
namespace isc {
namespace datasrc {
-// Private data and hidden methods of MemoryZone
-struct MemoryZone::MemoryZoneImpl {
+namespace {
+// Some type aliases
+/*
+ * Each domain consists of some RRsets. They will be looked up by the
+ * RRType.
+ *
+ * The use of map is questionable with regard to performance - there will
+ * usually be only a few RRsets in the domain, so the log n benefit isn't
+ * much and a vector/array might be faster due to its simplicity and
+ * contiguous memory layout. But this is unlikely to be a performance
+ * critical place and map has a better interface for the lookups, so we use
+ * that.
+ */
+typedef map<RRType, ConstRRsetPtr> Domain;
+typedef Domain::value_type DomainPair;
+typedef boost::shared_ptr<Domain> DomainPtr;
+// The tree stores domains
+typedef RBTree<Domain> DomainTree;
+typedef RBNode<Domain> DomainNode;
+}
+
+// Private data and hidden methods of InMemoryZoneFinder
+struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
// Constructor
- MemoryZoneImpl(const RRClass& zone_class, const Name& origin) :
+ InMemoryZoneFinderImpl(const RRClass& zone_class, const Name& origin) :
zone_class_(zone_class), origin_(origin), origin_data_(NULL),
domains_(true)
{
@@ -44,25 +74,6 @@ struct MemoryZone::MemoryZoneImpl {
DomainPtr origin_domain(new Domain);
origin_data_->setData(origin_domain);
}
-
- // Some type aliases
- /*
- * Each domain consists of some RRsets. They will be looked up by the
- * RRType.
- *
- * The use of map is questionable with regard to performance - there'll
- * be usually only few RRsets in the domain, so the log n benefit isn't
- * much and a vector/array might be faster due to its simplicity and
- * continuous memory location. But this is unlikely to be a performance
- * critical place and map has better interface for the lookups, so we use
- * that.
- */
- typedef map<RRType, ConstRRsetPtr> Domain;
- typedef Domain::value_type DomainPair;
- typedef boost::shared_ptr<Domain> DomainPtr;
- // The tree stores domains
- typedef RBTree<Domain> DomainTree;
- typedef RBNode<Domain> DomainNode;
static const DomainNode::Flags DOMAINFLAG_WILD = DomainNode::FLAG_USER1;
// Information about the zone
@@ -129,7 +140,7 @@ struct MemoryZone::MemoryZoneImpl {
// Ensure CNAME and other type of RR don't coexist for the same
// owner name.
if (rrset->getType() == RRType::CNAME()) {
- // XXX: this check will become incorrect when we support DNSSEC
+ // TODO: this check will become incorrect when we support DNSSEC
// (depending on how we support DNSSEC). We should revisit it
// at that point.
if (!domain->empty()) {
@@ -223,12 +234,15 @@ struct MemoryZone::MemoryZoneImpl {
* Implementation of longer methods. We put them here, because the
* access is without the impl_-> and it will get inlined anyway.
*/
- // Implementation of MemoryZone::add
+ // Implementation of InMemoryZoneFinder::add
result::Result add(const ConstRRsetPtr& rrset, DomainTree* domains) {
+ // Sanitize input. This will cause an exception to be thrown
+ // if the input RRset is empty.
+ addValidation(rrset);
+
+ // OK, can add the RRset.
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_ADD_RRSET).
arg(rrset->getName()).arg(rrset->getType()).arg(origin_);
- // Sanitize input
- addValidation(rrset);
// Add wildcards possibly contained in the owner name to the domain
// tree.
@@ -406,7 +420,7 @@ struct MemoryZone::MemoryZoneImpl {
}
}
- // Implementation of MemoryZone::find
+ // Implementation of InMemoryZoneFinder::find
FindResult find(const Name& name, RRType type,
RRsetList* target, const FindOptions options) const
{
@@ -520,7 +534,7 @@ struct MemoryZone::MemoryZoneImpl {
// fall through
case DomainTree::NOTFOUND:
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOTFOUND).
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOT_FOUND).
arg(name);
return (FindResult(NXDOMAIN, ConstRRsetPtr()));
case DomainTree::EXACTMATCH: // This one is OK, handle it
@@ -590,50 +604,50 @@ struct MemoryZone::MemoryZoneImpl {
}
};
-MemoryZone::MemoryZone(const RRClass& zone_class, const Name& origin) :
- impl_(new MemoryZoneImpl(zone_class, origin))
+InMemoryZoneFinder::InMemoryZoneFinder(const RRClass& zone_class, const Name& origin) :
+ impl_(new InMemoryZoneFinderImpl(zone_class, origin))
{
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_CREATE).arg(origin).
arg(zone_class);
}
-MemoryZone::~MemoryZone() {
+InMemoryZoneFinder::~InMemoryZoneFinder() {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_DESTROY).arg(getOrigin()).
arg(getClass());
delete impl_;
}
-const Name&
-MemoryZone::getOrigin() const {
+Name
+InMemoryZoneFinder::getOrigin() const {
return (impl_->origin_);
}
-const RRClass&
-MemoryZone::getClass() const {
+RRClass
+InMemoryZoneFinder::getClass() const {
return (impl_->zone_class_);
}
-Zone::FindResult
-MemoryZone::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ZoneFinder::FindResult
+InMemoryZoneFinder::find(const Name& name, const RRType& type,
+ RRsetList* target, const FindOptions options)
{
return (impl_->find(name, type, target, options));
}
result::Result
-MemoryZone::add(const ConstRRsetPtr& rrset) {
+InMemoryZoneFinder::add(const ConstRRsetPtr& rrset) {
return (impl_->add(rrset, &impl_->domains_));
}
void
-MemoryZone::load(const string& filename) {
+InMemoryZoneFinder::load(const string& filename) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
arg(filename);
// Load it into a temporary tree
- MemoryZoneImpl::DomainTree tmp;
+ DomainTree tmp;
masterLoad(filename.c_str(), getOrigin(), getClass(),
- boost::bind(&MemoryZoneImpl::addFromLoad, impl_, _1, &tmp));
+ boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_, _1, &tmp));
// If it went well, put it inside
impl_->file_name_ = filename;
tmp.swap(impl_->domains_);
@@ -641,64 +655,308 @@ MemoryZone::load(const string& filename) {
}
void
-MemoryZone::swap(MemoryZone& zone) {
+InMemoryZoneFinder::swap(InMemoryZoneFinder& zone_finder) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_SWAP).arg(getOrigin()).
- arg(zone.getOrigin());
- std::swap(impl_, zone.impl_);
+ arg(zone_finder.getOrigin());
+ std::swap(impl_, zone_finder.impl_);
}
const string
-MemoryZone::getFileName() const {
+InMemoryZoneFinder::getFileName() const {
return (impl_->file_name_);
}
-/// Implementation details for \c MemoryDataSrc hidden from the public
+isc::dns::Name
+InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const {
+ isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC "
+ "yet, can't find previous name");
+}
+
+/// Implementation details for \c InMemoryClient hidden from the public
/// interface.
///
-/// For now, \c MemoryDataSrc only contains a \c ZoneTable object, which
-/// consists of (pointers to) \c MemoryZone objects, we may add more
+/// For now, \c InMemoryClient only contains a \c ZoneTable object, which
+/// consists of (pointers to) \c InMemoryZoneFinder objects, we may add more
/// member variables later for new features.
-class MemoryDataSrc::MemoryDataSrcImpl {
+class InMemoryClient::InMemoryClientImpl {
public:
- MemoryDataSrcImpl() : zone_count(0) {}
+ InMemoryClientImpl() : zone_count(0) {}
unsigned int zone_count;
ZoneTable zone_table;
};
-MemoryDataSrc::MemoryDataSrc() : impl_(new MemoryDataSrcImpl)
+InMemoryClient::InMemoryClient() : impl_(new InMemoryClientImpl)
{}
-MemoryDataSrc::~MemoryDataSrc() {
+InMemoryClient::~InMemoryClient() {
delete impl_;
}
unsigned int
-MemoryDataSrc::getZoneCount() const {
+InMemoryClient::getZoneCount() const {
return (impl_->zone_count);
}
result::Result
-MemoryDataSrc::addZone(ZonePtr zone) {
- if (!zone) {
+InMemoryClient::addZone(ZoneFinderPtr zone_finder) {
+ if (!zone_finder) {
isc_throw(InvalidParameter,
- "Null pointer is passed to MemoryDataSrc::addZone()");
+ "Null pointer is passed to InMemoryClient::addZone()");
}
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_ADD_ZONE).
- arg(zone->getOrigin()).arg(zone->getClass().toText());
+ arg(zone_finder->getOrigin()).arg(zone_finder->getClass().toText());
- const result::Result result = impl_->zone_table.addZone(zone);
+ const result::Result result = impl_->zone_table.addZone(zone_finder);
if (result == result::SUCCESS) {
++impl_->zone_count;
}
return (result);
}
-MemoryDataSrc::FindResult
-MemoryDataSrc::findZone(const isc::dns::Name& name) const {
+InMemoryClient::FindResult
+InMemoryClient::findZone(const isc::dns::Name& name) const {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_ZONE).arg(name);
- return (FindResult(impl_->zone_table.findZone(name).code,
- impl_->zone_table.findZone(name).zone));
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ return (FindResult(result.code, result.zone));
+}
+
+namespace {
+
+class MemoryIterator : public ZoneIterator {
+private:
+ RBTreeNodeChain<Domain> chain_;
+ Domain::const_iterator dom_iterator_;
+ const DomainTree& tree_;
+ const DomainNode* node_;
+ bool ready_;
+public:
+ MemoryIterator(const DomainTree& tree, const Name& origin) :
+ tree_(tree),
+ ready_(true)
+ {
+ // Find the first node (origin) and preserve the node chain for future
+ // searches
+ DomainTree::Result result(tree_.find<void*>(origin, &node_, chain_,
+ NULL, NULL));
+ // It can't happen that the origin is not in there
+ if (result != DomainTree::EXACTMATCH) {
+ isc_throw(Unexpected,
+ "In-memory zone corrupted, missing origin node");
+ }
+ // Initialize the iterator if there's somewhere to point to
+ if (node_ != NULL && node_->getData() != DomainPtr()) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+
+ virtual ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(Unexpected, "Iterating past the zone end");
+ }
+ /*
+ * This loop finds the first node that still has an unused RRset.
+ * If node_ is NULL, we have run out of nodes. If its data is empty,
+ * it contains no RRsets. If the domain iterator is at the end of the
+ * current node's data, move on to the next node.
+ */
+ while (node_ != NULL && (node_->getData() == DomainPtr() ||
+ dom_iterator_ == node_->getData()->end())) {
+ node_ = tree_.nextNode(chain_);
+ // If there's a node, initialize the iterator and check next time
+ // if the map is empty or not
+ if (node_ != NULL && node_->getData() != NULL) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+ if (node_ == NULL) {
+ // That's all, folks
+ ready_ = false;
+ return (ConstRRsetPtr());
+ }
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+ // This RRset has now been used; advance the iterator for the next call
+ ++dom_iterator_;
+
+ return (result);
+ }
+
+ virtual ConstRRsetPtr getSOA() const {
+ isc_throw(NotImplemented, "Not implemented");
+ }
+};
+
+} // End of anonymous namespace
+
+ZoneIteratorPtr
+InMemoryClient::getIterator(const Name& name) const {
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ if (result.code != result::SUCCESS) {
+ isc_throw(DataSourceError, "No such zone: " + name.toText());
+ }
+
+ const InMemoryZoneFinder*
+ zone(dynamic_cast<const InMemoryZoneFinder*>(result.zone.get()));
+ if (zone == NULL) {
+ /*
+ * TODO: This can happen only during some of the tests and only as
+ * a temporary solution. This should be fixed by #1159 and then
+ * this cast and check shouldn't be necessary. We don't have a
+ * test for handling a "cannot happen" condition.
+ */
+ isc_throw(Unexpected, "The zone at " + name.toText() +
+ " is not InMemoryZoneFinder");
+ }
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+}
+
+ZoneUpdaterPtr
+InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+ isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
+}
+
+
+namespace {
+// convenience function to add an error message to a list of those
+// (TODO: move functions like these to some util lib?)
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
}
+
+/// Check if the given element exists in the map, and if it is a string
+bool
+checkConfigElementString(ConstElementPtr config, const std::string& name,
+ ElementPtr errors)
+{
+ if (!config->contains(name)) {
+ addError(errors,
+ "Config for memory backend does not contain a '"
+ +name+
+ "' value");
+ return false;
+ } else if (!config->get(name) ||
+ config->get(name)->getType() != Element::string) {
+ addError(errors, "value of " + name +
+ " in memory backend config is not a string");
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool
+checkZoneConfig(ConstElementPtr config, ElementPtr errors) {
+ bool result = true;
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Elements in memory backend's zone list must be maps");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "origin", errors)) {
+ result = false;
+ }
+ if (!checkConfigElementString(config, "file", errors)) {
+ result = false;
+ }
+ // we could add some existence/readability/parsability checks here
+ // if we want
+ }
+ return result;
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion; right now this accepts
+ * the 'old' configuration, see [TODO]
+ * So for memory datasource, we get a structure like this:
+ * { "type": string ("memory"),
+ * "class": string ("IN"/"CH"/etc),
+ * "zones": list
+ * }
+ * Zones list is a list of maps:
+ * { "origin": string,
+ * "file": string
+ * }
+ *
+ * At this moment we cannot be completely sure of the contents of the
+ * structure, so we have to do some more extensive tests than should
+ * strictly be necessary (e.g. existence and type of elements)
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for memory backend must be a map");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "type", errors)) {
+ result = false;
+ } else {
+ if (config->get("type")->stringValue() != "memory") {
+ addError(errors,
+ "Config for memory backend is not of type \"memory\"");
+ result = false;
+ }
+ }
+ if (!checkConfigElementString(config, "class", errors)) {
+ result = false;
+ } else {
+ try {
+ RRClass rrc(config->get("class")->stringValue());
+ } catch (const isc::Exception& rrce) {
+ addError(errors,
+ "Error parsing class config for memory backend: " +
+ std::string(rrce.what()));
+ result = false;
+ }
+ }
+ if (!config->contains("zones")) {
+ addError(errors, "No 'zones' element in memory backend config");
+ result = false;
+ } else if (!config->get("zones") ||
+ config->get("zones")->getType() != Element::list) {
+ addError(errors, "'zones' element in memory backend config is not a list");
+ result = false;
+ } else {
+ BOOST_FOREACH(ConstElementPtr zone_config,
+ config->get("zones")->listValue()) {
+ if (!checkZoneConfig(zone_config, errors)) {
+ result = false;
+ }
+ }
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config, std::string& error) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ error = "Configuration error: " + errors->str();
+ return (NULL);
+ }
+ try {
+ return (new InMemoryClient());
+ } catch (const std::exception& exc) {
+ error = std::string("Error creating memory datasource: ") + exc.what();
+ return (NULL);
+ } catch (...) {
+ error = std::string("Error creating memory datasource, "
+ "unknown exception");
+ return (NULL);
+ }
+}
+
+void destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+
} // end of namespace datasrc
-} // end of namespace dns
+} // end of namespace isc
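
A hedged usage sketch of the factory entry points added above, using the
configuration layout that checkConfig() expects (the zone file path here is
hypothetical; the file itself is not opened by createInstance()):

    ElementPtr config = Element::fromJSON(
        "{\"type\": \"memory\", \"class\": \"IN\","
        " \"zones\": [{\"origin\": \"example.org\","
        "              \"file\": \"/tmp/example.org.zone\"}]}");
    std::string error;
    DataSourceClient* client = createInstance(config, error);
    if (client == NULL) {
        std::cerr << error << std::endl;   // configuration problems end up here
    } else {
        // ... use the client ...
        destroyInstance(client);           // release via the matching destructor
    }
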
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 99bb4e8..610deff 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -17,7 +17,12 @@
#include <string>
+#include <boost/noncopyable.hpp>
+
#include <datasrc/zonetable.h>
+#include <datasrc/client.h>
+
+#include <cc/data.h>
namespace isc {
namespace dns {
@@ -27,18 +32,17 @@ class RRsetList;
namespace datasrc {
-/// A derived zone class intended to be used with the memory data source.
-class MemoryZone : public Zone {
+/// A derived zone finder class intended to be used with the memory data source.
+///
+/// Conceptually this "finder" maintains a local in-memory copy of all RRs
+/// of a single zone from some kind of source (right now it's a textual
+/// master file, but it could also be another data source with a database
+/// backend). This is why the class has methods like \c load() or \c add().
+///
+/// This class is non copyable.
+class InMemoryZoneFinder : boost::noncopyable, public ZoneFinder {
///
/// \name Constructors and Destructor.
- ///
- /// \b Note:
- /// The copy constructor and the assignment operator are intentionally
- /// defined as private, making this class non copyable.
- //@{
-private:
- MemoryZone(const MemoryZone& source);
- MemoryZone& operator=(const MemoryZone& source);
public:
/// \brief Constructor from zone parameters.
///
@@ -48,17 +52,18 @@ public:
///
/// \param rrclass The RR class of the zone.
/// \param origin The origin name of the zone.
- MemoryZone(const isc::dns::RRClass& rrclass, const isc::dns::Name& origin);
+ InMemoryZoneFinder(const isc::dns::RRClass& rrclass,
+ const isc::dns::Name& origin);
/// The destructor.
- virtual ~MemoryZone();
+ virtual ~InMemoryZoneFinder();
//@}
/// \brief Returns the origin of the zone.
- virtual const isc::dns::Name& getOrigin() const;
+ virtual isc::dns::Name getOrigin() const;
/// \brief Returns the class of the zone.
- virtual const isc::dns::RRClass& getClass() const;
+ virtual isc::dns::RRClass getClass() const;
/// \brief Looks up an RRset in the zone.
///
@@ -70,7 +75,13 @@ public:
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
+
+ /// \brief Implementation of the ZoneFinder::findPreviousName method
+ ///
+ /// This one throws a NotImplemented exception, as the in-memory data
+ /// source doesn't support DNSSEC currently.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const;
/// \brief Inserts an rrset into the zone.
///
@@ -128,14 +139,14 @@ public:
/// Return the master file name of the zone
///
/// This method returns the name of the zone's master file to be loaded.
- /// The returned string will be an empty unless the zone has successfully
- /// loaded a zone.
+ /// The returned string will be empty unless the zone finder has
+ /// successfully loaded a zone.
///
/// This method should normally not throw an exception. But the creation
/// of the return string may involve a resource allocation, and if it
/// fails, the corresponding standard exception will be thrown.
///
- /// \return The name of the zone file loaded in the zone, or an empty
+ /// \return The name of the zone file loaded in the zone finder, or an empty
/// string if the zone hasn't loaded any file.
const std::string getFileName() const;
@@ -164,144 +175,154 @@ public:
/// configuration reloading is written.
void load(const std::string& filename);
- /// Exchanges the content of \c this zone with that of the given \c zone.
+ /// Exchanges the content of \c this zone finder with that of the given
+ /// \c zone_finder.
///
/// This method never throws an exception.
///
- /// \param zone Another \c MemoryZone object which is to be swapped with
- /// \c this zone.
- void swap(MemoryZone& zone);
+ /// \param zone_finder Another \c InMemoryZoneFinder object which is to
+ /// be swapped with \c this zone finder.
+ void swap(InMemoryZoneFinder& zone_finder);
private:
/// \name Hidden private data
//@{
- struct MemoryZoneImpl;
- MemoryZoneImpl* impl_;
+ struct InMemoryZoneFinderImpl;
+ InMemoryZoneFinderImpl* impl_;
//@}
+ // The friend here is for InMemoryClient::getIterator. The iterator
+ // needs to access the data inside the zone, so the InMemoryClient
+ // extracts the pointer to data and puts it into the iterator.
+ // The access is read only.
+ friend class InMemoryClient;
};
-/// \brief A data source that uses in memory dedicated backend.
+/// \brief A data source client that holds all necessary data in memory.
///
-/// The \c MemoryDataSrc class represents a data source and provides a
-/// basic interface to help DNS lookup processing. For a given domain
-/// name, its \c findZone() method searches the in memory dedicated backend
-/// for the zone that gives a longest match against that name.
+/// The \c InMemoryClient class provides access to a conceptual data
+/// source that maintains all necessary data in a memory image, thereby
+/// allowing much faster lookups. The in-memory data is a copy of some
+/// real physical source - in the current implementation a list of zones
+/// is populated as a result of \c addZone() calls; zone data is given
+/// in a standard master file (but there's a plan to use database backends
+/// as a source of the in memory data).
///
-/// The in memory dedicated backend are assumed to be of the same RR class,
-/// but the \c MemoryDataSrc class does not enforce the assumption through
+/// Although every data source client is assumed to be of the same RR class,
+/// the \c InMemoryClient class does not enforce the assumption through
/// its interface.
/// For example, the \c addZone() method does not check if the new zone is of
-/// the same RR class as that of the others already in the dedicated backend.
+/// the same RR class as that of the others already in memory.
/// It is caller's responsibility to ensure this assumption.
///
/// <b>Notes to developer:</b>
///
-/// For now, we don't make it a derived class of AbstractDataSrc because the
-/// interface is so different (we'll eventually consider this as part of the
-/// generalization work).
-///
/// The addZone() method takes a (Boost) shared pointer because it would be
/// inconvenient to require the caller to maintain the ownership of zones,
/// while it wouldn't be safe to delete unnecessary zones inside the dedicated
/// backend.
///
-/// The findZone() method takes a domain name and returns the best matching \c
-/// MemoryZone in the form of (Boost) shared pointer, so that it can provide
-/// the general interface for all data sources.
-class MemoryDataSrc {
+/// The findZone() method takes a domain name and returns the best matching
+/// \c InMemoryZoneFinder in the form of (Boost) shared pointer, so that it can
+/// provide the general interface for all data sources.
+class InMemoryClient : public DataSourceClient {
public:
- /// \brief A helper structure to represent the search result of
- /// <code>MemoryDataSrc::find()</code>.
- ///
- /// This is a straightforward pair of the result code and a share pointer
- /// to the found zone to represent the result of \c find().
- /// We use this in order to avoid overloading the return value for both
- /// the result code ("success" or "not found") and the found object,
- /// i.e., avoid using \c NULL to mean "not found", etc.
- ///
- /// This is a simple value class with no internal state, so for
- /// convenience we allow the applications to refer to the members
- /// directly.
- ///
- /// See the description of \c find() for the semantics of the member
- /// variables.
- struct FindResult {
- FindResult(result::Result param_code, const ZonePtr param_zone) :
- code(param_code), zone(param_zone)
- {}
- const result::Result code;
- const ZonePtr zone;
- };
-
///
/// \name Constructors and Destructor.
///
- /// \b Note:
- /// The copy constructor and the assignment operator are intentionally
- /// defined as private, making this class non copyable.
//@{
-private:
- MemoryDataSrc(const MemoryDataSrc& source);
- MemoryDataSrc& operator=(const MemoryDataSrc& source);
-public:
/// Default constructor.
///
/// This constructor internally involves resource allocation, and if
/// it fails, a corresponding standard exception will be thrown.
/// It never throws an exception otherwise.
- MemoryDataSrc();
+ InMemoryClient();
/// The destructor.
- ~MemoryDataSrc();
+ ~InMemoryClient();
//@}
- /// Return the number of zones stored in the data source.
+ /// Return the number of zones stored in the client.
///
/// This method never throws an exception.
///
- /// \return The number of zones stored in the data source.
+ /// \return The number of zones stored in the client.
unsigned int getZoneCount() const;
- /// Add a \c Zone to the \c MemoryDataSrc.
+ /// Add a zone (in the form of \c ZoneFinder) to the \c InMemoryClient.
///
- /// \c Zone must not be associated with a NULL pointer; otherwise
+ /// \c zone_finder must not be associated with a NULL pointer; otherwise
/// an exception of class \c InvalidParameter will be thrown.
/// If internal resource allocation fails, a corresponding standard
/// exception will be thrown.
/// This method never throws an exception otherwise.
///
- /// \param zone A \c Zone object to be added.
- /// \return \c result::SUCCESS If the zone is successfully
- /// added to the memory data source.
+ /// \param zone_finder A \c ZoneFinder object to be added.
+ /// \return \c result::SUCCESS If the zone_finder is successfully
+ /// added to the client.
/// \return \c result::EXIST The memory data source already
/// stores a zone that has the same origin.
- result::Result addZone(ZonePtr zone);
-
- /// Find a \c Zone that best matches the given name in the \c MemoryDataSrc.
- ///
- /// It searches the internal storage for a \c Zone that gives the
- /// longest match against \c name, and returns the result in the
- /// form of a \c FindResult object as follows:
- /// - \c code: The result code of the operation.
- /// - \c result::SUCCESS: A zone that gives an exact match
- // is found
- /// - \c result::PARTIALMATCH: A zone whose origin is a
- // super domain of \c name is found (but there is no exact match)
- /// - \c result::NOTFOUND: For all other cases.
- /// - \c zone: A "Boost" shared pointer to the found \c Zone object if one
- // is found; otherwise \c NULL.
- ///
- /// This method never throws an exception.
+ result::Result addZone(ZoneFinderPtr zone_finder);
+
+ /// Returns a \c ZoneFinder for the zone that best matches the given
+ /// name.
///
- /// \param name A domain name for which the search is performed.
- /// \return A \c FindResult object enclosing the search result (see above).
- FindResult findZone(const isc::dns::Name& name) const;
+ /// This derived version of the method never throws an exception.
+ /// For other details see \c DataSourceClient::findZone().
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+ /// \brief Implementation of the getIterator method
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// The in-memory data source is read-only, so this derived method will
+ /// result in a NotImplemented exception.
+ ///
+ /// \note We plan to use a database-based data source as a backend
+ /// persistent storage for an in-memory data source. When it's
+ /// implemented we may also want to allow the user of the in-memory client
+ /// to update via its updater (this may or may not be a good idea and
+ /// is subject to further discussions).
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
private:
- class MemoryDataSrcImpl;
- MemoryDataSrcImpl* impl_;
+ // TODO: Do we still need the PImpl if nobody should manipulate this class
+ // directly any more (it should be handled through DataSourceClient)?
+ class InMemoryClientImpl;
+ InMemoryClientImpl* impl_;
};
+
+/// \brief Creates an instance of the Memory datasource client
+///
+/// Currently the configuration passed here must be a MapElement, formed as
+/// follows:
+/// \code
+/// { "type": string ("memory"),
+/// "class": string ("IN"/"CH"/etc),
+/// "zones": list
+/// }
+/// Zones list is a list of maps:
+/// { "origin": string,
+/// "file": string
+/// }
+/// \endcode
+/// (i.e. the configuration that was used prior to the datasource refactor)
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+/// during initialization
+/// \return An instance of the memory datasource client, or NULL if there was
+/// an error
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+
}
}
#endif // __DATA_SOURCE_MEMORY_H
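
A minimal sketch tying the renamed classes above together (not part of this
commit; the master file path is hypothetical):

    InMemoryClient client;
    boost::shared_ptr<InMemoryZoneFinder> finder(
        new InMemoryZoneFinder(RRClass::IN(), Name("example.org")));
    finder->load("/tmp/example.org.zone");   // may throw on a bad master file
    client.addZone(finder);                  // result::SUCCESS on first insert
    // Lookups then go through the generic client interface:
    DataSourceClient::FindResult result = client.findZone(Name("www.example.org"));
    ZoneIteratorPtr it = client.getIterator(Name("example.org"));
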
diff --git a/src/lib/datasrc/messagedef.mes b/src/lib/datasrc/messagedef.mes
deleted file mode 100644
index 2fc5c6b..0000000
--- a/src/lib/datasrc/messagedef.mes
+++ /dev/null
@@ -1,498 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX DATASRC_
-$NAMESPACE isc::datasrc
-
-# \brief Messages for the data source library
-
-% CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
-
-% CACHE_DESTROY destroying the hotspot cache
-Debug information. The hotspot cache is being destroyed.
-
-% CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
-cache.
-
-% CACHE_OLD_FOUND older instance of cache item found, replacing
-Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_FULL cache is full, dropping oldest
-Debug information. After inserting an item into the hotspot cache, the
-maximum number of items was exceeded, so the least recently used item will
-be dropped. This should be directly followed by CACHE_REMOVE.
-
-% CACHE_REMOVE removing '%1' from the cache
-Debug information. An item is being removed from the hotspot cache.
-
-% CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-
-% CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
-
-% CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
-
-% CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
-The maximum allowed number of items of the hotspot cache is set to the given
-number. If there are too many, some of them will be dropped. The size of 0
-means no limit.
-
-% CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
-
-% CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-
-% QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
-
-% QUERY_EMPTY_DNAME the DNAME on '%1' is empty
-During an attempt to synthesize CNAME from this DNAME it was discovered the
-DNAME is empty (it has no records). This indicates problem with supplied data.
-
-% QUERY_GET_NS_ADDITIONAL addition of A/AAAA for '%1' requested by NS '%2'
-Debug information. While processing a query, a NS record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
-Debug information. While processing a query, a MX record was met. It
-references the mentioned address, so A/AAAA records for it are looked up
-and put it into the additional section.
-
-% QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
-
-% QUERY_EMPTY_CNAME cNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
-
-% QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'
-A CNAME led to another CNAME and it led to another, and so on. After 16
-CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
-might possibly be a loop as well. Note that some of the CNAMEs might have
-been synthesized from DNAMEs. This indicates problem with supplied data.
-
-% QUERY_CHECK_CACHE checking cache for '%1/%2'
-Debug information. While processing a query, lookup to the hotspot cache
-is being made.
-
-% QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for ANY queries for consistency
-reasons.
-
-% QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
-Debug information. The hotspot cache is ignored for authoritative ANY queries
-for consistency reasons.
-
-% DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
-
-% QUERY_NO_ZONE no zone containing '%1' in class '%2'
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
-
-% QUERY_CACHED data for %1/%2 found in cache
-Debug information. The requested data were found in the hotspot cache, so
-no query is sent to the real data source.
-
-% QUERY_IS_SIMPLE simple query (%1/%2)
-Debug information. The last DO_QUERY is a simple query.
-
-% QUERY_IS_AUTH auth query (%1/%2)
-Debug information. The last DO_QUERY is an auth query.
-
-% QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
-
-% QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
-glue.
-
-% QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
-
-% QUERY_SIMPLE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the simple query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_AUTH_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the authoritative query. 1 means
-some error, 2 is not implemented. The data source should have logged the
-specific error already.
-
-% QUERY_GLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the glue query. 1 means some error,
-2 is not implemented. The data source should have logged the specific error
-already.
-
-% QUERY_NOGLUE_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the no-glue query. 1 means some
-error, 2 is not implemented. The data source should have logged the specific
-error already.
-
-% QUERY_REF_FAIL the underlying data source failed with %1
-The underlying data source failed to answer the query for referral information.
-1 means some error, 2 is not implemented. The data source should have logged
-the specific error already.
-
-% QUERY_INVALID_OP invalid query operation requested
-This indicates a programmer error. The DO_QUERY was called with unknown
-operation code.
-
-% QUERY_ADD_RRSET adding RRset '%1/%2' to message
-Debug information. An RRset is being added to the response message.
-
-% QUERY_COPY_AUTH copying authoritative section into message
-Debug information. The whole referral information is being copied into the
-response message.
-
-% QUERY_DELEGATION looking for delegation on the path to '%1'
-Debug information. The software is trying to identify delegation points on the
-way down to the given domain.
-
-% QUERY_ADD_SOA adding SOA of '%1'
-Debug information. A SOA record of the given zone is being added to the
-authority section of the response message.
-
-% QUERY_ADD_NSEC adding NSEC record for '%1'
-Debug information. A NSEC record covering this zone is being added.
-
-% QUERY_ADD_NSEC3 adding NSEC3 record of zone '%1'
-Debug information. A NSEC3 record for the given zone is being added to the
-response message.
-
-% QUERY_NO_DS_NSEC3 there's no DS record in the '%1' zone
-An attempt to add a NSEC3 record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_NO_DS_NSEC there's no DS record in the '%1' zone
-An attempt to add a NSEC record into the message failed, because the zone does
-not have any DS record. This indicates problem with the provided data.
-
-% QUERY_WILDCARD looking for a wildcard covering '%1'
-Debug information. A direct match wasn't found, so a wildcard covering the
-domain is being looked for now.
-
-% QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
-While processing a wildcard, it wasn't possible to prove nonexistence of the
-given domain or record. The code is 1 for error and 2 for not implemented.
-
-% QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)
-While processing a wildcard, a referral was met. But it wasn't possible to get
-enough information for it. The code is 1 for error, 2 for not implemented.
-
-% QUERY_PROCESS processing query '%1/%2' in the '%3' class
-Debug information. A sure query is being processed now.
-
-% QUERY_RRSIG unable to answer RRSIG query
-The server is unable to answer a direct query for RRSIG type, but was asked
-to do so.
-
-% QUERY_MISPLACED_TASK task of this type should not be here
-This indicates a programming error. A task was found in the internal task
-queue, but this kind of task wasn't designed to be inside the queue (it should
-be handled right away, not queued).
-
-% QUERY_TASK_FAIL task failed with %1
-The query subtask failed. The reason should have been reported by the subtask
-already. The code is 1 for error, 2 for not implemented.
-
-% QUERY_MISSING_NS missing NS records for '%1'
-NS records should have been put into the authority section. However, this zone
-has none. This indicates problem with provided data.
-
-% UNEXPECTED_QUERY_STATE unexpected query state
-This indicates a programming error. An internal task of unknown type was
-generated.
-
-% QUERY_FAIL query failed
-Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
-
-% QUERY_BAD_REFERRAL bad referral to '%1'
-The domain lives in another zone. But it is not possible to generate referral
-information for it.
-
-% QUERY_WILDCARD_FAIL error processing wildcard for '%1'
-During an attempt to cover the domain by a wildcard an error happened. The
-exact kind was hopefully already reported.
-
-% QUERY_MISSING_SOA the zone '%1' has no SOA
-The answer should have been a negative one (eg. of nonexistence of something).
-To do so, a SOA record should be put into the authority section, but the zone
-does not have one. This indicates problem with provided data.
-
-% QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
-The user wants DNSSEC and we discovered the entity doesn't exist (either
-domain or the record). But there was an error getting NSEC/NSEC3 record
-to prove the nonexistence.
-
-% QUERY_UNKNOWN_RESULT unknown result of subtask
-This indicates a programmer error. The answer of subtask doesn't look like
-anything known.
-
-% META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
-
-% META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
-classes do not match.
-
-% META_REMOVE removing data source from meta data source
-Debug information. A data source is being removed from meta data source.
-
-% MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
-
-% MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'
-Someone or something tried to add a CNAME into a domain that already contains
-some other data. But the protocol forbids coexistence of CNAME with anything
-(RFC 1034, section 3.6.2). This indicates a problem with provided data.
-
-% MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'
-This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
-
-% MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
-
-% MEM_SINGLETON trying to add multiple RRs for domain '%1' and type '%2'
-Some resource types are singletons -- only one is allowed in a domain
-(for example CNAME or SOA). This indicates a problem with provided data.
-
-% MEM_OUT_OF_ZONE domain '%1' doesn't belong to zone '%2'
-It was attempted to add the domain into a zone that shouldn't have it
-(eg. the domain is not subdomain of the zone origin). This indicates a
-problem with provided data.
-
-% MEM_WILDCARD_NS nS record in wildcard domain '%1'
-The software refuses to load NS records into a wildcard domain. It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'
-The software refuses to load DNAME records into a wildcard domain. It isn't
-explicitly forbidden, but the protocol is ambiguous about how this should
-behave and BIND 9 refuses that as well. Please describe your intention using
-different tools.
-
-% MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
-Debug information. An RRset is being added to the in-memory data source.
-
-% MEM_DUP_RRSET duplicate RRset '%1/%2'
-An RRset is being inserted into in-memory data source for a second time. The
-original version must be removed first. Note that loading master files where an
-RRset is split into multiple locations is not supported yet.
-
-% MEM_DNAME_ENCOUNTERED encountered a DNAME
-Debug information. While searching for the requested domain, a DNAME was
-encountered on the way. This may lead to redirection to a different domain and
-stop the search.
-
-% MEM_NS_ENCOUNTERED encountered a NS
-Debug information. While searching for the requested domain, an NS was
-encountered on the way (a delegation). This may stop the search.
-
-% MEM_RENAME renaming RRset from '%1' to '%2'
-Debug information. An RRset is being generated from a different RRset (most
-probably a wildcard). So it must be renamed to whatever the user asked for. In
-fact, it's impossible to rename RRsets with our libraries, so a new one is
-created and all resource records are copied over.
-
-% MEM_FIND find '%1/%2'
-Debug information. A search for the requested RRset is being started.
-
-% MEM_DNAME_FOUND DNAME found at '%1'
-Debug information. A DNAME was found instead of the requested information.
-
-% MEM_DELEG_FOUND delegation found at '%1'
-Debug information. A delegation point was found above the requested record.
-
-% MEM_SUPER_STOP stopped at superdomain '%1', domain '%2' is empty
-Debug information. The search stopped at a superdomain of the requested
-domain. The domain is an empty nonterminal, therefore it is treated as an
-NXRRSET case (e.g. the domain exists, but it doesn't have the requested record
-type).
-
-% MEM_WILDCARD_CANCEL wildcard match canceled for '%1'
-Debug information. A domain above the wildcard was reached, but there's
-something below the requested domain. Therefore the wildcard doesn't apply
-here. This behaviour is specified by RFC 1034, section 4.3.3.
-
-% MEM_NOTFOUND requested domain '%1' not found
-Debug information. The requested domain does not exist.
-
-% MEM_DOMAIN_EMPTY requested domain '%1' is empty
-Debug information. The requested domain exists in the tree of domains, but
-it is empty. Therefore it doesn't contain the requested resource type.
-
-% MEM_EXACT_DELEGATION delegation at the exact domain '%1'
-Debug information. There's an NS record at the requested domain. This means
-this zone is not authoritative for the requested domain, but a delegation
-should be followed. The requested domain is an apex of some zone.
-
-% MEM_ANY_SUCCESS ANY query for '%1' successful
-Debug information. The domain was found and an ANY type query is being answered
-by providing everything found inside the domain.
-
-% MEM_SUCCESS query for '%1/%2' successful
-Debug information. The requested record was found.
-
-% MEM_CNAME CNAME at the domain '%1'
-Debug information. The requested domain is an alias to a different domain,
-returning the CNAME instead.
-
-% MEM_NXRRSET no such type '%1' at '%2'
-Debug information. The domain exists, but it doesn't hold any record of the
-requested type.
-
-% MEM_CREATE creating zone '%1' in '%2' class
-Debug information. A representation of a zone for the in-memory data source is
-being created.
-
-% MEM_DESTROY destroying zone '%1' in '%2' class
-Debug information. A zone from in-memory data source is being destroyed.
-
-% MEM_LOAD loading zone '%1' from file '%2'
-Debug information. The content of master file is being loaded into the memory.
-
-% MEM_SWAP swapping contents of two zone representations ('%1' and '%2')
-Debug information. The contents of two in-memory zones are being exchanged.
-This is a usual practice for doing some manipulation in an exception-safe
-manner -- the new data are prepared in a different zone object and, when that
-works, the contents are swapped. The original object then contains the new
-data and the other one can be safely destroyed.
-
-% MEM_ADD_ZONE adding zone '%1/%2'
-Debug information. A zone is being added into the in-memory data source.
-
-% MEM_FIND_ZONE looking for zone '%1'
-Debug information. A zone object for this zone is being searched for in the
-in-memory data source.
-
-% STATIC_CREATE creating the static datasource
-Debug information. The static data source (the one holding stuff like
-version.bind) is being created.
-
-% STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
-
-% STATIC_FIND looking for '%1/%2'
-Debug information. This resource record set is being looked up in the static
-data source.
-
-% SQLITE_FINDREC looking for record '%1/%2'
-Debug information. The SQLite data source is looking up records of the given
-name and type in the database.
-
-% SQLITE_ENCLOSURE looking for zone containing '%1'
-Debug information. The SQLite data source is trying to identify which zone
-should hold this domain.
-
-% SQLITE_ENCLOSURE_BAD_CLASS class mismatch looking for a zone ('%1' and '%2')
-The SQLite data source can handle only one class at a time and it was asked
-to identify which zone is holding data of a different class.
-
-% SQLITE_ENCLOSURE_NOTFOUND no zone contains it
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
-no such zone in our data.
-
-% SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up the name preceding the supplied one.
-
-% SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify the name preceding this one, but
-that name is not contained in any zone in the data source.
-
-% SQLITE_FIND_NSEC3 looking for NSEC3 in zone '%1' for hash '%2'
-Debug information. We're trying to look up an NSEC3 record in the SQLite data
-source.
-
-% SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'
-The SQLite data source was asked to provide an NSEC3 record for the given
-zone, but it doesn't contain that zone.
-
-% SQLITE_FIND looking for RRset '%1/%2'
-Debug information. The SQLite data source is looking up a resource record
-set.
-
-% SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an RRset, but the data source contains
-a different class than the one the query was for.
-
-% SQLITE_FINDEXACT looking for exact RRset '%1/%2'
-Debug information. The SQLite data source is looking up an exact resource
-record.
-
-% SQLITE_FINDEXACT_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')
-The SQLite data source was looking up an exact RRset, but the data source
-contains a different class than the one the query was for.
-
-% SQLITE_FINDADDRS looking for A/AAAA addresses for '%1'
-Debug information. The data source is looking up the addresses for the given
-domain name.
-
-% SQLITE_FINDADDRS_BAD_CLASS class mismatch looking for addresses ('%1' and '%2')
-The SQLite data source was looking up A/AAAA addresses, but the data source
-contains a different class than the one the query was for.
-
-% SQLITE_FINDREF looking for referral at '%1'
-Debug information. The SQLite data source is identifying whether this domain
-is a referral and where it goes.
-
-% SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')
-The SQLite data source was trying to identify if there's a referral, but it
-contains a different class than the one the query was for.
-
-% SQLITE_CREATE SQLite data source created
-Debug information. An instance of SQLite data source is being created.
-
-% SQLITE_DESTROY SQLite data source destroyed
-Debug information. An instance of SQLite data source is being destroyed.
-
-% SQLITE_SETUP setting up SQLite database
-The database for the SQLite data source was found empty. It is assumed this is
-the first run, so it is being initialized with the current schema. It'll still
-contain no data, but it will be ready for use.
-
-% SQLITE_OPEN opening SQLite database '%1'
-Debug information. The SQLite data source is loading an SQLite database in
-the provided file.
-
-% SQLITE_CLOSE closing SQLite database
-Debug information. The SQLite data source is closing the database file.
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index 03a6967..b6c098a 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -209,7 +209,7 @@ public:
/// \exception isc::InvalidParameter Unsettable flag is specified
/// \exception None otherwise
/// \param flag The node flag to be changed.
- /// \on If \c true, set the flag to on; otherwise set it to off.
+ /// \param on If \c true, set the flag to on; otherwise set it to off.
void setFlag(Flags flag, bool on = true) {
if ((flag & ~SETTABLE_FLAGS) != 0) {
isc_throw(isc::InvalidParameter,
@@ -226,7 +226,8 @@ public:
private:
/// \name Callback related methods
///
- /// See the description of \c RBTree<T>::find() about callbacks.
+ /// See the description of \c RBTree<T>::find() at \ref callback
+ /// about callbacks.
///
/// These methods never throw an exception.
//@{
@@ -702,11 +703,12 @@ public:
}
/// \brief Find with callback and node chain.
+ /// \anchor callback
///
/// This version of \c find() is specifically designed for the backend
- /// of the \c MemoryZone class, and implements all necessary features
- /// for that purpose. Other applications shouldn't need these additional
- /// features, and should normally use the simpler versions.
+ /// of the \c InMemoryZoneFinder class, and implements all necessary
+ /// features for that purpose. Other applications shouldn't need these
+ /// additional features, and should normally use the simpler versions.
///
/// This version of \c find() calls the callback whenever traversing (on
/// the way from root down the tree) a marked node on the way down through
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..efa5717
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,895 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <string>
+#include <vector>
+
+#include <boost/foreach.hpp>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+#include <util/filename.h>
+
+using namespace std;
+using namespace isc::data;
+
+#define SQLITE_SCHEMA_VERSION 1
+
+#define CONFIG_ITEM_DATABASE_FILE "database_file"
+
+namespace isc {
+namespace datasrc {
+
+// The following enum and char* array define the SQL statements commonly
+// used in this implementation. Corresponding prepared statements (of
+// type sqlite3_stmt*) are maintained in the statements_ array of the
+// SQLite3Parameters structure.
+
+enum StatementID {
+ ZONE = 0,
+ ANY = 1,
+ ANY_SUB = 2,
+ BEGIN = 3,
+ COMMIT = 4,
+ ROLLBACK = 5,
+ DEL_ZONE_RECORDS = 6,
+ ADD_RECORD = 7,
+ DEL_RECORD = 8,
+ ITERATE = 9,
+ FIND_PREVIOUS = 10,
+ ADD_RECORD_DIFF = 11,
+ GET_RECORD_DIFF = 12, // This is temporary for testing "add diff"
+ NUM_STATEMENTS = 13
+};
+
+const char* const text_statements[NUM_STATEMENTS] = {
+ // note for ANY and ITERATE: the order of the SELECT values is
+ // specifically chosen to match the enum values in RecordColumns
+ "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
+ "SELECT rdtype, ttl, sigtype, rdata FROM records " // ANY
+ "WHERE zone_id=?1 AND name=?2",
+ "SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
+ "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+ "BEGIN", // BEGIN
+ "COMMIT", // COMMIT
+ "ROLLBACK", // ROLLBACK
+ "DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
+ "INSERT INTO records " // ADD_RECORD
+ "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
+ "AND rdtype=?3 AND rdata=?4",
+ "SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
+ "WHERE zone_id = ?1 ORDER BY rname, rdtype",
+ /*
+ * This one looks for previous name with NSEC record. It is done by
+ * using the reversed name. The NSEC is checked because we need to
+ * skip glue data, which don't have the NSEC.
+ */
+ "SELECT name FROM records " // FIND_PREVIOUS
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1",
+ "INSERT INTO diffs " // ADD_RECORD_DIFF
+ "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
+ , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+ "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
+};
+
+struct SQLite3Parameters {
+ SQLite3Parameters() :
+ db_(NULL), version_(-1), in_transaction(false), updating_zone(false),
+ updated_zone_id(-1)
+ {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ statements_[i] = NULL;
+ }
+ }
+
+ // This method returns the prepared SQLite3 statement for the specified ID.
+ // If it's not yet prepared, it internally creates a new one. This way we
+ // can avoid preparing unnecessary statements and minimize the overhead.
+ sqlite3_stmt*
+ getStatement(int id) {
+ assert(id < NUM_STATEMENTS);
+ if (statements_[id] == NULL) {
+ assert(db_ != NULL);
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db_, text_statements[id], -1, &prepared,
+ NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: "
+ << text_statements[id] <<
+ ": " << sqlite3_errmsg(db_));
+ }
+ statements_[id] = prepared;
+ }
+ return (statements_[id]);
+ }
+
+ void
+ finalizeStatements() {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ if (statements_[i] != NULL) {
+ sqlite3_finalize(statements_[i]);
+ statements_[i] = NULL;
+ }
+ }
+ }
+
+ sqlite3* db_;
+ int version_;
+ bool in_transaction; // whether or not a transaction has been started
+ bool updating_zone; // whether or not updating the zone
+ int updated_zone_id; // valid only when in_transaction is true
+private:
+ // statements_ are private and must be accessed via getStatement() outside
+ // of this structure.
+ sqlite3_stmt* statements_[NUM_STATEMENTS];
+};
+
+// This is a helper class to encapsulate the code logic of executing
+// a specific SQLite3 statement, ensuring the corresponding prepared
+// statement is always reset whether the execution is completed successfully
+// or it results in an exception.
+// Note that an object of this class is intended to be used for "ephemeral"
+// statements, which are completed with a single "step" (normally within a
+// single call to an SQLite3Accessor method). In particular, it cannot be
+// used for "SELECT" variants, which generally expect multiple matching rows.
+class StatementProcessor {
+public:
+ // desc will be used on failure in the what() message of the resulting
+ // DataSourceError exception.
+ StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
+ const char* desc) :
+ dbparameters_(dbparameters), stmt_(dbparameters.getStatement(stmt_id)),
+ desc_(desc)
+ {
+ sqlite3_clear_bindings(stmt_);
+ }
+
+ ~StatementProcessor() {
+ sqlite3_reset(stmt_);
+ }
+
+ void exec() {
+ if (sqlite3_step(stmt_) != SQLITE_DONE) {
+ sqlite3_reset(stmt_);
+ isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
+ sqlite3_errmsg(dbparameters_.db_));
+ }
+ }
+
+private:
+ SQLite3Parameters& dbparameters_;
+ sqlite3_stmt* stmt_;
+ const char* const desc_;
+};
+
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+ const string& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ filename_(filename),
+ class_(rrclass),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+boost::shared_ptr<DatabaseAccessor>
+SQLite3Accessor::clone() {
+ return (boost::shared_ptr<DatabaseAccessor>(new SQLite3Accessor(filename_,
+ class_)));
+}
+
+namespace {
+
+// This is a helper class to initialize a Sqlite3 DB safely. An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and releases them in the destructor. Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception-free manner. This way, the main code
+// for the initialization can be exception safe, and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+ ~Initializer() {
+ if (params_.db_ != NULL) {
+ sqlite3_close(params_.db_);
+ }
+ }
+ void move(SQLite3Parameters* dst) {
+ *dst = params_;
+ params_ = SQLite3Parameters(); // clear everything
+ }
+ SQLite3Parameters params_;
+};
+
+const char* const SCHEMA_LIST[] = {
+ "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+ "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+ "CREATE INDEX zones_byname ON zones (name)",
+ "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX records_byname ON records (name)",
+ "CREATE INDEX records_byrname ON records (rname)",
+ "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
+ "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdata STRING NOT NULL)",
+ NULL
+};
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+ statement << ": " << sqlite3_errmsg(db));
+ }
+ return (prepared);
+}
+
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void doSleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int checkSchemaVersion(sqlite3* db) {
+ sqlite3_stmt* prepared = NULL;
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, "SELECT version FROM schema_version",
+ -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 49) { // 49 == last attempt
+ isc_throw(SQLite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(SQLite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 49) { // 49 == last attempt
+ isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version == -1) {
+ for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+ if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error,
+ "Failed to set up schema " << SCHEMA_LIST[i]);
+ }
+ }
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
+ }
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
+ }
+ initializer->params_.version_ = schema_version;
+}
+
+}
+
+void
+SQLite3Accessor::open(const std::string& name) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
+ if (dbparameters_->db_ != NULL) {
+ // There shouldn't be a way to trigger this anyway
+ isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
+ }
+
+ Initializer initializer;
+
+ if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
+ isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
+ }
+
+ checkAndSetupSchema(&initializer);
+ initializer.move(dbparameters_.get());
+}
+
+SQLite3Accessor::~SQLite3Accessor() {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
+ if (dbparameters_->db_ != NULL) {
+ close();
+ }
+}
+
+void
+SQLite3Accessor::close(void) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+ if (dbparameters_->db_ == NULL) {
+ isc_throw(DataSourceError,
+ "SQLite data source is being closed before open");
+ }
+
+ dbparameters_->finalizeStatements();
+ sqlite3_close(dbparameters_->db_);
+ dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Accessor::getZone(const std::string& name) const {
+ int rc;
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ZONE);
+
+ // Take the statement (simple SELECT id FROM zones WHERE...)
+ // and prepare it (bind the parameters to it)
+ sqlite3_reset(stmt);
+ rc = sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << name <<
+ " to SQL statement (zone)");
+ }
+ rc = sqlite3_bind_text(stmt, 2, class_.c_str(), -1, SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << class_ <<
+ " to SQL statement (zone)");
+ }
+
+ // Get the data there and see if it found anything
+ rc = sqlite3_step(stmt);
+ if (rc == SQLITE_ROW) {
+ const int zone_id = sqlite3_column_int(stmt, 0);
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(true, zone_id));
+ } else if (rc == SQLITE_DONE) {
+ // Free resources
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(false, 0));
+ }
+
+ sqlite3_reset(stmt);
+ isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ // Compilers might not realize isc_throw always throws
+ return (std::pair<bool, int>(false, 0));
+}
+
+namespace {
+
+// Conversion to plain char
+const char*
+convertToPlainChar(const unsigned char* ucp, sqlite3 *db) {
+ if (ucp == NULL) {
+ // The field can really be NULL, in which case we return an
+ // empty string, or sqlite may have run out of memory, in
+ // which case we raise an error
+ if (sqlite3_errcode(db) == SQLITE_NOMEM) {
+ isc_throw(DataSourceError,
+ "Sqlite3 backend encountered a memory allocation "
+ "error in sqlite3_column_text()");
+ } else {
+ return ("");
+ }
+ }
+ const void* p = ucp;
+ return (static_cast<const char*>(p));
+}
+
+}
+class SQLite3Accessor::Context : public DatabaseAccessor::IteratorContext {
+public:
+ // Construct an iterator for all records. When constructed this
+ // way, the getNext() call will copy all fields
+ Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id) :
+ iterator_type_(ITT_ALL),
+ accessor_(accessor),
+ statement_(NULL),
+ name_("")
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(accessor->dbparameters_->db_,
+ text_statements[ITERATE]);
+ bindZoneId(id);
+ }
+
+ // Construct an iterator for records with a specific name. When constructed
+ // this way, the getNext() call will copy all fields except name
+ Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id,
+ const std::string& name, bool subdomains) :
+ iterator_type_(ITT_NAME),
+ accessor_(accessor),
+ statement_(NULL),
+ name_(name)
+
+ {
+ // We create the statement now and then just keep getting data from it
+ statement_ = prepare(accessor->dbparameters_->db_,
+ subdomains ? text_statements[ANY_SUB] :
+ text_statements[ANY]);
+ bindZoneId(id);
+ bindName(name_);
+ }
+
+ bool getNext(std::string (&data)[COLUMN_COUNT]) {
+ // If there's another row, get it
+ // If finalize has been called (e.g. when previous getNext() got
+ // SQLITE_DONE), directly return false
+ if (statement_ == NULL) {
+ return (false);
+ }
+ const int rc(sqlite3_step(statement_));
+ if (rc == SQLITE_ROW) {
+ // For both types, we copy the first four columns
+ copyColumn(data, TYPE_COLUMN);
+ copyColumn(data, TTL_COLUMN);
+ copyColumn(data, SIGTYPE_COLUMN);
+ copyColumn(data, RDATA_COLUMN);
+ // Only copy Name if we are iterating over every record
+ if (iterator_type_ == ITT_ALL) {
+ copyColumn(data, NAME_COLUMN);
+ }
+ return (true);
+ } else if (rc != SQLITE_DONE) {
+ isc_throw(DataSourceError,
+ "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ finalize();
+ return (false);
+ }
+
+ virtual ~Context() {
+ finalize();
+ }
+
+private:
+ // Depending on which constructor is called, behaviour is slightly
+ // different. We keep track of what to do with the iterator type
+ // See description of getNext() and the constructors
+ enum IteratorType {
+ ITT_ALL,
+ ITT_NAME
+ };
+
+ void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
+ data[column] = convertToPlainChar(sqlite3_column_text(statement_,
+ column),
+ accessor_->dbparameters_->db_);
+ }
+
+ void bindZoneId(const int zone_id) {
+ if (sqlite3_bind_int(statement_, 1, zone_id) != SQLITE_OK) {
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind int " << zone_id <<
+ " to SQL statement: " <<
+ sqlite3_errmsg(accessor_->dbparameters_->db_));
+ }
+ }
+
+ void bindName(const std::string& name) {
+ if (sqlite3_bind_text(statement_, 2, name.c_str(), -1,
+ SQLITE_TRANSIENT) != SQLITE_OK) {
+ const char* errmsg = sqlite3_errmsg(accessor_->dbparameters_->db_);
+ finalize();
+ isc_throw(SQLite3Error, "Could not bind text '" << name <<
+ "' to SQL statement: " << errmsg);
+ }
+ }
+
+ void finalize() {
+ sqlite3_finalize(statement_);
+ statement_ = NULL;
+ }
+
+ const IteratorType iterator_type_;
+ boost::shared_ptr<const SQLite3Accessor> accessor_;
+ sqlite3_stmt* statement_;
+ const std::string name_;
+};
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getRecords(const std::string& name, int id,
+ bool subdomains) const
+{
+ return (IteratorContextPtr(new Context(shared_from_this(), id, name,
+ subdomains)));
+}
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getAllRecords(int id) const {
+ return (IteratorContextPtr(new Context(shared_from_this(), id)));
+}
+
+pair<bool, int>
+SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
+ if (dbparameters_->updating_zone) {
+ isc_throw(DataSourceError,
+ "duplicate zone update on SQLite3 data source");
+ }
+ if (dbparameters_->in_transaction) {
+ isc_throw(DataSourceError,
+ "zone update attempt in another SQLite3 transaction");
+ }
+
+ const pair<bool, int> zone_info(getZone(zone_name));
+ if (!zone_info.first) {
+ return (zone_info);
+ }
+
+ StatementProcessor(*dbparameters_, BEGIN,
+ "start an SQLite3 update transaction").exec();
+
+ if (replace) {
+ try {
+ StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
+ "delete zone records");
+
+ sqlite3_stmt* stmt = dbparameters_->getStatement(DEL_ZONE_RECORDS);
+ sqlite3_clear_bindings(stmt);
+ if (sqlite3_bind_int(stmt, 1, zone_info.second) != SQLITE_OK) {
+ isc_throw(DataSourceError,
+ "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+
+ delzone_exec.exec();
+ } catch (const DataSourceError&) {
+ // Once we start a transaction, if something unexpected happens
+ // we need to rollback the transaction so that a subsequent update
+ // is still possible with this accessor.
+ StatementProcessor(*dbparameters_, ROLLBACK,
+ "rollback an SQLite3 transaction").exec();
+ throw;
+ }
+ }
+
+ dbparameters_->in_transaction = true;
+ dbparameters_->updating_zone = true;
+ dbparameters_->updated_zone_id = zone_info.second;
+
+ return (zone_info);
+}
+
+void
+SQLite3Accessor::startTransaction() {
+ if (dbparameters_->in_transaction) {
+ isc_throw(DataSourceError,
+ "duplicate transaction on SQLite3 data source");
+ }
+
+ StatementProcessor(*dbparameters_, BEGIN,
+ "start an SQLite3 transaction").exec();
+ dbparameters_->in_transaction = true;
+}
+
+void
+SQLite3Accessor::commit() {
+ if (!dbparameters_->in_transaction) {
+ isc_throw(DataSourceError, "performing commit on SQLite3 "
+ "data source without transaction");
+ }
+
+ StatementProcessor(*dbparameters_, COMMIT,
+ "commit an SQLite3 transaction").exec();
+ dbparameters_->in_transaction = false;
+ dbparameters_->updated_zone_id = -1;
+}
+
+void
+SQLite3Accessor::rollback() {
+ if (!dbparameters_->in_transaction) {
+ isc_throw(DataSourceError, "performing rollback on SQLite3 "
+ "data source without transaction");
+ }
+
+ StatementProcessor(*dbparameters_, ROLLBACK,
+ "rollback an SQLite3 transaction").exec();
+ dbparameters_->in_transaction = false;
+ dbparameters_->updated_zone_id = -1;
+}
+
+namespace {
+// Commonly used code sequence for adding/deleting record
+template <typename COLUMNS_TYPE>
+void
+doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
+ COLUMNS_TYPE update_params, const char* exec_desc)
+{
+ sqlite3_stmt* const stmt = dbparams.getStatement(stmt_id);
+ StatementProcessor executer(dbparams, stmt_id, exec_desc);
+
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, dbparams.updated_zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ const size_t column_count =
+ sizeof(update_params) / sizeof(update_params[0]);
+ for (int i = 0; i < column_count; ++i) {
+ // The old sqlite3 data source API assumes NULL for an empty column.
+ // We need to provide compatibility at least for now.
+ if (sqlite3_bind_text(stmt, ++param_id,
+ update_params[i].empty() ? NULL :
+ update_params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ }
+ executer.exec();
+}
+}
+
+void
+SQLite3Accessor::addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "adding record to SQLite3 "
+ "data source without transaction");
+ }
+ doUpdate<const string (&)[DatabaseAccessor::ADD_COLUMN_COUNT]>(
+ *dbparameters_, ADD_RECORD, columns, "add record to zone");
+}
+
+void
+SQLite3Accessor::deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "deleting record in SQLite3 "
+ "data source without transaction");
+ }
+ doUpdate<const string (&)[DatabaseAccessor::DEL_PARAM_COUNT]>(
+ *dbparameters_, DEL_RECORD, params, "delete record from zone");
+}
+
+void
+SQLite3Accessor::addRecordDiff(int zone_id, uint32_t serial,
+ DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT])
+{
+ if (!dbparameters_->updating_zone) {
+ isc_throw(DataSourceError, "adding record diff without update "
+ "transaction on " << getDBName());
+ }
+ if (zone_id != dbparameters_->updated_zone_id) {
+ isc_throw(DataSourceError, "bad zone ID for adding record diff on "
+ << getDBName() << ": " << zone_id << ", must be "
+ << dbparameters_->updated_zone_id);
+ }
+
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(ADD_RECORD_DIFF);
+ StatementProcessor executer(*dbparameters_, ADD_RECORD_DIFF,
+ "add record diff");
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int64(stmt, ++param_id, serial)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_int(stmt, ++param_id, operation)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ for (int i = 0; i < DIFF_PARAM_COUNT; ++i) {
+ if (sqlite3_bind_text(stmt, ++param_id, params[i].c_str(),
+ -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ }
+ executer.exec();
+}
+
+vector<vector<string> >
+SQLite3Accessor::getRecordDiff(int zone_id) {
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(GET_RECORD_DIFF);
+ sqlite3_bind_int(stmt, 1, zone_id);
+
+ vector<vector<string> > result;
+ while (sqlite3_step(stmt) == SQLITE_ROW) {
+ vector<string> row_result;
+ for (int i = 0; i < 6; ++i) {
+ row_result.push_back(convertToPlainChar(sqlite3_column_text(stmt,
+ i),
+ dbparameters_->db_));
+ }
+ result.push_back(row_result);
+ }
+ sqlite3_reset(stmt);
+
+ return (result);
+}
+
+std::string
+SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
+ const
+{
+ sqlite3_stmt* const stmt = dbparameters_->getStatement(FIND_PREVIOUS);
+ sqlite3_reset(stmt);
+ sqlite3_clear_bindings(stmt);
+
+ if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_text(stmt, 2, rname.c_str(), -1, SQLITE_STATIC) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind name " << rname <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+
+ std::string result;
+ const int rc = sqlite3_step(stmt);
+ if (rc == SQLITE_ROW) {
+ // We found it
+ result = convertToPlainChar(sqlite3_column_text(stmt, 0),
+ dbparameters_->db_);
+ }
+ sqlite3_reset(stmt);
+
+ if (rc == SQLITE_DONE) {
+ // No NSEC records here, this DB doesn't support DNSSEC or
+ // we asked before the apex
+ isc_throw(isc::NotImplemented, "The zone doesn't support DNSSEC or "
+ "query before apex");
+ }
+
+ if (rc != SQLITE_ROW && rc != SQLITE_DONE) {
+ // Some kind of error
+ isc_throw(SQLite3Error, "Could not get data for previous name");
+ }
+
+ return (result);
+}
+
+namespace {
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion; right now this accepts
+ * the 'old' configuration (see the header file).
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for SQlite3 backend must be a map");
+ result = false;
+ } else {
+ if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
+ addError(errors,
+ "Config for SQlite3 backend does not contain a '"
+ CONFIG_ITEM_DATABASE_FILE
+ "' value");
+ result = false;
+ } else if (!config->get(CONFIG_ITEM_DATABASE_FILE) ||
+ config->get(CONFIG_ITEM_DATABASE_FILE)->getType() !=
+ Element::string) {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is not a string");
+ result = false;
+ } else if (config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue() ==
+ "") {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is empty");
+ result = false;
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config, std::string& error) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ error = "Configuration error: " + errors->str();
+ return (NULL);
+ }
+ std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+ try {
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(dbfile, "IN")); // XXX: avoid hardcoding the RR class
+ return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
+ } catch (const std::exception& exc) {
+ error = std::string("Error creating sqlite3 datasource: ") + exc.what();
+ return (NULL);
+ } catch (...) {
+ error = std::string("Error creating sqlite3 datasource, "
+ "unknown exception");
+ return (NULL);
+ }
+}
+
+void destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+} // end of namespace datasrc
+} // end of namespace isc
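For illustration only, a minimal read-only use of the accessor added above
could look like the sketch below. It is not part of the commit: the database
file name and zone name are made up, and the RecordColumns constants
(COLUMN_COUNT, NAME_COLUMN, TYPE_COLUMN, RDATA_COLUMN) are assumed to be the
public enum from datasrc/database.h that the ANY/ITERATE comment in the new
file refers to.

    #include <datasrc/sqlite3_accessor.h>
    #include <datasrc/database.h>

    #include <boost/shared_ptr.hpp>

    #include <iostream>
    #include <string>
    #include <utility>

    using namespace isc::datasrc;

    int main() {
        // The accessor has to be owned by a shared_ptr, because
        // getAllRecords() calls shared_from_this() internally.
        boost::shared_ptr<SQLite3Accessor> accessor(
            new SQLite3Accessor("example.sqlite3", "IN"));

        const std::pair<bool, int> zone = accessor->getZone("example.org.");
        if (!zone.first) {
            return (1);    // the zone is not stored in this database
        }

        // Iterate over all records of the zone; getNext() fills the array
        // in the order given by the RecordColumns enum and returns false
        // once the data is exhausted.
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        DatabaseAccessor::IteratorContextPtr context =
            accessor->getAllRecords(zone.second);
        while (context->getNext(columns)) {
            std::cout << columns[DatabaseAccessor::NAME_COLUMN] << " "
                      << columns[DatabaseAccessor::TYPE_COLUMN] << " "
                      << columns[DatabaseAccessor::RDATA_COLUMN] << std::endl;
        }
        return (0);
    }
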
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..6b5369c
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,231 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <string>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
+/**
+ * \brief Low-level database error
+ *
+ * This exception is thrown when the SQLite library complains about something.
+ * It might mean a corrupt database file, an invalid request, or that something
+ * is rotten in the library.
+ */
+class SQLite3Error : public Exception {
+public:
+ SQLite3Error(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+struct SQLite3Parameters;
+
+/**
+ * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
+ *
+ * This opens one database file with our schema and serves data from there.
+ * According to the design, it doesn't interpret the data in any way, it just
+ * provides unified access to the DB.
+ */
+class SQLite3Accessor : public DatabaseAccessor,
+ public boost::enable_shared_from_this<SQLite3Accessor> {
+public:
+ /**
+ * \brief Constructor
+ *
+ * This opens the database and becomes ready to serve data from there.
+ *
+ * \exception SQLite3Error will be thrown if the given database file
+ * doesn't work (it is broken, doesn't exist and can't be created, etc).
+ *
+ * \param filename The database file to be used.
+ * \param rrclass Textual representation of RR class ("IN", "CH", etc),
+ * specifying which class of data it should serve (while the database
+ * file can contain multiple classes of data, a single accessor can
+ * work with only one class).
+ */
+ SQLite3Accessor(const std::string& filename, const std::string& rrclass);
+
+ /**
+ * \brief Destructor
+ *
+ * Closes the database.
+ */
+ ~SQLite3Accessor();
+
+ /// This implementation internally opens a new sqlite3 database for the
+ /// same file name specified in the constructor of the original accessor.
+ virtual boost::shared_ptr<DatabaseAccessor> clone();
+
+ /**
+ * \brief Look up a zone
+ *
+ * This implements the getZone from DatabaseAccessor and looks up a zone
+ * in the data. It looks for a zone with the exact given origin and class
+ * passed to the constructor.
+ *
+ * \exception SQLite3Error if something about the database is broken.
+ *
+ * \param name The (fully qualified) domain name of zone to look up
+ * \return A pair whose first element indicates whether the lookup was
+ * successful; if it was, the second element holds the zone id.
+ */
+ virtual std::pair<bool, int> getZone(const std::string& name) const;
+
+ /** \brief Look up all resource records for a name
+ *
+ * This implements the getRecords() method from DatabaseAccessor
+ *
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param name the name to look up
+ * \param id the zone id, as returned by getZone()
+ * \param subdomains Match subdomains instead of the name.
+ * \return Iterator that contains all records with the given name
+ */
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id,
+ bool subdomains = false) const;
+
+ /** \brief Look up all resource records for a zone
+ *
+ * This implements the getAllRecords() method from DatabaseAccessor
+ *
+ * \exception SQLite3Error if there is an sqlite3 error when performing
+ * the query
+ *
+ * \param id the zone id, as returned by getZone()
+ * \return Iterator that contains all records in the given zone
+ */
+ virtual IteratorContextPtr getAllRecords(int id) const;
+
+ virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace);
+
+ virtual void startTransaction();
+
+ /// \note We are quite impatient here: it's quite possible that the COMMIT
+ /// fails due to another process performing a SELECT on the same database
+ /// (consider the case where the COMMIT is done by xfrin or a dynamic update
+ /// server while an authoritative server is busy reading the DB).
+ /// In a future version we will probably need to introduce some retry
+ /// attempts and/or an increased timeout before giving up on the COMMIT,
+ /// even though that still wouldn't guarantee 100% success. Right now this
+ /// implementation throws a \c DataSourceError exception in such a case.
+ virtual void commit();
+
+ /// \note In SQLite3, rollback can fail if another unfinished statement is
+ /// being performed on the same database structure.
+ /// Although this is not expected to happen in our intended usage, it's not
+ /// guaranteed to be prevented at the API level. If it ever happens, this
+ /// method throws a \c DataSourceError exception. It should be
+ /// considered a bug of the higher level application program.
+ virtual void rollback();
+
+ virtual void addRecordToZone(
+ const std::string (&columns)[ADD_COLUMN_COUNT]);
+
+ virtual void deleteRecordInZone(
+ const std::string (¶ms)[DEL_PARAM_COUNT]);
+
+ /// This derived version of the method prepares an SQLite3 statement
+ /// for adding the diff the first time it's called, and if that fails it
+ /// throws an \c SQLite3Error exception.
+ virtual void addRecordDiff(
+ int zone_id, uint32_t serial, DiffOperation operation,
+ const std::string (¶ms)[DIFF_PARAM_COUNT]);
+
+ // A short term method for tests until we implement more complete
+ // API to retrieve diffs (#1330). It returns all records of the diffs
+ // table whose zone_id column is identical to the given value.
+ // Since this is a short term workaround, it ignores some corner cases
+ // (such as an SQLite3 execution failure) and is not very efficient,
+ // in favor of brevity. Once #1330 is completed, this method must be
+ // removed, and the tests using this method must be rewritten using the
+ // official API.
+ std::vector<std::vector<std::string> > getRecordDiff(int zone_id);
+
+ /// The SQLite3 implementation of this method returns a string starting
+ /// with a fixed prefix of "sqlite3_" followed by the DB file name
+ /// with any path component removed. For example, for the DB file
+ /// /somewhere/in/the/system/bind10.sqlite3, this method will return
+ /// "sqlite3_bind10.sqlite3".
+ virtual const std::string& getDBName() const { return (database_name_); }
+
+ /// \brief Concrete implementation of the pure virtual method
+ virtual std::string findPreviousName(int zone_id, const std::string& rname)
+ const;
+
+private:
+ /// \brief Private database data
+ boost::scoped_ptr<SQLite3Parameters> dbparameters_;
+ /// \brief The filename of the DB (necessary for clone())
+ const std::string filename_;
+ /// \brief The class for which the queries are done
+ const std::string class_;
+ /// \brief Opens the database
+ void open(const std::string& filename);
+ /// \brief Closes the database
+ void close();
+ /// \brief SQLite3 implementation of IteratorContext
+ class Context;
+ friend class Context;
+ const std::string database_name_;
+};
+
+/// \brief Creates an instance of the SQLite3 datasource client
+///
+/// Currently the configuration passed here must be a MapElement, containing
+/// one item called "database_file", whose value is a string
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+/// during initialization
+/// \return An instance of the sqlite3 datasource client, or NULL if there was
+/// an error
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+ std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+}
+}
+
+#endif // __DATASRC_SQLITE3_ACCESSOR_H
+
+// Local Variables:
+// mode: c++
+// End:
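As a hedged usage sketch (not part of the commit), the createInstance() /
destroyInstance() pair declared above might be driven roughly as follows. The
JSON string and file path are invented for the example, and Element::fromJSON()
from cc/data is assumed to be the usual way to build the configuration element.

    #include <datasrc/sqlite3_accessor.h>
    #include <datasrc/client.h>
    #include <cc/data.h>

    #include <iostream>
    #include <string>

    using namespace isc::datasrc;
    using namespace isc::data;

    int main() {
        // Build the map configuration checkConfig() expects: a single
        // "database_file" entry holding a non-empty string.
        ConstElementPtr config =
            Element::fromJSON("{\"database_file\": \"/tmp/example.sqlite3\"}");

        std::string error;
        DataSourceClient* client = createInstance(config, error);
        if (client == NULL) {
            std::cerr << "sqlite3 data source not created: " << error
                      << std::endl;
            return (1);
        }

        // ... use the client here (findZone(), getIterator(), ...) ...

        destroyInstance(client);
        return (0);
    }
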
diff --git a/src/lib/datasrc/sqlite3_datasrc.cc b/src/lib/datasrc/sqlite3_datasrc.cc
index 22f035b..03b057c 100644
--- a/src/lib/datasrc/sqlite3_datasrc.cc
+++ b/src/lib/datasrc/sqlite3_datasrc.cc
@@ -26,6 +26,8 @@
#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#define SQLITE_SCHEMA_VERSION 1
+
using namespace std;
using namespace isc::dns;
using namespace isc::dns::rdata;
@@ -77,6 +79,8 @@ const char* const SCHEMA_LIST[] = {
NULL
};
+const char* const q_version_str = "SELECT version FROM schema_version";
+
const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1";
const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
@@ -254,7 +258,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
}
break;
}
-
+
sqlite3_reset(query);
sqlite3_clear_bindings(query);
@@ -295,7 +299,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
//
sqlite3_reset(dbparameters->q_count_);
sqlite3_clear_bindings(dbparameters->q_count_);
-
+
rc = sqlite3_bind_int(dbparameters->q_count_, 1, zone_id);
if (rc != SQLITE_OK) {
isc_throw(Sqlite3Error, "Could not bind zone ID " << zone_id <<
@@ -351,14 +355,13 @@ Sqlite3DataSrc::findClosestEnclosure(DataSrcMatch& match) const {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_SQLITE_ENCLOSURE).
arg(match.getName());
if (match.getClass() != getClass() && match.getClass() != RRClass::ANY()) {
- LOG_ERROR(logger, DATASRC_SQLITE_ENCLOSURE_BAD_CLASS).arg(getClass()).
- arg(match.getClass());
return;
}
unsigned int position;
if (findClosest(match.getName(), &position) == -1) {
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_SQLITE_ENCLOSURE_NOTFOUND);
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_SQLITE_ENCLOSURE_NOT_FOUND)
+ .arg(match.getName());
return;
}
@@ -654,29 +657,90 @@ prepare(sqlite3* const db, const char* const statement) {
return (prepared);
}
-void
-checkAndSetupSchema(Sqlite3Initializer* initializer) {
- sqlite3* const db = initializer->params_.db_;
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void do_sleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int check_schema_version(sqlite3* db) {
sqlite3_stmt* prepared = NULL;
- if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
- &prepared, NULL) == SQLITE_OK &&
- sqlite3_step(prepared) == SQLITE_ROW) {
- initializer->params_.version_ = sqlite3_column_int(prepared, 0);
- sqlite3_finalize(prepared);
- } else {
- logger.info(DATASRC_SQLITE_SETUP);
- if (prepared != NULL) {
- sqlite3_finalize(prepared);
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, q_version_str, -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 49) { // 49 == last attempt
+ isc_throw(Sqlite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
}
+ do_sleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(Sqlite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 49) { // 49 == last attempt
+ isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ do_sleep();
+ }
+ int schema_version = check_schema_version(db);
+ if (schema_version == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
isc_throw(Sqlite3Error,
- "Failed to set up schema " << SCHEMA_LIST[i]);
+ "Failed to set up schema " << SCHEMA_LIST[i]);
}
}
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
+ }
+}
+
+void
+checkAndSetupSchema(Sqlite3Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ int schema_version = check_schema_version(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
}
+ initializer->params_.version_ = schema_version;
initializer->params_.q_zone_ = prepare(db, q_zone_str);
initializer->params_.q_record_ = prepare(db, q_record_str);
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index dee14b9..fd43e1c 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -70,6 +70,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
authors = RRsetPtr(new RRset(authors_name, RRClass::CH(),
RRType::TXT(), RRTTL(0)));
authors->addRdata(generic::TXT("Chen Zhengzhang")); // Jerry
+ authors->addRdata(generic::TXT("Dmitriy Volodin"));
authors->addRdata(generic::TXT("Evan Hunt"));
authors->addRdata(generic::TXT("Haidong Wang")); // Ocean
authors->addRdata(generic::TXT("Han Feng"));
@@ -161,7 +162,7 @@ StaticDataSrc::findRRset(const Name& qname,
arg(qtype);
flags = 0;
if (qclass != getClass() && qclass != RRClass::ANY()) {
- LOG_ERROR(logger, DATASRC_STATIC_BAD_CLASS);
+ LOG_ERROR(logger, DATASRC_STATIC_CLASS_NOT_CH);
return (ERROR);
}
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index ad4374a..e5cca0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -1,8 +1,12 @@
+SUBDIRS = testdata
+
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/lib/dns
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += $(SQLITE_CFLAGS)
-AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_builddir)/testdata\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
AM_CXXFLAGS = $(B10_CXXFLAGS)
@@ -25,19 +29,37 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
-run_unittests_SOURCES += zonetable_unittest.cc
-run_unittests_SOURCES += memory_datasrc_unittest.cc
+#run_unittests_SOURCES += zonetable_unittest.cc
+#run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += client_unittest.cc
+run_unittests_SOURCES += sqlite3_accessor_unittest.cc
+if !USE_STATIC_LINK
+# This test uses dynamically loadable module. It will cause various
+# troubles with static link such as "missing" symbols in the static object
+# for the module. As a workaround we disable this particular test
+# in this case.
+run_unittests_SOURCES += factory_unittest.cc
+endif
+# for the dlopened types we have tests for, we also need to include the
+# sources
+run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+#run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
@@ -54,3 +76,5 @@ EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
+EXTRA_DIST += testdata/rwtest.sqlite3
diff --git a/src/lib/datasrc/tests/cache_unittest.cc b/src/lib/datasrc/tests/cache_unittest.cc
index 96beae0..1325f64 100644
--- a/src/lib/datasrc/tests/cache_unittest.cc
+++ b/src/lib/datasrc/tests/cache_unittest.cc
@@ -202,15 +202,15 @@ TEST_F(CacheTest, retrieveFail) {
}
TEST_F(CacheTest, expire) {
- // Insert "foo" with a duration of 2 seconds; sleep 3. The
+ // Insert "foo" with a duration of 1 seconds; sleep 2. The
// record should not be returned from the cache even though it's
// at the top of the cache.
RRsetPtr aaaa(new RRset(Name("foo"), RRClass::IN(), RRType::AAAA(),
RRTTL(0)));
aaaa->addRdata(in::AAAA("2001:db8:3:bb::5"));
- cache.addPositive(aaaa, 0, 2);
+ cache.addPositive(aaaa, 0, 1);
- sleep(3);
+ sleep(2);
RRsetPtr r;
uint32_t f;
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
new file mode 100644
index 0000000..5b2c91a
--- /dev/null
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * The DataSourceClient can't be instantiated directly, as it has pure
+ * virtual methods.  So we implement them as NOPs and test the remaining
+ * methods.
+ */
+class NopClient : public DataSourceClient {
+public:
+ virtual FindResult findZone(const isc::dns::Name&) const {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+ return (ZoneUpdaterPtr());
+ }
+};
+
+class ClientTest : public ::testing::Test {
+public:
+ NopClient client_;
+};
+
+// The default implementation is NotImplemented
+TEST_F(ClientTest, defaultIterator) {
+ EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented);
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..1514fc3
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,2630 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/shared_ptr.hpp>
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+#include <datasrc/zone.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+
+using namespace isc::datasrc;
+using namespace std;
+// Don't import the entire boost namespace; on some systems it would
+// unexpectedly hide uint32_t.
+using boost::shared_ptr;
+using boost::dynamic_pointer_cast;
+using namespace isc::dns;
+
+namespace {
+
+// Imaginary zone IDs used in the mock accessor below.
+const int READONLY_ZONE_ID = 42;
+const int WRITABLE_ZONE_ID = 4200;
+
+// Commonly used test data
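// Each entry is {owner name, RR type, TTL, sigtype, RDATA}; this matches how
// TestSQLite3Accessor below maps the fields to the accessor's ADD_NAME,
// ADD_TYPE, ADD_TTL, ADD_SIGTYPE and ADD_RDATA columns.  The sigtype column
// is left empty for most entries.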
+const char* const TEST_RECORDS[][5] = {
+ // some plain data
+ {"www.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"www.example.org.", "NSEC", "3600", "", "www2.example.org. A AAAA NSEC RRSIG"},
+ {"www.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"www2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ {"cname.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ // some DNSSEC-'signed' data
+ {"signed1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signedcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"signedcname1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // A special case that might fail: the RRSIG is for a CNAME, which isn't
+    // there, so it should be ignored (ignoring of other 'normal' types is
+    // covered above by www.)
+ {"acnamesig1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // let's pretend we have a database that is not careful
+ // about the order in which it returns data
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+
+ {"signedcname2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signedcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"ttldiff1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"ttldiff1.example.org.", "A", "360", "", "192.0.2.2"},
+
+ {"ttldiff2.example.org.", "A", "360", "", "192.0.2.1"},
+ {"ttldiff2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ // also add some intentionally bad data
+ {"badcname1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"badcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example2.org."},
+
+ {"badrdata.example.org.", "A", "3600", "", "bad"},
+
+ {"badtype.example.org.", "BAD_TYPE", "3600", "", "192.0.2.1"},
+
+ {"badttl.example.org.", "A", "badttl", "", "192.0.2.1"},
+
+ {"badsig.example.org.", "A", "badttl", "", "192.0.2.1"},
+ {"badsig.example.org.", "RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"badsigtype.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badsigtype.example.org.", "RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // Data for testing delegation (with NS and DNAME)
+ {"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"delegation.example.org.", "NS", "3600", "",
+ "ns.delegation.example.org."},
+ {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
+ {"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"dname.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"dname.example.org.", "DNAME", "3600", "", "dname.example.com."},
+ {"dname.example.org.", "RRSIG", "3600", "",
+ "DNAME 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.org. FAKEFAKEFAKE"},
+
+ {"below.dname.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Broken NS
+ {"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."},
+
+ {"brokenns2.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"brokenns2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Now double DNAME, to test failure mode
+ {"baddname.example.org.", "DNAME", "3600", "", "dname1.example.com."},
+ {"baddname.example.org.", "DNAME", "3600", "", "dname2.example.com."},
+
+    // Put some data into the apex (including NS) so we can check the apex
+    // NS doesn't break anything
+ {"example.org.", "SOA", "3600", "", "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200" },
+ {"example.org.", "NS", "3600", "", "ns.example.com."},
+ {"example.org.", "A", "3600", "", "192.0.2.1"},
+ {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ {"example.org.", "RRSIG", "3600", "", "SOA 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // This is for the empty domain (empty non-terminal) test
+ {"a.b.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Something for wildcards
+ {"*.wild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"*.wild.example.org.", "RRSIG", "3600", "A", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"*.wild.example.org.", "NSEC", "3600", "", "cancel.here.wild.example.org. A NSEC RRSIG"},
+ {"*.wild.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"cancel.here.wild.example.org.", "AAAA", "3600", "", "2001:db8::5"},
+ {"delegatedwild.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"*.delegatedwild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.*.bar.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.*.bar.example.org.", "NSEC", "3600", "",
+ "brokenns1.example.org. A NSEC"},
+ {"bao.example.org.", "NSEC", "3600", "", "wild.*.foo.*.bar.example.org. NSEC"},
+ {"*.cnamewild.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"*.dnamewild.example.org.", "DNAME", "3600", "", "dname.example.com."},
+ {"*.nswild.example.org.", "NS", "3600", "", "ns.example.com."},
+ // For NSEC empty non-terminal
+ {"l.example.org.", "NSEC", "3600", "", "empty.nonterminal.example.org. NSEC"},
+ {"empty.nonterminal.example.org.", "A", "3600", "", "192.0.2.1"},
+ // Invalid rdata
+ {"invalidrdata.example.org.", "A", "3600", "", "Bunch of nonsense"},
+ {"invalidrdata2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"invalidrdata2.example.org.", "RRSIG", "3600", "", "Nonsense"},
+
+ {NULL, NULL, NULL, NULL, NULL},
+};
+
+/*
+ * An accessor with a minimal implementation, keeping the original
+ * "NotImplemented" methods.
+ */
+class NopAccessor : public DatabaseAccessor {
+public:
+ NopAccessor() : database_name_("mock_database")
+ { }
+
+ virtual std::pair<bool, int> getZone(const std::string& name) const {
+ if (name == "example.org.") {
+ return (std::pair<bool, int>(true, READONLY_ZONE_ID));
+ } else if (name == "null.example.org.") {
+ return (std::pair<bool, int>(true, 13));
+ } else if (name == "empty.example.org.") {
+ return (std::pair<bool, int>(true, 0));
+ } else if (name == "bad.example.org.") {
+ return (std::pair<bool, int>(true, -1));
+ } else {
+ return (std::pair<bool, int>(false, 0));
+ }
+ }
+
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ // This accessor is stateless, so we can simply return a new instance.
+ return (shared_ptr<DatabaseAccessor>(new NopAccessor));
+ }
+
+ virtual std::pair<bool, int> startUpdateZone(const std::string&, bool) {
+        // Return a dummy value; it is unused anyway.
+ return (pair<bool, int>(true, 0));
+ }
+ virtual void startTransaction() {}
+ virtual void commit() {}
+ virtual void rollback() {}
+ virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
+ virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+ virtual void addRecordDiff(int, uint32_t, DiffOperation,
+ const std::string (&)[DIFF_PARAM_COUNT]) {}
+
+ virtual const std::string& getDBName() const {
+ return (database_name_);
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string&, int, bool)
+ const
+ {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual IteratorContextPtr getAllRecords(int) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual std::string findPreviousName(int, const std::string&) const {
+ isc_throw(isc::NotImplemented,
+ "This data source doesn't support DNSSEC");
+ }
+private:
+ const std::string database_name_;
+
+};
+
+/*
+ * A virtual database accessor that pretends it contains a single zone --
+ * example.org.
+ *
+ * It has the same getZone() method as NopAccessor, but it provides
+ * implementations of the optional functionality.
+ */
+class MockAccessor : public NopAccessor {
+    // Type of mock database "row"s.  This is a map whose keys are the
+    // owner names, internally sorted by the DNS name comparison order.
+ struct NameCompare : public binary_function<string, string, bool> {
+ bool operator()(const string& n1, const string& n2) const {
+ return (Name(n1).compare(Name(n2)).getOrder() < 0);
+ }
+ };
+ typedef std::map<std::string,
+ std::vector< std::vector<std::string> >,
+ NameCompare > Domains;
+
+public:
+ MockAccessor() : rollbacked_(false), did_transaction_(false) {
+ readonly_records_ = &readonly_records_master_;
+ update_records_ = &update_records_master_;
+ empty_records_ = &empty_records_master_;
+ fillData();
+ }
+
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ shared_ptr<MockAccessor> cloned_accessor(new MockAccessor());
+ cloned_accessor->readonly_records_ = &readonly_records_master_;
+ cloned_accessor->update_records_ = &update_records_master_;
+ cloned_accessor->empty_records_ = &empty_records_master_;
+ latest_clone_ = cloned_accessor;
+ return (cloned_accessor);
+ }
+
+ virtual void startTransaction() {
+ // Currently we only use this transaction for simple read-only
+ // operations. So we just make a local copy of the data (we don't
+ // care about what happens after commit() or rollback()).
+ // Obviously as a consequence, if a test case tries to make multiple
+ // transactions on a single mock accessor it will fail.
+
+ // Check any attempt of multiple transactions
+ if (did_transaction_) {
+ isc_throw(isc::Unexpected, "MockAccessor::startTransaction() "
+ "called multiple times - likely a bug in the test");
+ }
+
+ readonly_records_copy_ = *readonly_records_;
+ readonly_records_ = &readonly_records_copy_;
+ did_transaction_ = true;
+ }
+
+private:
+ class MockNameIteratorContext : public IteratorContext {
+ public:
+ MockNameIteratorContext(const MockAccessor& mock_accessor, int zone_id,
+ const std::string& name, bool subdomains) :
+ searched_name_(name), cur_record_(0)
+ {
+ // 'hardcoded' names to trigger exceptions
+ // On these names some exceptions are thrown, to test the robustness
+ // of the find() method.
+ if (searched_name_ == "dsexception.in.search.") {
+ isc_throw(DataSourceError, "datasource exception on search");
+ } else if (searched_name_ == "iscexception.in.search.") {
+ isc_throw(isc::Exception, "isc exception on search");
+ } else if (searched_name_ == "basicexception.in.search.") {
+ throw std::exception();
+ }
+
+ cur_record_ = 0;
+ const Domains& cur_records = mock_accessor.getMockRecords(zone_id);
+ if (cur_records.count(name) > 0) {
+ // we're not aiming for efficiency in this test, simply
+ // copy the relevant vector from records
+ cur_name = cur_records.find(name)->second;
+ } else if (subdomains) {
+ cur_name.clear();
+ // Just walk everything and check if it is a subdomain.
+ // If it is, just copy all data from there.
+ for (Domains::const_iterator i(cur_records.begin());
+ i != cur_records.end(); ++i) {
+ const Name local(i->first);
+ if (local.compare(Name(name)).getRelation() ==
+ isc::dns::NameComparisonResult::SUBDOMAIN) {
+ cur_name.insert(cur_name.end(), i->second.begin(),
+ i->second.end());
+ }
+ }
+ } else {
+ cur_name.clear();
+ }
+ }
+
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
+ if (searched_name_ == "dsexception.in.getnext.") {
+ isc_throw(DataSourceError, "datasource exception on getnextrecord");
+ } else if (searched_name_ == "iscexception.in.getnext.") {
+ isc_throw(isc::Exception, "isc exception on getnextrecord");
+ } else if (searched_name_ == "basicexception.in.getnext.") {
+ throw std::exception();
+ }
+
+ if (cur_record_ < cur_name.size()) {
+ for (size_t i = 0; i < COLUMN_COUNT; ++i) {
+ columns[i] = cur_name[cur_record_][i];
+ }
+ cur_record_++;
+ return (true);
+ } else {
+ return (false);
+ }
+ }
+
+ private:
+ const std::string searched_name_;
+ int cur_record_;
+ std::vector< std::vector<std::string> > cur_name;
+ };
+
+ class MockIteratorContext : public IteratorContext {
+ private:
+ int step;
+ const Domains& domains_;
+ public:
+ MockIteratorContext(const Domains& domains) :
+ step(0), domains_(domains)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ // A special case: if the given set of domains is already empty,
+ // we always return false.
+ if (domains_.empty()) {
+ return (false);
+ }
+
+ // Return faked data for tests
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "3600";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
+ data[DatabaseAccessor::TTL_COLUMN] = "3600";
+ data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ return (true);
+ case 2:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 3:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ case 4:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
+ return (true);
+ case 5:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 6:
+ return (false);
+ }
+ }
+ };
+ class EmptyIteratorContext : public IteratorContext {
+ public:
+ virtual bool getNext(string(&)[COLUMN_COUNT]) {
+ return (false);
+ }
+ };
+ class BadIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ BadIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "301";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 2:
+ return (false);
+ }
+ }
+ };
+public:
+ virtual IteratorContextPtr getAllRecords(int id) const {
+ if (id == READONLY_ZONE_ID) {
+ return (IteratorContextPtr(new MockIteratorContext(
+ *readonly_records_)));
+ } else if (id == 13) {
+ return (IteratorContextPtr());
+ } else if (id == 0) {
+ return (IteratorContextPtr(new EmptyIteratorContext()));
+ } else if (id == -1) {
+ return (IteratorContextPtr(new BadIteratorContext()));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string& name, int id,
+ bool subdomains) const
+ {
+ if (id == READONLY_ZONE_ID || id == WRITABLE_ZONE_ID) {
+ return (IteratorContextPtr(
+ new MockNameIteratorContext(*this, id, name,
+ subdomains)));
+ } else {
+ // This iterator is bogus, but for the cases tested below that's
+ // sufficient.
+ return (IteratorContextPtr(
+ new MockNameIteratorContext(*this, READONLY_ZONE_ID,
+ name, subdomains)));
+ }
+ }
+
+ virtual pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace)
+ {
+ const pair<bool, int> zone_info = getZone(zone_name);
+ if (!zone_info.first) {
+ return (pair<bool, int>(false, 0));
+ }
+
+ // Prepare the record set for update. If replacing the existing one,
+ // we use an empty set; otherwise we use a writable copy of the
+ // original.
+ if (replace) {
+ update_records_->clear();
+ } else {
+ *update_records_ = *readonly_records_;
+ }
+
+ return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ }
+ virtual void commit() {
+ *readonly_records_ = *update_records_;
+ }
+ virtual void rollback() {
+ // Special hook: if something with a name of "throw.example.org"
+ // has been added, trigger an imaginary unexpected event with an
+ // exception.
+ if (update_records_->count("throw.example.org.") > 0) {
+ isc_throw(DataSourceError, "unexpected failure in rollback");
+ }
+
+ rollbacked_ = true;
+ }
+ virtual void addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+ // Copy the current value to cur_name. If it doesn't exist,
+ // operator[] will create a new one.
+ cur_name_ = (*update_records_)[columns[DatabaseAccessor::ADD_NAME]];
+
+ vector<string> record_columns;
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TTL]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_SIGTYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_RDATA]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_NAME]);
+
+ // copy back the added entry
+ cur_name_.push_back(record_columns);
+ (*update_records_)[columns[DatabaseAccessor::ADD_NAME]] = cur_name_;
+
+ // remember this one so that test cases can check it.
+ copy(columns, columns + DatabaseAccessor::ADD_COLUMN_COUNT,
+ columns_lastadded_);
+ }
+
+ // Helper predicate class used in deleteRecordInZone().
+ struct deleteMatch {
+ deleteMatch(const string& type, const string& rdata) :
+ type_(type), rdata_(rdata)
+ {}
+ bool operator()(const vector<string>& row) const {
+ return (row[0] == type_ && row[3] == rdata_);
+ }
+ const string& type_;
+ const string& rdata_;
+ };
+
+ virtual void deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
+ vector<vector<string> >& records =
+ (*update_records_)[params[DatabaseAccessor::DEL_NAME]];
+ records.erase(remove_if(records.begin(), records.end(),
+ deleteMatch(
+ params[DatabaseAccessor::DEL_TYPE],
+ params[DatabaseAccessor::DEL_RDATA])),
+ records.end());
+ if (records.empty()) {
+ (*update_records_).erase(params[DatabaseAccessor::DEL_NAME]);
+ }
+ }
+
+ //
+ // Helper methods to keep track of some update related activities
+ //
+ bool isRollbacked() const {
+ return (rollbacked_);
+ }
+
+ const string* getLastAdded() const {
+ return (columns_lastadded_);
+ }
+
+ // This allows the test code to get the accessor used in an update context
+ shared_ptr<const MockAccessor> getLatestClone() const {
+ return (latest_clone_);
+ }
+
+ virtual std::string findPreviousName(int id, const std::string& rname)
+ const
+ {
+ if (id == -1) {
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == READONLY_ZONE_ID) {
+ // For some specific names we intentionally return broken or
+ // unexpected result.
+ if (rname == "org.example.badnsec2.") {
+ return ("badnsec1.example.org.");
+ } else if (rname == "org.example.brokenname.") {
+ return ("brokenname...example.org.");
+ } else if (rname == "org.example.notimplnsec." ||
+ rname == "org.example.wild.here.") {
+ isc_throw(isc::NotImplemented, "Not implemented in this test");
+ }
+
+ // For the general case, we search for the first name N in the
+ // domains that meets N >= reverse(rname) using lower_bound.
+ // The "previous name" is the name of the previous entry of N.
+ // Note that Domains are internally sorted by the Name comparison
+ // order. Due to the API requirement we are given a reversed
+ // name (rname), so we need to reverse it again to convert it
+ // to the original name.
+ Domains::const_iterator it(readonly_records_->lower_bound(
+ Name(rname).reverse().toText()));
+ if (it == readonly_records_->begin()) {
+ isc_throw(isc::Unexpected, "Unexpected name");
+ }
+ if (it == readonly_records_->end()) {
+ return ((*readonly_records_->rbegin()).first);
+ }
+ return ((*(--it)).first);
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+private:
+ // The following member variables are storage and/or update work space
+ // of the test zone. The "master"s are the real objects that contain
+ // the data, and they are shared among all accessors cloned from
+ // an initially created one. The "copy" data will be used for read-only
+ // transaction. The pointer members allow the sharing.
+ // "readonly" is for normal lookups. "update" is the workspace for
+ // updates. When update starts it will be initialized either as an
+ // empty set (when replacing the entire zone) or as a copy of the
+ // "readonly" one. "empty" is a sentinel to produce negative results.
+ Domains readonly_records_master_;
+ Domains readonly_records_copy_;
+ Domains* readonly_records_;
+ Domains update_records_master_;
+ Domains* update_records_;
+ const Domains empty_records_master_;
+ const Domains* empty_records_;
+
+ // used as temporary storage after searchForRecord() and during
+ // getNextRecord() calls, as well as during the building of the
+ // fake data
+ std::vector< std::vector<std::string> > cur_name_;
+
+ // The columns that were most recently added via addRecordToZone()
+ string columns_lastadded_[ADD_COLUMN_COUNT];
+
+ // Whether rollback operation has been performed for the database.
+ // Not useful except for purely testing purpose.
+ bool rollbacked_;
+
+ // Remember the mock accessor that was last cloned
+ boost::shared_ptr<MockAccessor> latest_clone_;
+
+ // Internal flag for duplicate check
+ bool did_transaction_;
+
+ const Domains& getMockRecords(int zone_id) const {
+ if (zone_id == READONLY_ZONE_ID) {
+ return (*readonly_records_);
+ } else if (zone_id == WRITABLE_ZONE_ID) {
+ return (*update_records_);
+ }
+ return (*empty_records_);
+ }
+
+ // Adds one record to the current name in the database
+ // The actual data will not be added to 'records' until
+ // addCurName() is called
+ void addRecord(const std::string& type,
+ const std::string& ttl,
+ const std::string& sigtype,
+ const std::string& rdata) {
+ std::vector<std::string> columns;
+ columns.push_back(type);
+ columns.push_back(ttl);
+ columns.push_back(sigtype);
+ columns.push_back(rdata);
+ cur_name_.push_back(columns);
+ }
+
+ // Adds all records we just built with calls to addRecords
+ // to the actual fake database. This will clear cur_name_,
+ // so we can immediately start adding new records.
+ void addCurName(const std::string& name) {
+ ASSERT_EQ(0, readonly_records_->count(name));
+ // Append the name to all of them
+ for (std::vector<std::vector<std::string> >::iterator
+ i(cur_name_.begin()); i != cur_name_.end(); ++ i) {
+ i->push_back(name);
+ }
+ (*readonly_records_)[name] = cur_name_;
+ cur_name_.clear();
+ }
+
+ // Fills the database with zone data.
+ // This method constructs a number of resource records (with addRecord),
+ // which will all be added for one domain name to the fake database
+    // (with addCurName). So for instance the first set of calls creates
+ // data for the name 'www.example.org', which will consist of one A RRset
+ // of one record, and one AAAA RRset of two records.
+ // The order in which they are added is the order in which getNextRecord()
+ // will return them (so we can test whether find() etc. support data that
+ // might not come in 'normal' order)
+ // It shall immediately fail if you try to add the same name twice.
+ void fillData() {
+ const char* prev_name = NULL;
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ if (prev_name != NULL &&
+ strcmp(prev_name, TEST_RECORDS[i][0]) != 0) {
+ addCurName(prev_name);
+ }
+ prev_name = TEST_RECORDS[i][0];
+ addRecord(TEST_RECORDS[i][1], TEST_RECORDS[i][2],
+ TEST_RECORDS[i][3], TEST_RECORDS[i][4]);
+ }
+ addCurName(prev_name);
+ }
+};
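// The following is a minimal standalone sketch (not part of this patch,
// simplified from MockAccessor::findPreviousName() above) of the "previous
// name" lookup it implements: keep the owner names sorted in DNS name order,
// locate the query name with lower_bound(), and step back one entry.  The
// API hands over names in reversed form, so Name::reverse() converts them
// back first.  It assumes a non-empty map and, unlike the mock, simply wraps
// to the last name instead of treating the begin() case as an error.

#include <map>
#include <string>
#include <dns/name.h>

struct DNSNameLess {
    bool operator()(const std::string& a, const std::string& b) const {
        return (isc::dns::Name(a).compare(isc::dns::Name(b)).getOrder() < 0);
    }
};
typedef std::map<std::string, int, DNSNameLess> NameMap;

std::string
previousName(const NameMap& names, const std::string& rname) {
    // Convert the reversed name (e.g. "org.example.www.") back to the
    // normal form before searching.
    const std::string name = isc::dns::Name(rname).reverse().toText();
    NameMap::const_iterator it = names.lower_bound(name);
    if (it == names.begin() || it == names.end()) {
        // Off either end of the map: wrap around to the last name.
        return (names.rbegin()->first);
    }
    return ((--it)->first);
}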
+
+// This tests the default getRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getRecords) {
+ EXPECT_THROW(NopAccessor().getRecords(".", 1, false),
+ isc::NotImplemented);
+}
+
+// This tests the default getAllRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getAllRecords) {
+ // The parameters don't matter
+ EXPECT_THROW(NopAccessor().getAllRecords(1),
+ isc::NotImplemented);
+}
+
+// This test fixture is templated so that we can share (most of) the test
+// cases with different types of data sources. Note that in test cases
+// we need to use 'this' to refer to member variables of the test class.
+template <typename ACCESSOR_TYPE>
+class DatabaseClientTest : public ::testing::Test {
+public:
+ DatabaseClientTest() : zname_("example.org"), qname_("www.example.org"),
+ qclass_(RRClass::IN()), qtype_(RRType::A()),
+ rrttl_(3600)
+ {
+ createClient();
+
+ // set up the commonly used finder.
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ assert(zone.code == result::SUCCESS);
+ finder_ = dynamic_pointer_cast<DatabaseClient::Finder>(
+ zone.zone_finder);
+
+ // Test IN/A RDATA to be added in update tests. Intentionally using
+ // different data than the initial data configured in the MockAccessor.
+ rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
+ rrset_->addRdata(rdata::createRdata(rrset_->getType(),
+ rrset_->getClass(), "192.0.2.2"));
+
+ // And its RRSIG. Also different from the configured one.
+ rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
+ rrttl_));
+ rrsigset_->addRdata(rdata::createRdata(rrsigset_->getType(),
+ rrsigset_->getClass(),
+ "A 5 3 0 20000101000000 "
+ "20000201000000 0 example.org. "
+ "FAKEFAKEFAKE"));
+ }
+
+ /*
+ * We initialize the client from a function, so we can call it multiple
+ * times per test.
+ */
+ void createClient() {
+ current_accessor_ = new ACCESSOR_TYPE();
+ is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
+ client_.reset(new DatabaseClient(qclass_,
+ shared_ptr<ACCESSOR_TYPE>(
+ current_accessor_)));
+ }
+
+ /**
+ * Check the zone finder is a valid one and references the zone ID and
+ * database available here.
+ */
+ void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+ ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+ "Wrong type of finder";
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(current_accessor_, &finder->getAccessor());
+ }
+
+ shared_ptr<DatabaseClient::Finder> getFinder() {
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+
+ return (finder);
+ }
+
+ // Helper methods for update tests
+ bool isRollbacked(bool expected = false) const {
+ if (is_mock_) {
+ const MockAccessor& mock_accessor =
+ dynamic_cast<const MockAccessor&>(*update_accessor_);
+ return (mock_accessor.isRollbacked());
+ } else {
+ return (expected);
+ }
+ }
+
+ void checkLastAdded(const char* const expected[]) const {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ for (int i = 0; i < DatabaseAccessor::ADD_COLUMN_COUNT; ++i) {
+ EXPECT_EQ(expected[i],
+ mock_accessor->getLatestClone()->getLastAdded()[i]);
+ }
+ }
+ }
+
+ void setUpdateAccessor() {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ update_accessor_ = mock_accessor->getLatestClone();
+ }
+ }
+
+ // Some tests only work for MockAccessor. We remember whether our accessor
+ // is of that type.
+ bool is_mock_;
+
+ // Will be deleted by client_, just keep the current value for comparison.
+ ACCESSOR_TYPE* current_accessor_;
+ shared_ptr<DatabaseClient> client_;
+ const std::string database_name_;
+
+ // The zone finder of the test zone commonly used in various tests.
+ shared_ptr<DatabaseClient::Finder> finder_;
+
+ // Some shortcut variables for commonly used test parameters
+ const Name zname_; // the zone name stored in the test data source
+ const Name qname_; // commonly used name to be found
+ const RRClass qclass_; // commonly used RR class used with qname
+ const RRType qtype_; // commonly used RR type used with qname
+ const RRTTL rrttl_; // commonly used RR TTL
+ RRsetPtr rrset_; // for adding/deleting an RRset
+ RRsetPtr rrsigset_; // for adding/deleting an RRset
+
+ // update related objects to be tested
+ ZoneUpdaterPtr updater_;
+ shared_ptr<const DatabaseAccessor> update_accessor_;
+
+ // placeholders
+ const std::vector<std::string> empty_rdatas_; // for NXRRSET/NXDOMAIN
+ std::vector<std::string> expected_rdatas_;
+ std::vector<std::string> expected_sig_rdatas_;
+};
+
+class TestSQLite3Accessor : public SQLite3Accessor {
+public:
+ TestSQLite3Accessor() : SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied", "IN")
+ {
+ startUpdateZone("example.org.", true);
+ string columns[ADD_COLUMN_COUNT];
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ columns[ADD_NAME] = TEST_RECORDS[i][0];
+ columns[ADD_REV_NAME] = Name(columns[ADD_NAME]).reverse().toText();
+ columns[ADD_TYPE] = TEST_RECORDS[i][1];
+ columns[ADD_TTL] = TEST_RECORDS[i][2];
+ columns[ADD_SIGTYPE] = TEST_RECORDS[i][3];
+ columns[ADD_RDATA] = TEST_RECORDS[i][4];
+
+ addRecordToZone(columns);
+ }
+ commit();
+ }
+};
+
+// The following two lines instantiate test cases with concrete accessor
+// classes to be tested.
+// XXX: the clang++ installed on our FreeBSD buildbot cannot finish compiling
+// this file, seemingly due to the size of the code.  We'll consider a more
+// complete workaround, but as a short-term workaround we reduce the
+// number of tested accessor classes (thus reducing the amount of code
+// to be compiled) for this particular environment.
+#if defined(__clang__) && defined(__FreeBSD__)
+typedef ::testing::Types<MockAccessor> TestAccessorTypes;
+#else
+typedef ::testing::Types<MockAccessor, TestSQLite3Accessor> TestAccessorTypes;
+#endif
+
+TYPED_TEST_CASE(DatabaseClientTest, TestAccessorTypes);
+
+// In some cases the entire test fixture is for the mock accessor only.
+// We use the usual TEST_F for them with the corresponding specialized class
+// to make the code simpler.
+typedef DatabaseClientTest<MockAccessor> MockDatabaseClientTest;
+
+TYPED_TEST(DatabaseClientTest, zoneNotFound) {
+ DataSourceClient::FindResult zone(
+ this->client_->findZone(Name("example.com")));
+ EXPECT_EQ(result::NOTFOUND, zone.code);
+}
+
+TYPED_TEST(DatabaseClientTest, exactZone) {
+ DataSourceClient::FindResult zone(
+ this->client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ this->checkZoneFinder(zone);
+}
+
+TYPED_TEST(DatabaseClientTest, superZone) {
+ DataSourceClient::FindResult zone(this->client_->findZone(Name(
+ "sub.example.org")));
+ EXPECT_EQ(result::PARTIALMATCH, zone.code);
+ this->checkZoneFinder(zone);
+}
+
+// This test doesn't depend on derived accessor class, so we use TEST().
+TEST(GenericDatabaseClientTest, noAccessorException) {
+    // We need a dummy variable here; some compilers would regard it as a
+    // mere declaration instead of an instantiation and make the test fail.
+ EXPECT_THROW(DatabaseClient dummy(RRClass::IN(),
+ shared_ptr<DatabaseAccessor>()),
+ isc::InvalidParameter);
+}
+
+// If the zone doesn't exist, an exception is thrown
+TYPED_TEST(DatabaseClientTest, noZoneIterator) {
+ EXPECT_THROW(this->client_->getIterator(Name("example.com")),
+ DataSourceError);
+}
+
+// If the zone doesn't exist and iteration is not implemented, it still
+// throws the exception indicating the zone doesn't exist
+TEST(GenericDatabaseClientTest, noZoneNotImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(RRClass::IN(),
+ boost::shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(
+ Name("example.com")),
+ DataSourceError);
+}
+
+TEST(GenericDatabaseClientTest, notImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(RRClass::IN(), shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(Name("example.org")),
+ isc::NotImplemented);
+}
+
+// Pretend there is a bug in the connection and pass NULL as the context.
+// It should not crash, but throw gracefully.  Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, nullIteratorContext) {
+ EXPECT_THROW(this->client_->getIterator(Name("null.example.org")),
+ isc::Unexpected);
+}
+
+// It doesn't crash or anything if the zone is completely empty.
+// Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, emptyIterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("empty.example.org")));
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ // This is past the end, it should throw
+ EXPECT_THROW(it->getNextRRset(), isc::Unexpected);
+}
+
+// Checks whether the given rrset matches the
+// given name, class, type and rdatas
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& rrclass,
+ const isc::dns::RRType& rrtype,
+ const isc::dns::RRTTL& rrttl,
+ const std::vector<std::string>& rdatas) {
+ isc::dns::RRsetPtr expected_rrset(
+ new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+ for (unsigned int i = 0; i < rdatas.size(); ++i) {
+ expected_rrset->addRdata(
+ isc::dns::rdata::createRdata(rrtype, rrclass,
+ rdatas[i]));
+ }
+ isc::testutils::rrsetCheck(expected_rrset, rrset);
+}
+
+// Iterate through a zone
+TYPED_TEST(DatabaseClientTest, iterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+ ConstRRsetPtr rrset(it->getNextRRset());
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+
+ // The first name should be the zone origin.
+ EXPECT_EQ(this->zname_, rrset->getName());
+
+ // The rest of the checks work only for the mock accessor.
+ if (!this->is_mock_) {
+ return;
+ }
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ checkRRset(rrset, Name("example.org"), this->qclass_, RRType::A(),
+ this->rrttl_, this->expected_rdatas_);
+
+ rrset = it->getNextRRset();
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200");
+ checkRRset(rrset, Name("example.org"), this->qclass_, RRType::SOA(),
+ this->rrttl_, this->expected_rdatas_);
+
+ rrset = it->getNextRRset();
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::A(),
+ RRTTL(300), this->expected_rdatas_);
+
+ rrset = it->getNextRRset();
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::AAAA(),
+ RRTTL(300), this->expected_rdatas_);
+}
+
+// This zone has an inconsistent TTL within an RRset (other problems, like
+// nonsense in the data, are handled by the rdata parsing itself).  Works for
+// the mock accessor only.
+TEST_F(MockDatabaseClientTest, badIterator) {
+    // It should not throw, but use the lowest of the TTLs
+ ZoneIteratorPtr it(this->client_->getIterator(Name("bad.example.org")));
+ EXPECT_EQ(it->getNextRRset()->getTTL(), isc::dns::RRTTL(300));
+}
+
+TYPED_TEST(DatabaseClientTest, getSOAFromIterator) {
+ vector<string> soa_data;
+ soa_data.push_back("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200");
+
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ checkRRset(it->getSOA(), this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_, soa_data);
+
+ // Iterate over the zone until we find an SOA. Although there's a broken
+ // RDATA that would trigger an exception in getNextRRset(), we should
+ // reach the SOA as the sequence should be sorted and the SOA is at
+ // the origin name (which has no bogus data).
+ ConstRRsetPtr rrset;
+ while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+ rrset->getType() != RRType::SOA()) {
+ ;
+ }
+ ASSERT_TRUE(rrset);
+ // It should be identical to the result of getSOA().
+ isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+TYPED_TEST(DatabaseClientTest, noSOAFromIterator) {
+ // First, empty the zone.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Then getSOA() should return NULL.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_FALSE(it->getSOA());
+}
+
+TYPED_TEST(DatabaseClientTest, iterateThenUpdate) {
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+
+    // Try to empty the zone after getting the iterator.  Depending on the
+    // underlying data source, it may result in an exception due to the
+    // transaction held for the iterator.  In either case the integrity of
+    // the iterator result should be preserved.
+ try {
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Confirm at least it doesn't contain any SOA
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->getFinder()->find(this->zname_, RRType::SOA()).code);
+ } catch (const DataSourceError&) {}
+
+ ConstRRsetPtr rrset;
+ while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+ rrset->getType() != RRType::SOA()) {
+ ;
+ }
+ ASSERT_TRUE(rrset);
+ // It should be identical to the result of getSOA().
+ isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+TYPED_TEST(DatabaseClientTest, updateThenIterateThenUpdate) {
+ // First clear the zone.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+
+ // Then iterate over it. It should immediately reach the end, at which
+ // point the transaction should be committed.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_FALSE(it->getNextRRset());
+
+ // So another update attempt should succeed, too.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+}
+
+TYPED_TEST(DatabaseClientTest, updateAfterDeleteIterator) {
+    // Similar to the previous case, but we delete the iterator in the
+    // middle of the zone.  The transaction should be canceled (actually no
+    // different from a commit, though) at that point.
+ ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+ ASSERT_TRUE(it);
+ EXPECT_TRUE(it->getNextRRset());
+ it.reset();
+
+ // So another update attempt should succeed.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+}
+
+void
+doFindTest(ZoneFinder& finder,
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+ const ZoneFinder::FindResult result = finder.find(name, type, NULL,
+ options);
+ ASSERT_EQ(expected_result, result.code) << name << " " << type;
+ if (!expected_rdatas.empty() && result.rrset) {
+ checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+ name, finder.getClass(), expected_type, expected_ttl,
+ expected_rdatas);
+
+ if (!expected_sig_rdatas.empty() && result.rrset->getRRsig()) {
+ checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+ expected_name : name, finder.getClass(),
+ isc::dns::RRType::RRSIG(), expected_ttl,
+ expected_sig_rdatas);
+ } else if (expected_sig_rdatas.empty()) {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ } else {
+ ADD_FAILURE() << "Missing RRSIG";
+ }
+ } else if (expected_rdatas.empty()) {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ } else {
+ ADD_FAILURE() << "Missing result";
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, find) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("www2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_,
+ ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("doesnotexist.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname1.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname2.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig3.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff1.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff2.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Trigger the hardcoded exceptions and see if find() has cleaned up
+ if (this->is_mock_) {
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ }
+
+    // This RRSIG has the wrong sigtype field, which should be
+    // an error if we decide to keep using that field.
+    // Right now the field is ignored, so it does not cause an error.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("badsigtype.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, findDelegation) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // The apex should not be considered a delegation point, so we can
+ // access its data
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Check that when we ask for something below the delegation point, we
+ // get the NS (both when the RRset there exists and when it doesn't)
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("deep.below.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+
+ // Even when we check directly at the delegation point, we should get
+ // the NS
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // And when we ask directly for the NS, we should still get delegation
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Now test DNAME. If the name is below the DNAME, we should get
+ // the DNAME (a zone with data under a DNAME is invalid, but we test
+ // the behaviour anyway just to make sure)
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("really.deep.below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+
+ // Asking directly for the DNAME should give SUCCESS
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // But we don't delegate at the DNAME point itself
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // This is a broken DNAME; it contains two targets
+ EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Broken NS - it lives together with something else
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+}
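+
+// A minimal usage sketch for the two redirection codes exercised above:
+// for names under a zone cut find() reports DELEGATION, and for names
+// under a DNAME it reports DNAME, so a caller can branch on the result
+// code alone. The names reused here come from the test data checked in
+// detail in findDelegation.
+TYPED_TEST(DatabaseClientTest, delegationResultCodeSketch) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(ZoneFinder::DELEGATION,
+ finder->find(isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT).code);
+ EXPECT_EQ(ZoneFinder::DNAME,
+ finder->find(isc::dns::Name("below.dname.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT).code);
+}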
+
+TYPED_TEST(DatabaseClientTest, emptyDomain) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // This domain doesn't exist, but a subdomain of it does.
+ // Therefore we should pretend the domain exists, but contains no RRsets
+ doFindTest(*finder, isc::dns::Name("b.example.org."), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+// Glue-OK mode. Descend through NS delegations (but not through DNAME)
+// and pretend the delegation is not there.
+TYPED_TEST(DatabaseClientTest, glueOK) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("nothere.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("nothere.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // When we request the NS, it should be SUCCESS, not DELEGATION
+ // (this is where GLUE_OK differs from the default)
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+}
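+
+// A minimal sketch of the FIND_GLUE_OK option tested above: without it a
+// glue address under a zone cut is hidden behind DELEGATION, with it the
+// same lookup succeeds. (The name is the glue record from the test data.)
+TYPED_TEST(DatabaseClientTest, glueOKUsageSketch) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(ZoneFinder::DELEGATION,
+ finder->find(isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT).code);
+ EXPECT_EQ(ZoneFinder::SUCCESS,
+ finder->find(isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_GLUE_OK).code);
+}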
+
+TYPED_TEST(DatabaseClientTest, wildcard) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // First, simple wildcard match
+ // Check also that the RRSIG is added from the wildcard (not modified)
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::WILDCARD, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::WILDCARD,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Direct request for this wildcard
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ // This is nonsense, but check it doesn't match by some stupid accident
+ doFindTest(*finder, isc::dns::Name("a.*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // These should be canceled, since they are below a domain which exists
+ doFindTest(*finder, isc::dns::Name("nothing.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder,
+ isc::dns::Name("below.cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // And this should be just a plain empty non-terminal domain; check that
+ // the wildcard doesn't hurt it
+ doFindTest(*finder, isc::dns::Name("here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // Also make sure that the wildcard doesn't hurt the original data
+ // below the wildcard
+ this->expected_rdatas_.push_back("2001:db8::5");
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+
+ // How wildcards interact with delegation
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"));
+ // FIXME: This doesn't look logically OK; GLUE_OK should make it transparent,
+ // so the match should either work or be canceled and return NXDOMAIN
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"),
+ ZoneFinder::FIND_GLUE_OK);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.5");
+ // These are direct matches
+ const char* positive_names[] = {
+ "wild.*.foo.example.org.",
+ "wild.*.foo.*.bar.example.org.",
+ NULL
+ };
+ for (const char** name(positive_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ }
+
+ // These are wildcard matches against an empty non-terminal asterisk node
+ this->expected_rdatas_.clear();
+ const char* negative_names[] = {
+ "a.foo.example.org.",
+ "*.foo.example.org.",
+ "foo.example.org.",
+ "wild.bar.foo.example.org.",
+ "baz.foo.*.bar.example.org",
+ "baz.foo.baz.bar.example.org",
+ "*.foo.baz.bar.example.org",
+ "*.foo.*.bar.example.org",
+ "foo.*.bar.example.org",
+ "*.bar.example.org",
+ "bar.example.org",
+ NULL
+ };
+ // Unless FIND_DNSSEC is specified, this is no different from other
+ // NXRRSET cases.
+ for (const char** name(negative_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ }
+
+ // With FIND_DNSSEC, it should result in WILDCARD_NXRRSET.
+ const char* negative_dnssec_names[] = {
+ "a.bar.example.org.",
+ "foo.baz.bar.example.org.",
+ "a.foo.bar.example.org.",
+ NULL
+ };
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("wild.*.foo.*.bar.example.org. NSEC");
+ this->expected_sig_rdatas_.clear();
+ for (const char** name(negative_dnssec_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ RRType::NSEC(), this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("bao.example.org."), ZoneFinder::FIND_DNSSEC);
+ }
+
+ // CNAME on a wildcard. Maybe not so common, but not disallowed.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.cnamewild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::WILDCARD_CNAME,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // DNAME on a wildcard. In our implementation we ignore DNAMEs on a
+ // wildcard, but at a higher level we say the behavior is "unspecified".
+ // rfc2672bis strongly discourages mixing DNAME and wildcards
+ // (with a SHOULD NOT).
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, Name("a.dnamewild.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::WILDCARD_NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Some strange things in the wild node
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("a.nswild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, noWildcard) {
+ // Tests with the NO_WILDCARD flag.
+
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // This would match *.wild.example.org, but with NO_WILDCARD it should
+ // result in NXDOMAIN.
+ this->expected_rdatas_.push_back("cancel.here.wild.example.org. A "
+ "NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+ this->expected_sig_rdatas_, Name("*.wild.example.org."),
+ ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+ // Should be the same without FIND_DNSSEC (but in this case no RRsets
+ // will be returned)
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, Name::ROOT_NAME(), // name is dummy
+ ZoneFinder::NO_WILDCARD);
+
+ // Same for a wildcard empty non-terminal.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("brokenns1.example.org. A NSEC");
+ doFindTest(*finder, isc::dns::Name("a.bar.example.org"),
+ RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+ this->empty_rdatas_, Name("wild.*.foo.*.bar.example.org"),
+ ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+ // Search for a wildcard name with NO_WILDCARD. There should be no
+ // difference. This is, for example, necessary to prove the non-existence
+ // of a matching wildcard for isnx.nonterminal.example.org.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+ doFindTest(*finder, isc::dns::Name("*.nonterminal.example.org"),
+ RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+ this->empty_rdatas_, Name("l.example.org"),
+ ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+ // On the other hand, if there's exact match for the wildcard name
+ // it should be found regardless of NO_WILDCARD.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_, Name("*.wild.example.org"),
+ ZoneFinder::NO_WILDCARD);
+}
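+
+// A minimal sketch of the NO_WILDCARD option checked above: it suppresses
+// wildcard matching entirely, so a name that would otherwise match
+// *.wild.example.org comes back as NXDOMAIN. (The options can also be
+// combined with FIND_DNSSEC, as the cases above do.)
+TYPED_TEST(DatabaseClientTest, noWildcardOptionSketch) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ finder->find(isc::dns::Name("a.wild.example.org"),
+ RRType::NSEC(), NULL,
+ ZoneFinder::NO_WILDCARD).code);
+ // Without the option the same owner name matches the wildcard.
+ EXPECT_EQ(ZoneFinder::WILDCARD,
+ finder->find(isc::dns::Name("a.wild.example.org"),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT).code);
+}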
+
+TYPED_TEST(DatabaseClientTest, NXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ //
+ // The user will have to query us again to get the correct
+ // answer (e.g. to prove there's no exact match)
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("cancel.here.wild.example.org. A NSEC "
+ "RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // Note that the NSEC name should NOT be synthesized.
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
+ // The domain doesn't exist, so we must get the right NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("www.example.org."), ZoneFinder::FIND_DNSSEC);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("acnamesig1.example.org. NS A NSEC RRSIG");
+ // This tests that it works correctly at the apex (there was a bug where
+ // a check for NS-only data would throw).
+ doFindTest(*finder, isc::dns::Name("aa.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and the result simply does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("notimplnsec.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, Name::ROOT_NAME(),
+ ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
+ // Same as NXDOMAIN_NSEC, but with empty non-terminal
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+ doFindTest(*finder, isc::dns::Name("nonterminal.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and the result simply does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("here.wild.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, getOrigin) {
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (this->is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(this->zname_, finder->getOrigin());
+}
+
+TYPED_TEST(DatabaseClientTest, updaterFinder) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ ASSERT_TRUE(this->updater_);
+
+ // If this update isn't replacing the zone, the finder should work
+ // just like the normal find() case.
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_,
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // When replacing the zone, the updater's finder shouldn't see anything
+ // in the zone until something is added.
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ ASSERT_TRUE(this->updater_);
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, flushZone) {
+ // A simple update case: flush the entire zone
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // Before update, the name exists.
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // start update in the replace mode. the normal finder should still
+ // be able to see the record, but the updater's finder shouldn't.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::SUCCESS,
+ finder->find(this->qname_, this->qtype_).code);
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+
+ // commit the update. now the normal finder shouldn't see it.
+ this->updater_->commit();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // Check rollback wasn't accidentally performed.
+ EXPECT_FALSE(this->isRollbacked());
+}
+
+TYPED_TEST(DatabaseClientTest, updateCancel) {
+ // similar to the previous test, but destruct the updater before commit.
+
+ ZoneFinderPtr finder = this->client_->findZone(this->zname_).zone_finder;
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+ // DB should not have been rolled back yet.
+ EXPECT_FALSE(this->isRollbacked());
+ this->updater_.reset(); // destruct without commit
+
+ // reset() should have triggered rollback (although it doesn't affect
+ // anything in the mock accessor implementation except for the result of
+ // isRollbacked())
+ EXPECT_TRUE(this->isRollbacked(true));
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+}
+
+TYPED_TEST(DatabaseClientTest, exceptionFromRollback) {
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+
+ this->rrset_.reset(new RRset(Name("throw.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ // destruct without commit. The added name will result in an exception
+ // in the MockAccessor's rollback method. It shouldn't be propagated.
+ EXPECT_NO_THROW(this->updater_.reset());
+}
+
+TYPED_TEST(DatabaseClientTest, duplicateCommit) {
+ // duplicate commit. should result in exception.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->commit(), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToNewZone) {
+ // Add a single RRset to a fresh empty zone
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // Similar to the previous case, but with RRSIG
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+ this->updater_->addRRset(*this->rrsigset_);
+
+ // confirm the expected columns were passed to the accessor (if checkable).
+ const char* const rrsig_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "RRSIG", "A",
+ "A 5 3 0 20000101000000 20000201000000 0 example.org. FAKEFAKEFAKE"
+ };
+ this->checkLastAdded(rrsig_added);
+
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back(
+ rrsig_added[DatabaseAccessor::ADD_RDATA]);
+ {
+ SCOPED_TRACE("add RRset with RRSIG");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ }
+
+ // Add the non-RRSIG RRset again, to see whether the earlier attempt at
+ // adding the RRSIG causes any unexpected effect, in particular whether
+ // the SIGTYPE field might remain.
+ this->updater_->addRRset(*this->rrset_);
+ const char* const rrset_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "A", "", "192.0.2.2"
+ };
+ this->checkLastAdded(rrset_added);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToCurrentZone) {
+ // Similar to the previous test, but not replacing the existing data.
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->addRRset(*this->rrset_);
+
+ // We should see both old and new data.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("add RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
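+
+// A minimal sketch of the add-and-commit workflow checked in detail above:
+// getUpdater(zone, false) opens an in-place update, addRRset() stages the
+// change, and only commit() makes it visible to ordinary finders.
+TYPED_TEST(DatabaseClientTest, addCommitWorkflowSketch) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->addRRset(*this->rrset_);
+ this->updater_->commit();
+
+ // After commit, the name resolves through a normal finder as well.
+ EXPECT_EQ(ZoneFinder::SUCCESS,
+ this->getFinder()->find(this->qname_, this->qtype_).code);
+}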
+
+TYPED_TEST(DatabaseClientTest, addMultipleRRs) {
+ // Similar to the previous case, but the added RRset contains multiple
+ // RRs.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.3"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.3");
+ {
+ SCOPED_TRACE("add multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfLargerTTL) {
+ // Similar to the previous one, but the TTL of the added RRset is larger
+ // than that of the existing record. The finder should use the smaller
+ // one.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(7200));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of larger TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfSmallerTTL) {
+ // Similar to the previous one, but the added RRset has a smaller TTL.
+ // The added TTL should be used by the finder.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(1800));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of smaller TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, RRTTL(1800), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addSameRR) {
+ // Add the same RR as one that is already in the data source.
+ // Currently the add interface doesn't try to suppress the duplicate,
+ // nor does the finder. We may want to revisit this in future versions.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ {
+ SCOPED_TRACE("add same RR");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.100");
+ {
+ // Note: if the find() implementation becomes more strict about
+ // zone cuts, this test may fail; the test should then be updated.
+ SCOPED_TRACE("add out-of-zone RR");
+ doFindTest(this->updater_->getFinder(), Name("example.com"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRset) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+
+ // Delete one RR from an RRset
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // Delete the only RR of a name
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // The updater's finder should immediately see the deleted results.
+ {
+ SCOPED_TRACE("delete RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+
+ // before committing the change, the original finder should see the
+ // original record.
+ {
+ SCOPED_TRACE("delete RRset before commit");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ RRType::CNAME(), this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // once committed, the record should be removed from the original finder's
+ // view, too.
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("delete RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::NXRRSET, this->empty_rdatas_,
+ this->empty_rdatas_);
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
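+
+// A minimal sketch of the corresponding delete flow: deleteRRset() removes
+// the RRs whose owner name, type and RDATA match, the updater's finder
+// sees the removal at once, and ordinary finders only after commit().
+TYPED_TEST(DatabaseClientTest, deleteCommitWorkflowSketch) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ EXPECT_EQ(ZoneFinder::NXRRSET,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+ this->updater_->commit();
+ EXPECT_EQ(ZoneFinder::NXRRSET,
+ this->getFinder()->find(this->qname_, this->qtype_).code);
+}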
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
+ // similar to the previous case, but it removes the only record of the
+ // given name. a subsequent find() should result in NXDOMAIN.
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset to NXDOMAIN");
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteMultipleRRs) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::2"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ {
+ SCOPED_TRACE("delete multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, partialDelete) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ // This does not exist in the test data source:
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::3"));
+
+ // deleteRRset should succeed "silently", and subsequent find() should
+ // find the remaining RR.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("partial delete");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ RRType::AAAA(), this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteNoMatch) {
+ // similar to the previous test, but there's not even a match in the
+ // specified RRset. Essentially there's no difference in the result.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete no match");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteWithDifferentTTL) {
+ // Our delete interface simply ignores TTL (may change in a future version)
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ RRTTL(1800)));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset with a different TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->rrset_));
+}
+
+TYPED_TEST(DatabaseClientTest, deleteAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, compoundUpdate) {
+ // This test case performs arbitrarily chosen add/delete operations
+ // in a single update transaction. Essentially there is nothing new to
+ // test here, but there may be some bugs that were overlooked and can
+ // only show up in the compound update scenario, so we test it.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // add a new RR to an existing RRset
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // delete an existing RR
+ this->rrset_.reset(new RRset(Name("www.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // re-add it
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // add a new RR with a new name
+ const Name newname("newname.example.org");
+ const RRType newtype(RRType::AAAA());
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::10"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ this->expected_rdatas_.push_back("2001:db8::11");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // delete one RR from the previous set
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // Commit the changes and confirm that all of them were applied.
+ this->updater_->commit();
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(*finder, newname, newtype, newtype, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, previous) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www2.example.org.")));
+ // Check a name that doesn't exist there
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www1.example.org.")));
+ if (this->is_mock_) { // We can't really force the DB to throw
+ // Check it doesn't crash or anything if the underlying DB throws
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("bad.example.org")));
+ finder =
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder);
+
+ EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")),
+ isc::NotImplemented);
+ } else {
+ // No need to test this on the mock one, because there we only test
+ // that the exception gets through
+
+ // A name before the origin
+ EXPECT_THROW(finder->findPreviousName(Name("example.com")),
+ isc::NotImplemented);
+ }
+}
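+
+// A minimal sketch of what the NSEC-related tests above rely on:
+// findPreviousName() returns the closest existing name sorting before the
+// queried one, i.e. the owner of the covering NSEC.
+TYPED_TEST(DatabaseClientTest, previousNameUsageSketch) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // www1.example.org doesn't exist; its predecessor is www.example.org,
+ // whose NSEC covers it (see NXDOMAIN_NSEC above).
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www1.example.org.")));
+}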
+
+TYPED_TEST(DatabaseClientTest, invalidRdata) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->find(Name("invalidrdata.example.org."), RRType::A()),
+ DataSourceError);
+ EXPECT_THROW(finder->find(Name("invalidrdata2.example.org."), RRType::A()),
+ DataSourceError);
+}
+
+TEST_F(MockDatabaseClientTest, missingNSEC) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ /*
+ * FIXME: For now, we can't really distinguish this bogus input
+ * from an unsigned zone, so we can't throw. But once we can,
+ * enable the original test.
+ */
+#if 0
+ EXPECT_THROW(finder->find(Name("badnsec2.example.org."), RRType::A(), NULL,
+ ZoneFinder::FIND_DNSSEC),
+ DataSourceError);
+#endif
+ doFindTest(*finder, Name("badnsec2.example.org."), RRType::A(),
+ RRType::A(), this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TEST_F(MockDatabaseClientTest, badName) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")),
+ DataSourceError);
+}
+
+}
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
new file mode 100644
index 0000000..0133508
--- /dev/null
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/scoped_ptr.hpp>
+
+#include <datasrc/factory.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <dns/rrclass.h>
+#include <cc/data.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+namespace {
+
+TEST(FactoryTest, sqlite3ClientBadConfig) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operations on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create("/foo/bar/doesnotexist"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceError);
+
+ config->set("database_file", Element::create(SQLITE_DBFILE_EXAMPLE_ORG));
+ DataSourceClientContainer dsc("sqlite3", config);
+
+ DataSourceClient::FindResult result1(
+ dsc.getInstance().findZone(isc::dns::Name("example.org.")));
+ ASSERT_EQ(result::SUCCESS, result1.code);
+
+ DataSourceClient::FindResult result2(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result2.code);
+
+ ZoneIteratorPtr iterator(dsc.getInstance().getIterator(
+ isc::dns::Name("example.org.")));
+
+ ZoneUpdaterPtr updater(dsc.getInstance().getUpdater(
+ isc::dns::Name("example.org."), false));
+}
+
+TEST(FactoryTest, memoryClient) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operations on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer client("memory", config),
+ DataSourceError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("type", Element::create("memory"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceError);
+
+ config->set("zones", Element::createList());
+ DataSourceClientContainer dsc("memory", config);
+
+ // Once it is able to load some zones, we should add a few tests
+ // here to see that it does.
+ DataSourceClient::FindResult result(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result.code);
+
+ ASSERT_THROW(dsc.getInstance().getIterator(isc::dns::Name("example.org.")),
+ DataSourceError);
+
+ ASSERT_THROW(dsc.getInstance().getUpdater(isc::dns::Name("no.such.zone."),
+ false), isc::NotImplemented);
+}
+
+TEST(FactoryTest, badType) {
+ ASSERT_THROW(DataSourceClientContainer("foo", ElementPtr()),
+ DataSourceError);
+}
+
+} // end anonymous namespace
+
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 83fbb58..2b854db 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -29,6 +29,8 @@
#include <dns/masterload.h>
#include <datasrc/memory_datasrc.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
#include <gtest/gtest.h>
@@ -42,119 +44,173 @@ namespace {
using result::SUCCESS;
using result::EXIST;
-class MemoryDataSrcTest : public ::testing::Test {
+class InMemoryClientTest : public ::testing::Test {
protected:
- MemoryDataSrcTest() : rrclass(RRClass::IN())
+ InMemoryClientTest() : rrclass(RRClass::IN())
{}
RRClass rrclass;
- MemoryDataSrc memory_datasrc;
+ InMemoryClient memory_client;
};
-TEST_F(MemoryDataSrcTest, add_find_Zone) {
+TEST_F(InMemoryClientTest, add_find_Zone) {
// test add zone
// Bogus zone (NULL)
- EXPECT_THROW(memory_datasrc.addZone(ZonePtr()), isc::InvalidParameter);
+ EXPECT_THROW(memory_client.addZone(ZoneFinderPtr()),
+ isc::InvalidParameter);
// add zones with different names one by one
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("a")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(), Name("b")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("c")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("a")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("b")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("c")))));
// add zones with the same name suffix
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("x.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("o.w.y.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("p.w.y.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("q.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("x.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("o.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("p.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("q.w.y.d.e.f")))));
// add super zone and its subzone
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(), Name("g.h")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("i.g.h")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("z.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("j.z.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("g.h")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("i.g.h")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("z.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("j.z.d.e.f")))));
// different zone class isn't allowed.
- EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("q.w.y.d.e.f")))));
+ EXPECT_EQ(result::EXIST, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("q.w.y.d.e.f")))));
// names are compared in a case insensitive manner.
- EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("Q.W.Y.d.E.f")))));
+ EXPECT_EQ(result::EXIST, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("Q.W.Y.d.E.f")))));
// test find zone
- EXPECT_EQ(result::SUCCESS, memory_datasrc.findZone(Name("a")).code);
+ EXPECT_EQ(result::SUCCESS, memory_client.findZone(Name("a")).code);
EXPECT_EQ(Name("a"),
- memory_datasrc.findZone(Name("a")).zone->getOrigin());
+ memory_client.findZone(Name("a")).zone_finder->getOrigin());
EXPECT_EQ(result::SUCCESS,
- memory_datasrc.findZone(Name("j.z.d.e.f")).code);
+ memory_client.findZone(Name("j.z.d.e.f")).code);
EXPECT_EQ(Name("j.z.d.e.f"),
- memory_datasrc.findZone(Name("j.z.d.e.f")).zone->getOrigin());
+ memory_client.findZone(Name("j.z.d.e.f")).zone_finder->
+ getOrigin());
// NOTFOUND
- EXPECT_EQ(result::NOTFOUND, memory_datasrc.findZone(Name("d.e.f")).code);
- EXPECT_EQ(ConstZonePtr(), memory_datasrc.findZone(Name("d.e.f")).zone);
+ EXPECT_EQ(result::NOTFOUND, memory_client.findZone(Name("d.e.f")).code);
+ EXPECT_EQ(ConstZoneFinderPtr(),
+ memory_client.findZone(Name("d.e.f")).zone_finder);
EXPECT_EQ(result::NOTFOUND,
- memory_datasrc.findZone(Name("w.y.d.e.f")).code);
- EXPECT_EQ(ConstZonePtr(),
- memory_datasrc.findZone(Name("w.y.d.e.f")).zone);
+ memory_client.findZone(Name("w.y.d.e.f")).code);
+ EXPECT_EQ(ConstZoneFinderPtr(),
+ memory_client.findZone(Name("w.y.d.e.f")).zone_finder);
// there's no exact match. the result should be the longest match,
// and the code should be PARTIALMATCH.
EXPECT_EQ(result::PARTIALMATCH,
- memory_datasrc.findZone(Name("j.g.h")).code);
+ memory_client.findZone(Name("j.g.h")).code);
EXPECT_EQ(Name("g.h"),
- memory_datasrc.findZone(Name("g.h")).zone->getOrigin());
+ memory_client.findZone(Name("g.h")).zone_finder->getOrigin());
EXPECT_EQ(result::PARTIALMATCH,
- memory_datasrc.findZone(Name("z.i.g.h")).code);
+ memory_client.findZone(Name("z.i.g.h")).code);
EXPECT_EQ(Name("i.g.h"),
- memory_datasrc.findZone(Name("z.i.g.h")).zone->getOrigin());
+ memory_client.findZone(Name("z.i.g.h")).zone_finder->
+ getOrigin());
}
-TEST_F(MemoryDataSrcTest, getZoneCount) {
- EXPECT_EQ(0, memory_datasrc.getZoneCount());
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
- EXPECT_EQ(1, memory_datasrc.getZoneCount());
+TEST_F(InMemoryClientTest, iterator) {
+ // Just some preparations of data
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b")), DataSourceError);
+ // This zone is not there either, even though there's a zone containing it
+ EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a")));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ EXPECT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with full zone, one by one.
+ // It should be in ascending order in the case of the InMemory data source
+ // (this isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"));
+ EXPECT_EQ(aRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(aRRsetAAAA, iterator->getNextRRset());
+ EXPECT_EQ(subRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
+TEST_F(InMemoryClientTest, getZoneCount) {
+ EXPECT_EQ(0, memory_client.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.com"))));
+ EXPECT_EQ(1, memory_client.getZoneCount());
// duplicate add. counter shouldn't change
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
- EXPECT_EQ(1, memory_datasrc.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.com"))));
+ EXPECT_EQ(1, memory_client.getZoneCount());
// add one more
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.org"))));
- EXPECT_EQ(2, memory_datasrc.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.org"))));
+ EXPECT_EQ(2, memory_client.getZoneCount());
}
-// A helper callback of masterLoad() used in MemoryZoneTest.
+TEST_F(InMemoryClientTest, startUpdateZone) {
+ EXPECT_THROW(memory_client.getUpdater(Name("example.org"), false),
+ isc::NotImplemented);
+}
+
+// A helper callback of masterLoad() used in InMemoryZoneFinderTest.
void
setRRset(RRsetPtr rrset, vector<RRsetPtr*>::iterator& it) {
*(*it) = rrset;
++it;
}
-/// \brief Test fixture for the MemoryZone class
-class MemoryZoneTest : public ::testing::Test {
+/// \brief Test fixture for the InMemoryZoneFinder class
+class InMemoryZoneFinderTest : public ::testing::Test {
// A straightforward pair of textual RR(set) and a RRsetPtr variable
// to store the RRset. Used to build test data below.
struct RRsetData {
@@ -162,10 +218,10 @@ class MemoryZoneTest : public ::testing::Test {
RRsetPtr* rrset;
};
public:
- MemoryZoneTest() :
+ InMemoryZoneFinderTest() :
class_(RRClass::IN()),
origin_("example.org"),
- zone_(class_, origin_)
+ zone_finder_(class_, origin_)
{
// Build test RRsets. Below, we construct an RRset for
// each textual RR(s) of zone_data, and assign it to the corresponding
@@ -224,8 +280,8 @@ public:
// Some data to test with
const RRClass class_;
const Name origin_;
- // The zone to torture by tests
- MemoryZone zone_;
+ // The zone finder to torture by tests
+ InMemoryZoneFinder zone_finder_;
/*
* Some RRsets to put inside the zone.
@@ -262,9 +318,9 @@ public:
RRsetPtr rr_not_wild_another_;
/**
- * \brief Test one find query to the zone.
+ * \brief Test one find query to the zone finder.
*
- * Asks a query to the zone and checks it does not throw and returns
+ * Issues a query to the zone finder and checks that it does not throw and returns
expected results. It returns nothing; it just signals failures
* to GTEST.
*
@@ -274,29 +330,31 @@ public:
* \param check_answer Should a check against equality of the answer be
* done?
* \param answer The expected rrset, if any should be returned.
- * \param zone Check different MemoryZone object than zone_ (if NULL,
- * uses zone_)
+ * \param zone_finder Check different InMemoryZoneFinder object than
+ * zone_finder_ (if NULL, uses zone_finder_)
* \param check_wild_answer Checks that the answer has the same RRs, type
* class and TTL as the expected answer and that the name corresponds
* to the one searched. It is meant for checking answers for wildcard
* queries.
*/
- void findTest(const Name& name, const RRType& rrtype, Zone::Result result,
+ void findTest(const Name& name, const RRType& rrtype,
+ ZoneFinder::Result result,
bool check_answer = true,
const ConstRRsetPtr& answer = ConstRRsetPtr(),
RRsetList* target = NULL,
- MemoryZone* zone = NULL,
- Zone::FindOptions options = Zone::FIND_DEFAULT,
+ InMemoryZoneFinder* zone_finder = NULL,
+ ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT,
bool check_wild_answer = false)
{
- if (!zone) {
- zone = &zone_;
+ if (zone_finder == NULL) {
+ zone_finder = &zone_finder_;
}
// The whole block is inside, because we need to check the result and
// we can't assign to FindResult
EXPECT_NO_THROW({
- Zone::FindResult find_result(zone->find(name, rrtype, target,
- options));
+ ZoneFinder::FindResult find_result(zone_finder->find(
+ name, rrtype,
+ target, options));
// Check it returns correct answers
EXPECT_EQ(result, find_result.code);
if (check_answer) {
@@ -337,14 +395,22 @@ public:
};
/**
- * \brief Test MemoryZone::MemoryZone constructor.
+ * \brief Check that findPreviousName throws as it should now.
+ */
+TEST_F(InMemoryZoneFinderTest, findPreviousName) {
+ EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")),
+ isc::NotImplemented);
+}
+
+/**
+ * \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor.
*
- * Takes the created zone and checks its properties they are the same
+ * Takes the created zone finder and checks that its properties are the same
* as passed parameters.
*/
-TEST_F(MemoryZoneTest, constructor) {
- ASSERT_EQ(class_, zone_.getClass());
- ASSERT_EQ(origin_, zone_.getOrigin());
+TEST_F(InMemoryZoneFinderTest, constructor) {
+ ASSERT_EQ(class_, zone_finder_.getClass());
+ ASSERT_EQ(origin_, zone_finder_.getOrigin());
}
/**
* \brief Test adding.
@@ -352,174 +418,178 @@ TEST_F(MemoryZoneTest, constructor) {
* We test that it throws at the correct moments and the correct exceptions.
* And we test the return value.
*/
-TEST_F(MemoryZoneTest, add) {
+TEST_F(InMemoryZoneFinderTest, add) {
// This one does not belong to this zone
- EXPECT_THROW(zone_.add(rr_out_), MemoryZone::OutOfZone);
+ EXPECT_THROW(zone_finder_.add(rr_out_), InMemoryZoneFinder::OutOfZone);
// Test null pointer
- EXPECT_THROW(zone_.add(ConstRRsetPtr()), MemoryZone::NullRRset);
+ EXPECT_THROW(zone_finder_.add(ConstRRsetPtr()),
+ InMemoryZoneFinder::NullRRset);
// Now put all the data we have there. It should throw nothing
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
// Try putting there something twice, it should be rejected
- EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_a_)));
}
-TEST_F(MemoryZoneTest, addMultipleCNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleCNAMEs) {
rr_cname_->addRdata(generic::CNAME("canonical2.example.org."));
- EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addCNAMEThenOther) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
- EXPECT_THROW(zone_.add(rr_cname_a_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addCNAMEThenOther) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
+ EXPECT_THROW(zone_finder_.add(rr_cname_a_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addOtherThenCNAME) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_a_));
- EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addOtherThenCNAME) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_a_));
+ EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, findCNAME) {
+TEST_F(InMemoryZoneFinderTest, findCNAME) {
// install CNAME RR
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
// Find A RR of the same. Should match the CNAME
- findTest(rr_cname_->getName(), RRType::NS(), Zone::CNAME, true, rr_cname_);
+ findTest(rr_cname_->getName(), RRType::NS(), ZoneFinder::CNAME, true,
+ rr_cname_);
// Find the CNAME itself. Should result in normal SUCCESS
- findTest(rr_cname_->getName(), RRType::CNAME(), Zone::SUCCESS, true,
+ findTest(rr_cname_->getName(), RRType::CNAME(), ZoneFinder::SUCCESS, true,
rr_cname_);
}
-TEST_F(MemoryZoneTest, findCNAMEUnderZoneCut) {
+TEST_F(InMemoryZoneFinderTest, findCNAMEUnderZoneCut) {
// There's nothing special when we find a CNAME under a zone cut
// (with FIND_GLUE_OK). The behavior is different from BIND 9,
// so we test this case explicitly.
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
RRsetPtr rr_cname_under_cut_(new RRset(Name("cname.child.example.org"),
class_, RRType::CNAME(),
RRTTL(300)));
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_under_cut_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_under_cut_));
findTest(Name("cname.child.example.org"), RRType::AAAA(),
- Zone::CNAME, true, rr_cname_under_cut_, NULL, NULL,
- Zone::FIND_GLUE_OK);
+ ZoneFinder::CNAME, true, rr_cname_under_cut_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
// Two DNAMEs at a single domain are disallowed by RFC 2672, section 3.
// Having a CNAME there is disallowed too, but it is tested by
// addOtherThenCNAME and addCNAMEThenOther.
-TEST_F(MemoryZoneTest, addMultipleDNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleDNAMEs) {
rr_dname_->addRdata(generic::DNAME("target2.example.org."));
- EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
}
/*
* These two tests ensure that we can't have DNAME and NS at the same
* node with the exception of the apex of zone (forbidden by RFC 2672)
*/
-TEST_F(MemoryZoneTest, addDNAMEThenNS) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- EXPECT_THROW(zone_.add(rr_dname_ns_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addDNAMEThenNS) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ EXPECT_THROW(zone_finder_.add(rr_dname_ns_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addNSThenDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_ns_)));
- EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addNSThenDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_ns_)));
+ EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
}
// It is allowed to have NS and DNAME at apex
-TEST_F(MemoryZoneTest, DNAMEAndNSAtApex) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+TEST_F(InMemoryZoneFinderTest, DNAMEAndNSAtApex) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
// It should be possible to find the NS; below the apex the result should be
// DNAME, not delegation
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
- findTest(rr_child_ns_->getName(), RRType::A(), Zone::DNAME, true,
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+ findTest(rr_child_ns_->getName(), RRType::A(), ZoneFinder::DNAME, true,
rr_dname_apex_);
}
-TEST_F(MemoryZoneTest, NSAndDNAMEAtApex) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
+TEST_F(InMemoryZoneFinderTest, NSAndDNAMEAtApex) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
}
// TODO: Test (and implement) adding data under DNAME. That is forbidden by
// 2672 as well.
// Search under a DNAME record. It should return the DNAME
-TEST_F(MemoryZoneTest, findBelowDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- findTest(Name("below.dname.example.org"), RRType::A(), Zone::DNAME, true,
- rr_dname_);
+TEST_F(InMemoryZoneFinderTest, findBelowDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ findTest(Name("below.dname.example.org"), RRType::A(), ZoneFinder::DNAME,
+ true, rr_dname_);
}
// Search at the domain with DNAME. It should act as if the DNAME isn't
// there; the DNAME influences only the data below (see RFC 2672, section 3)
-TEST_F(MemoryZoneTest, findAtDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_a_)));
+TEST_F(InMemoryZoneFinderTest, findAtDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_a_)));
const Name dname_name(rr_dname_->getName());
- findTest(dname_name, RRType::A(), Zone::SUCCESS, true, rr_dname_a_);
- findTest(dname_name, RRType::DNAME(), Zone::SUCCESS, true, rr_dname_);
- findTest(dname_name, RRType::TXT(), Zone::NXRRSET, true);
+ findTest(dname_name, RRType::A(), ZoneFinder::SUCCESS, true, rr_dname_a_);
+ findTest(dname_name, RRType::DNAME(), ZoneFinder::SUCCESS, true,
+ rr_dname_);
+ findTest(dname_name, RRType::TXT(), ZoneFinder::NXRRSET, true);
}
// Try searching something that is both under NS and DNAME, without and with
// GLUE_OK mode (it should stop at the NS and DNAME respectively).
-TEST_F(MemoryZoneTest, DNAMEUnderNS) {
- zone_.add(rr_child_ns_);
- zone_.add(rr_child_dname_);
+TEST_F(InMemoryZoneFinderTest, DNAMEUnderNS) {
+ zone_finder_.add(rr_child_ns_);
+ zone_finder_.add(rr_child_dname_);
Name lowName("below.dname.child.example.org.");
- findTest(lowName, RRType::A(), Zone::DELEGATION, true, rr_child_ns_);
- findTest(lowName, RRType::A(), Zone::DNAME, true, rr_child_dname_, NULL,
- NULL, Zone::FIND_GLUE_OK);
+ findTest(lowName, RRType::A(), ZoneFinder::DELEGATION, true, rr_child_ns_);
+ findTest(lowName, RRType::A(), ZoneFinder::DNAME, true, rr_child_dname_,
+ NULL, NULL, ZoneFinder::FIND_GLUE_OK);
}
// Test adding child zones and zone cut handling
-TEST_F(MemoryZoneTest, delegationNS) {
+TEST_F(InMemoryZoneFinderTest, delegationNS) {
// add in-zone data
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
// install a zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// below the zone cut
- findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_);
+ findTest(Name("www.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
// at the zone cut
- findTest(Name("child.example.org"), RRType::A(), Zone::DELEGATION,
+ findTest(Name("child.example.org"), RRType::A(), ZoneFinder::DELEGATION,
true, rr_child_ns_);
- findTest(Name("child.example.org"), RRType::NS(), Zone::DELEGATION,
+ findTest(Name("child.example.org"), RRType::NS(), ZoneFinder::DELEGATION,
true, rr_child_ns_);
// finding NS for the apex (origin) node. This must not be confused
// with delegation due to the existence of an NS RR.
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
// unusual case of "nested delegation": the highest cut should be used.
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
findTest(Name("www.grand.child.example.org"), RRType::A(),
- Zone::DELEGATION, true, rr_child_ns_); // note: !rr_grandchild_ns_
+ // note: !rr_grandchild_ns_
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
}
-TEST_F(MemoryZoneTest, findAny) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+TEST_F(InMemoryZoneFinderTest, findAny) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
// origin
RRsetList origin_rrsets;
- findTest(origin_, RRType::ANY(), Zone::SUCCESS, true,
+ findTest(origin_, RRType::ANY(), ZoneFinder::SUCCESS, true,
ConstRRsetPtr(), &origin_rrsets);
EXPECT_EQ(2, origin_rrsets.size());
EXPECT_EQ(rr_a_, origin_rrsets.findRRset(RRType::A(), RRClass::IN()));
@@ -527,13 +597,13 @@ TEST_F(MemoryZoneTest, findAny) {
// out zone name
RRsetList out_rrsets;
- findTest(Name("example.com"), RRType::ANY(), Zone::NXDOMAIN, true,
+ findTest(Name("example.com"), RRType::ANY(), ZoneFinder::NXDOMAIN, true,
ConstRRsetPtr(), &out_rrsets);
EXPECT_EQ(0, out_rrsets.size());
RRsetList glue_child_rrsets;
- findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::SUCCESS, true,
- ConstRRsetPtr(), &glue_child_rrsets);
+ findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::SUCCESS,
+ true, ConstRRsetPtr(), &glue_child_rrsets);
EXPECT_EQ(rr_child_glue_, glue_child_rrsets.findRRset(RRType::A(),
RRClass::IN()));
EXPECT_EQ(1, glue_child_rrsets.size());
@@ -542,59 +612,60 @@ TEST_F(MemoryZoneTest, findAny) {
// been implemented
// add zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// zone cut
RRsetList child_rrsets;
- findTest(rr_child_ns_->getName(), RRType::ANY(), Zone::DELEGATION, true,
- rr_child_ns_, &child_rrsets);
+ findTest(rr_child_ns_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_, &child_rrsets);
EXPECT_EQ(0, child_rrsets.size());
// glue for this zone cut
RRsetList new_glue_child_rrsets;
- findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::DELEGATION, true,
- rr_child_ns_, &new_glue_child_rrsets);
+ findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_, &new_glue_child_rrsets);
EXPECT_EQ(0, new_glue_child_rrsets.size());
}
-TEST_F(MemoryZoneTest, glue) {
+TEST_F(InMemoryZoneFinderTest, glue) {
// install zone data:
// a zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// glue for this cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
// a nested zone cut (unusual)
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
// glue under the deeper zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_glue_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_glue_)));
// by default glue is hidden due to the zone cut
- findTest(rr_child_glue_->getName(), RRType::A(), Zone::DELEGATION, true,
- rr_child_ns_);
+ findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_);
// If we do it in the "glue OK" mode, we should find the exact match.
- findTest(rr_child_glue_->getName(), RRType::A(), Zone::SUCCESS, true,
- rr_child_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+ rr_child_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// glue OK + NXRRSET case
- findTest(rr_child_glue_->getName(), RRType::AAAA(), Zone::NXRRSET, true,
- ConstRRsetPtr(), NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(rr_child_glue_->getName(), RRType::AAAA(), ZoneFinder::NXRRSET,
+ true, ConstRRsetPtr(), NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// glue OK + NXDOMAIN case
- findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(Name("www.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
// nested cut case. The glue should be found.
findTest(rr_grandchild_glue_->getName(), RRType::AAAA(),
- Zone::SUCCESS,
- true, rr_grandchild_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+ ZoneFinder::SUCCESS,
+ true, rr_grandchild_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// A non-existent name in nested cut. This should result in delegation
// at the highest zone cut.
findTest(Name("www.grand.child.example.org"), RRType::TXT(),
- Zone::DELEGATION, true, rr_child_ns_, NULL, NULL,
- Zone::FIND_GLUE_OK);
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
/**
@@ -604,28 +675,29 @@ TEST_F(MemoryZoneTest, glue) {
* \todo This doesn't handle CNAME and so on. If it isn't
* directly there, it just reports that it doesn't exist.
*/
-TEST_F(MemoryZoneTest, find) {
+TEST_F(InMemoryZoneFinderTest, find) {
// Fill some data inside
// Now put all the data we have there. It should throw nothing
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
// These two should be successful
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
- findTest(rr_ns_a_->getName(), RRType::A(), Zone::SUCCESS, true, rr_ns_a_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+ findTest(rr_ns_a_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+ rr_ns_a_);
// These domains exist but don't have the provided RRType
- findTest(origin_, RRType::AAAA(), Zone::NXRRSET);
- findTest(rr_ns_a_->getName(), RRType::NS(), Zone::NXRRSET);
+ findTest(origin_, RRType::AAAA(), ZoneFinder::NXRRSET);
+ findTest(rr_ns_a_->getName(), RRType::NS(), ZoneFinder::NXRRSET);
// These domains don't exist (and one is out of the zone)
- findTest(Name("nothere.example.org"), RRType::A(), Zone::NXDOMAIN);
- findTest(Name("example.net"), RRType::A(), Zone::NXDOMAIN);
+ findTest(Name("nothere.example.org"), RRType::A(), ZoneFinder::NXDOMAIN);
+ findTest(Name("example.net"), RRType::A(), ZoneFinder::NXDOMAIN);
}
-TEST_F(MemoryZoneTest, emptyNode) {
+TEST_F(InMemoryZoneFinderTest, emptyNode) {
/*
* The backend RBTree for this test should look like as follows:
* example.org
@@ -645,52 +717,53 @@ TEST_F(MemoryZoneTest, emptyNode) {
for (int i = 0; names[i] != NULL; ++i) {
ConstRRsetPtr rrset(new RRset(Name(names[i]), class_, RRType::A(),
RRTTL(300)));
- EXPECT_EQ(SUCCESS, zone_.add(rrset));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rrset));
}
// empty node matching, easy case: the node for 'baz' exists with
// no data.
- findTest(Name("baz.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("baz.example.org"), RRType::A(), ZoneFinder::NXRRSET);
// empty node matching, a trickier case: the node for 'foo' is part of
// "x.foo", which should be considered an empty node.
- findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
// "org" is contained in "example.org", but it shouldn't be treated as
// NXRRSET because it's out of zone.
// Note: basically we don't expect such a query to be performed (the common
// operation is to identify the best matching zone first and then search
// it), but we shouldn't be confused even in the unexpected case.
- findTest(Name("org"), RRType::A(), Zone::NXDOMAIN);
+ findTest(Name("org"), RRType::A(), ZoneFinder::NXDOMAIN);
}
-TEST_F(MemoryZoneTest, load) {
+TEST_F(InMemoryZoneFinderTest, load) {
// Put some data inside the zone
- EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_finder_.add(rr_ns_)));
// Loading with different origin should fail
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+ MasterLoadError);
// See the original data is still there, survived the exception
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
// Create correct zone
- MemoryZone rootzone(class_, Name("."));
+ InMemoryZoneFinder rootzone(class_, Name("."));
// Try putting something inside
EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, rootzone.add(rr_ns_aaaa_)));
// Load the zone. It should overwrite/remove the above RRset
EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
// Now see there are some rrsets (we don't look inside, though)
- findTest(Name("."), RRType::SOA(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &rootzone);
- findTest(Name("."), RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &rootzone);
- findTest(Name("a.root-servers.net."), RRType::A(), Zone::SUCCESS, false,
- ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("."), RRType::SOA(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("."), RRType::NS(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("a.root-servers.net."), RRType::A(), ZoneFinder::SUCCESS,
+ false, ConstRRsetPtr(), NULL, &rootzone);
// But this should no longer be here
- findTest(rr_ns_a_->getName(), RRType::AAAA(), Zone::NXDOMAIN, true,
+ findTest(rr_ns_a_->getName(), RRType::AAAA(), ZoneFinder::NXDOMAIN, true,
ConstRRsetPtr(), NULL, &rootzone);
// Try loading zone that is wrong in a different way
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
MasterLoadError);
}
@@ -698,7 +771,7 @@ TEST_F(MemoryZoneTest, load) {
* Test that puts a (simple) wildcard into the zone and checks we can
* correctly find the data.
*/
-TEST_F(MemoryZoneTest, wildcard) {
+TEST_F(InMemoryZoneFinderTest, wildcard) {
/*
* example.org.
* |
@@ -706,40 +779,41 @@ TEST_F(MemoryZoneTest, wildcard) {
* |
* *
*/
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
// Search at the parent. The parent will not have the A, but it will
// be in the wildcard (so check the wildcard isn't matched at the parent)
{
SCOPED_TRACE("Search at parrent");
- findTest(Name("wild.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("wild.example.org"), RRType::A(), ZoneFinder::NXRRSET);
}
// Search the original name of wildcard
{
SCOPED_TRACE("Search directly at *");
- findTest(Name("*.wild.example.org"), RRType::A(), Zone::SUCCESS, true,
- rr_wild_);
+ findTest(Name("*.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+ true, rr_wild_);
}
// Search "created" name.
{
SCOPED_TRACE("Search at created child");
- findTest(Name("a.wild.example.org"), RRType::A(), Zone::SUCCESS, false,
- rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name("a.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+ false, rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
}
// Search another created name, this time a little bit lower
{
SCOPED_TRACE("Search at created grand-child");
- findTest(Name("a.b.wild.example.org"), RRType::A(), Zone::SUCCESS,
- false, rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name("a.b.wild.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, false, rr_wild_, NULL, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
}
- EXPECT_EQ(SUCCESS, zone_.add(rr_under_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_under_wild_));
{
SCOPED_TRACE("Search under non-wildcard");
findTest(Name("bar.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
}
}
@@ -750,33 +824,34 @@ TEST_F(MemoryZoneTest, wildcard) {
* - When the query is in another zone. That is, delegation cancels
* the wildcard defaults."
*/
-TEST_F(MemoryZoneTest, delegatedWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_wild_));
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+TEST_F(InMemoryZoneFinderTest, delegatedWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
{
SCOPED_TRACE("Looking under delegation point");
- findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_);
+ findTest(Name("a.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
}
{
SCOPED_TRACE("Looking under delegation point in GLUE_OK mode");
- findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(Name("a.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
}
// Tests combination of wildcard and ANY.
-TEST_F(MemoryZoneTest, anyWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+TEST_F(InMemoryZoneFinderTest, anyWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
// First try directly the name (normal match)
{
SCOPED_TRACE("Asking direcly for *");
RRsetList target;
- findTest(Name("*.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
- true, ConstRRsetPtr(), &target);
+ findTest(Name("*.wild.example.org"), RRType::ANY(),
+ ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
ASSERT_EQ(1, target.size());
EXPECT_EQ(RRType::A(), (*target.begin())->getType());
EXPECT_EQ(Name("*.wild.example.org"), (*target.begin())->getName());
@@ -786,8 +861,8 @@ TEST_F(MemoryZoneTest, anyWildcard) {
{
SCOPED_TRACE("Asking in the wild way");
RRsetList target;
- findTest(Name("a.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
- true, ConstRRsetPtr(), &target);
+ findTest(Name("a.wild.example.org"), RRType::ANY(),
+ ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
ASSERT_EQ(1, target.size());
EXPECT_EQ(RRType::A(), (*target.begin())->getType());
EXPECT_EQ(Name("a.wild.example.org"), (*target.begin())->getName());
@@ -796,56 +871,56 @@ TEST_F(MemoryZoneTest, anyWildcard) {
// Test there's nothing in the wildcard in the middle if we load
// wild.*.foo.example.org.
-TEST_F(MemoryZoneTest, emptyWildcard) {
+TEST_F(InMemoryZoneFinderTest, emptyWildcard) {
/*
* example.org.
* foo
* *
* wild
*/
- EXPECT_EQ(SUCCESS, zone_.add(rr_emptywild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_emptywild_));
{
SCOPED_TRACE("Asking for the original record under wildcard");
- findTest(Name("wild.*.foo.example.org"), RRType::A(), Zone::SUCCESS,
- true, rr_emptywild_);
+ findTest(Name("wild.*.foo.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, true, rr_emptywild_);
}
{
SCOPED_TRACE("Asking for A record");
- findTest(Name("a.foo.example.org"), RRType::A(), Zone::NXRRSET);
- findTest(Name("*.foo.example.org"), RRType::A(), Zone::NXRRSET);
- findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("a.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+ findTest(Name("*.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+ findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
}
{
SCOPED_TRACE("Asking for ANY record");
RRsetList normalTarget;
- findTest(Name("*.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
- ConstRRsetPtr(), &normalTarget);
+ findTest(Name("*.foo.example.org"), RRType::ANY(), ZoneFinder::NXRRSET,
+ true, ConstRRsetPtr(), &normalTarget);
EXPECT_EQ(0, normalTarget.size());
RRsetList wildTarget;
- findTest(Name("a.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
- ConstRRsetPtr(), &wildTarget);
+ findTest(Name("a.foo.example.org"), RRType::ANY(),
+ ZoneFinder::NXRRSET, true, ConstRRsetPtr(), &wildTarget);
EXPECT_EQ(0, wildTarget.size());
}
{
SCOPED_TRACE("Asking on the non-terminal");
findTest(Name("wild.bar.foo.example.org"), RRType::A(),
- Zone::NXRRSET);
+ ZoneFinder::NXRRSET);
}
}
// Same as emptyWildcard, but with multiple * in the path.
-TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_nested_emptywild_));
+TEST_F(InMemoryZoneFinderTest, nestedEmptyWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_nested_emptywild_));
{
SCOPED_TRACE("Asking for the original record under wildcards");
findTest(Name("wild.*.foo.*.bar.example.org"), RRType::A(),
- Zone::SUCCESS, true, rr_nested_emptywild_);
+ ZoneFinder::SUCCESS, true, rr_nested_emptywild_);
}
{
@@ -860,7 +935,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+ findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
}
}
@@ -878,7 +953,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+ findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
}
}
@@ -889,7 +964,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
SCOPED_TRACE(string("Node ") + *name);
RRsetList target;
- findTest(Name(*name), RRType::ANY(), Zone::NXRRSET, true,
+ findTest(Name(*name), RRType::ANY(), ZoneFinder::NXRRSET, true,
ConstRRsetPtr(), &target);
EXPECT_EQ(0, target.size());
}
@@ -899,21 +974,21 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
// We run this part twice from the below test, in two slightly different
// situations
void
-MemoryZoneTest::doCancelWildcardTest() {
+InMemoryZoneFinderTest::doCancelWildcardTest() {
// These should be canceled
{
SCOPED_TRACE("Canceled under foo.wild.example.org");
findTest(Name("aaa.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
findTest(Name("zzz.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
}
// This is existing, non-wildcard domain, shouldn't wildcard at all
{
SCOPED_TRACE("Existing domain under foo.wild.example.org");
- findTest(Name("bar.foo.wild.example.org"), RRType::A(), Zone::SUCCESS,
- true, rr_not_wild_);
+ findTest(Name("bar.foo.wild.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, true, rr_not_wild_);
}
// These should be caught by the wildcard
@@ -930,15 +1005,16 @@ MemoryZoneTest::doCancelWildcardTest() {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::SUCCESS, false, rr_wild_,
- NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name(*name), RRType::A(), ZoneFinder::SUCCESS, false,
+ rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
}
}
// This shouldn't be wildcarded, it's an existing domain
{
SCOPED_TRACE("The foo.wild.example.org itself");
- findTest(Name("foo.wild.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("foo.wild.example.org"), RRType::A(),
+ ZoneFinder::NXRRSET);
}
}
@@ -952,9 +1028,9 @@ MemoryZoneTest::doCancelWildcardTest() {
* Tests few cases "around" the canceled wildcard match, to see something that
* shouldn't be canceled isn't.
*/
-TEST_F(MemoryZoneTest, cancelWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
- EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_));
+TEST_F(InMemoryZoneFinderTest, cancelWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_));
{
SCOPED_TRACE("Runnig with single entry under foo.wild.example.org");
@@ -964,61 +1040,63 @@ TEST_F(MemoryZoneTest, cancelWildcard) {
// Try putting another one under foo.wild....
// The result should be the same but it will be done in another way in the
// code, because the foo.wild.example.org will exist in the tree.
- EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_another_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_another_));
{
SCOPED_TRACE("Runnig with two entries under foo.wild.example.org");
doCancelWildcardTest();
}
}
-TEST_F(MemoryZoneTest, loadBadWildcard) {
+TEST_F(InMemoryZoneFinderTest, loadBadWildcard) {
// We reject loading the zone if it contains a wildcard name for
// NS or DNAME.
- EXPECT_THROW(zone_.add(rr_nswild_), MemoryZone::AddError);
- EXPECT_THROW(zone_.add(rr_dnamewild_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_nswild_), InMemoryZoneFinder::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_dnamewild_),
+ InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, swap) {
- // build one zone with some data
- MemoryZone zone1(class_, origin_);
- EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_));
- EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_aaaa_));
+TEST_F(InMemoryZoneFinderTest, swap) {
+ // build one zone finder with some data
+ InMemoryZoneFinder finder1(class_, origin_);
+ EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_));
+ EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_aaaa_));
- // build another zone of a different RR class with some other data
+ // build another zone finder of a different RR class with some other data
const Name other_origin("version.bind");
ASSERT_NE(origin_, other_origin); // make sure these two are different
- MemoryZone zone2(RRClass::CH(), other_origin);
+ InMemoryZoneFinder finder2(RRClass::CH(), other_origin);
EXPECT_EQ(result::SUCCESS,
- zone2.add(RRsetPtr(new RRset(Name("version.bind"),
+ finder2.add(RRsetPtr(new RRset(Name("version.bind"),
RRClass::CH(), RRType::TXT(),
RRTTL(0)))));
- zone1.swap(zone2);
- EXPECT_EQ(other_origin, zone1.getOrigin());
- EXPECT_EQ(origin_, zone2.getOrigin());
- EXPECT_EQ(RRClass::CH(), zone1.getClass());
- EXPECT_EQ(RRClass::IN(), zone2.getClass());
+ finder1.swap(finder2);
+ EXPECT_EQ(other_origin, finder1.getOrigin());
+ EXPECT_EQ(origin_, finder2.getOrigin());
+ EXPECT_EQ(RRClass::CH(), finder1.getClass());
+ EXPECT_EQ(RRClass::IN(), finder2.getClass());
// make sure the zone data is swapped, too
- findTest(origin_, RRType::NS(), Zone::NXDOMAIN, false, ConstRRsetPtr(),
- NULL, &zone1);
- findTest(other_origin, RRType::TXT(), Zone::SUCCESS, false,
- ConstRRsetPtr(), NULL, &zone1);
- findTest(origin_, RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &zone2);
- findTest(other_origin, RRType::TXT(), Zone::NXDOMAIN, false,
- ConstRRsetPtr(), NULL, &zone2);
+ findTest(origin_, RRType::NS(), ZoneFinder::NXDOMAIN, false,
+ ConstRRsetPtr(), NULL, &finder1);
+ findTest(other_origin, RRType::TXT(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &finder1);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &finder2);
+ findTest(other_origin, RRType::TXT(), ZoneFinder::NXDOMAIN, false,
+ ConstRRsetPtr(), NULL, &finder2);
}
-TEST_F(MemoryZoneTest, getFileName) {
+TEST_F(InMemoryZoneFinderTest, getFileName) {
// for an empty zone the file name should also be empty.
- EXPECT_TRUE(zone_.getFileName().empty());
+ EXPECT_TRUE(zone_finder_.getFileName().empty());
// if loading a zone fails the file name shouldn't be set.
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
- EXPECT_TRUE(zone_.getFileName().empty());
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+ MasterLoadError);
+ EXPECT_TRUE(zone_finder_.getFileName().empty());
// after a successful load, the specified file name should be set
- MemoryZone rootzone(class_, Name("."));
+ InMemoryZoneFinder rootzone(class_, Name("."));
EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
// overriding load, which will fail
@@ -1028,9 +1106,8 @@ TEST_F(MemoryZoneTest, getFileName) {
EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
// After swap, file names should also be swapped.
- zone_.swap(rootzone);
- EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_.getFileName());
+ zone_finder_.swap(rootzone);
+ EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_finder_.getFileName());
EXPECT_TRUE(rootzone.getFileName().empty());
}
-
}
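Most of the hunks above are the mechanical Zone -> ZoneFinder rename (Zone::FindResult, Zone::SUCCESS and friends become ZoneFinder::FindResult, ZoneFinder::SUCCESS, and MemoryZone becomes InMemoryZoneFinder). For code outside these tests the change looks roughly like the sketch below; the names are taken from this diff, while the helper itself and the <datasrc/zone.h> header path are illustrative assumptions.

    // Illustrative helper written against the renamed API, passing the same
    // find() argument list the tests above use explicitly.
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>

    using isc::datasrc::ZoneFinder;
    using isc::dns::Name;
    using isc::dns::RRType;

    bool
    hasARecord(ZoneFinder& finder, const Name& qname) {
        // Before the rename this would have been Zone::FindResult,
        // Zone::FIND_DEFAULT and Zone::SUCCESS.
        ZoneFinder::FindResult result(
            finder.find(qname, RRType::A(), NULL, ZoneFinder::FIND_DEFAULT));
        return (result.code == ZoneFinder::SUCCESS);
    }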
diff --git a/src/lib/datasrc/tests/run_unittests.cc b/src/lib/datasrc/tests/run_unittests.cc
index a35a646..ffef333 100644
--- a/src/lib/datasrc/tests/run_unittests.cc
+++ b/src/lib/datasrc/tests/run_unittests.cc
@@ -13,6 +13,8 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+#include <log/logger_support.h>
#include <dns/tests/unittest_util.h>
@@ -21,5 +23,7 @@ main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
- return (RUN_ALL_TESTS());
+ isc::log::initLogger();
+
+ return (isc::util::unittests::run_all());
}
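The new sqlite3_accessor_unittest.cc added below drives DatabaseAccessor::IteratorContext record by record with long chains of getNext() checks. The loop pattern those checks exercise is roughly the following sketch; it uses only names that appear in the diff, and the <datasrc/database.h> header path is an assumption.

    // Sketch: drain all records of one zone through the accessor, relying on
    // the contract the tests below verify: getNext() fills the column array
    // and returns false once the data is exhausted (and stays false).
    #include <string>
    #include <datasrc/database.h>

    using isc::datasrc::DatabaseAccessor;

    int
    countRecords(DatabaseAccessor& accessor, int zone_id) {
        std::string columns[DatabaseAccessor::COLUMN_COUNT];
        DatabaseAccessor::IteratorContextPtr context(
            accessor.getAllRecords(zone_id));
        int count = 0;
        while (context->getNext(columns)) {
            ++count;
        }
        return (count);
    }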
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..90b2ac1
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,1115 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+
+#include <datasrc/sqlite3_accessor.h>
+
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+#include <boost/lexical_cast.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <fstream>
+#include <sqlite3.h>
+
+using namespace std;
+using namespace isc::datasrc;
+using boost::shared_ptr;
+using boost::lexical_cast;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+// The following file must be non-existent and must not be "creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The "nodir", a non existent directory, is inserted for this purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// New DB file. We don't need this to be a std::string, and given the
+// raw calls we use it in, a const char* is more convenient.
+const char* SQLITE_NEW_DBFILE = TEST_DATA_BUILDDIR "/newdb.sqlite3";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE, "IN"));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST, "IN"),
+ SQLite3Error);
+}
+
+// It rejects broken DB
+TEST(SQLite3Open, brokenDB) {
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB, "IN"),
+ SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY, "IN"));
+}
+
+// Test fixture for querying the db
+class SQLite3AccessorTest : public ::testing::Test {
+public:
+ SQLite3AccessorTest() {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, "IN");
+ }
+ // So it can be re-created with different data
+ void initAccessor(const std::string& filename, const string& rrclass) {
+ accessor.reset(new SQLite3Accessor(filename, rrclass));
+ }
+ // The tested accessor
+ boost::shared_ptr<SQLite3Accessor> accessor;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3AccessorTest, getZone) {
+ std::pair<bool, int> result(accessor->getZone("example.com."));
+ EXPECT_TRUE(result.first);
+ EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3AccessorTest, subZone) {
+ EXPECT_FALSE(accessor->getZone("sub.example.com.").first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3AccessorTest, noZone) {
+ EXPECT_FALSE(accessor->getZone("example.org.").first);
+}
+
+// This zone is there, but in a different class
+TEST_F(SQLite3AccessorTest, noClass) {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, "CH");
+ EXPECT_FALSE(accessor->getZone("example.com.").first);
+}
+
+// This tests the iterator context
+TEST_F(SQLite3AccessorTest, iterator) {
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, "IN");
+
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
+
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getAllRecords(zone_info.second));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+ // Get and check the records, one by one
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200",
+ data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ // Check there's no other
+ EXPECT_FALSE(context->getNext(data));
+
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
+}
+
+TEST(SQLite3Open, getDBNameExample2) {
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, "IN");
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, accessor.getDBName());
+}
+
+TEST(SQLite3Open, getDBNameExampleROOT) {
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, "IN");
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
+}
+
+// Simple function to match records
+void
+checkRecordRow(const std::string columns[],
+ const std::string& field0,
+ const std::string& field1,
+ const std::string& field2,
+ const std::string& field3,
+ const std::string& field4)
+{
+ EXPECT_EQ(field0, columns[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(field1, columns[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(field2, columns[DatabaseAccessor::SIGTYPE_COLUMN]);
+ EXPECT_EQ(field3, columns[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ(field4, columns[DatabaseAccessor::NAME_COLUMN]);
+}
+
+TEST_F(SQLite3AccessorTest, getRecords) {
+ const std::pair<bool, int> zone_info(accessor->getZone("example.com."));
+ ASSERT_TRUE(zone_info.first);
+
+ const int zone_id = zone_info.second;
+ ASSERT_EQ(1, zone_id);
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getRecords("foo.bar", 1));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+ context);
+ EXPECT_FALSE(context->getNext(columns));
+ checkRecordRow(columns, "", "", "", "", "");
+
+ // now try some real searches
+ context = accessor->getRecords("foo.example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "CNAME", "3600", "",
+ "cnametest.example.org.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "CNAME",
+ "CNAME 5 3 3600 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "mail.example.com. CNAME RRSIG NSEC", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+
+ // with no more records, the array should not have been modified
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+
+ context = accessor->getRecords("example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "SOA", "3600", "",
+ "master.example.com. admin.example.com. "
+ "1234 3600 1800 2419200 7200", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "SOA",
+ "SOA 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "NS",
+ "NS 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "",
+ "20 mail.subzone.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "MX",
+ "MX 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 2 7200 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
+ "cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
+ "FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
+ "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
+ "62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
+ "NT+bBZdtV+NujSikhd THb4FYLg2b3Cx9NyJvAVukHp/91HnWu"
+ "G4T36 CzAFrfPwsHIrBz9BsaIQ21VRkcmj7DswfI/i DGd8j6b"
+ "qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
+ "fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
+ "Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
+ "rsjcKZZj660b1M=", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "4456 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // getNext() returning false should mean the array is not altered
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+
+ // check that another getNext does not cause problems
+ EXPECT_FALSE(context->getNext(columns));
+
+ // Try searching for subdomain
+ // There's foo.bar.example.com in the data
+ context = accessor->getRecords("bar.example.com.", zone_id, true);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "A", "3600", "", "192.0.2.1", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // But we shouldn't match mix.example.com here
+ context = accessor->getRecords("ix.example.com.", zone_id, true);
+ EXPECT_FALSE(context->getNext(columns));
+}
+
+TEST_F(SQLite3AccessorTest, findPrevious) {
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns01x."));
+ // Largest name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "com.example.wwww"));
+ // Out of zone after the last name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "org.example."));
+ // Case insensitive?
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS01X."));
+ // The DB contains foo.bar.example.com., which would be in between
+ // these two names. However, that one does not have an NSEC record,
+ // which is how this database recognizes glue data, so it should
+ // be skipped.
+ EXPECT_EQ("example.com.",
+ accessor->findPreviousName(1, "com.example.cname-ext."));
+ // Throw when we are before the origin
+ EXPECT_THROW(accessor->findPreviousName(1, "com.example."),
+ isc::NotImplemented);
+ EXPECT_THROW(accessor->findPreviousName(1, "a.example."),
+ isc::NotImplemented);
+}
+
+TEST_F(SQLite3AccessorTest, findPreviousNoData) {
+ // This one doesn't hold any NSEC records, so it shouldn't work.
+ // The underlying DB/data don't support DNSSEC, so it's not implemented
+ // (does that make sense? Or should a different exception be used here?)
+ EXPECT_THROW(accessor->findPreviousName(3, "com.example.sql2.www."),
+ isc::NotImplemented);
+}
+
+// Test fixture for creating a DB; it automatically deletes the file
+// before the test starts and again when it is done
+class SQLite3Create : public ::testing::Test {
+public:
+ SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+
+ ~SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+};
+
+bool isReadable(const char* filename) {
+ return (std::ifstream(filename).is_open());
+}
+
+TEST_F(SQLite3Create, creationtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+ // Should simply be created
+ SQLite3Accessor accessor(SQLITE_NEW_DBFILE, "IN");
+ ASSERT_TRUE(isReadable(SQLITE_NEW_DBFILE));
+}
+
+TEST_F(SQLite3Create, emptytest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+ // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+
+ // empty, but not locked, so creating it now should work
+ SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN");
+
+ sqlite3_close(db);
+
+ // should work now that we closed it
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
+}
+
+TEST_F(SQLite3Create, lockedtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+ // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+ sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL, NULL);
+
+ // should not be able to open it
+ EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN"),
+ SQLite3Error);
+
+ sqlite3_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+
+ // should work now that the exclusive transaction has been rolled back
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
+}
+
+TEST_F(SQLite3AccessorTest, clone) {
+ shared_ptr<DatabaseAccessor> cloned = accessor->clone();
+ EXPECT_EQ(accessor->getDBName(), cloned->getDBName());
+
+ // The cloned accessor should have a separate connection and search
+ // context, so it should be able to perform searches concurrently with
+ // the original accessor.
+ string columns1[DatabaseAccessor::COLUMN_COUNT];
+ string columns2[DatabaseAccessor::COLUMN_COUNT];
+
+ const std::pair<bool, int> zone_info1(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator1 =
+ accessor->getRecords("foo.example.com.", zone_info1.second);
+ const std::pair<bool, int> zone_info2(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator2 =
+ cloned->getRecords("foo.example.com.", zone_info2.second);
+
+ ASSERT_TRUE(iterator1->getNext(columns1));
+ checkRecordRow(columns1, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+
+ ASSERT_TRUE(iterator2->getNext(columns2));
+ checkRecordRow(columns2, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+}
+
+//
+// Commonly used data for update tests
+//
+const char* const common_expected_data[] = {
+ // Test record already stored in the tested sqlite3 DB file.
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const new_data[] = {
+ // Newly added data commonly used by some of the tests below
+ "newdata.example.com.", "com.example.newdata.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const deleted_data[] = {
+ // Existing data to be removed commonly used by some of the tests below
+ "foo.bar.example.com.", "A", "192.0.2.1"
+};
+
+class SQLite3Update : public SQLite3AccessorTest {
+protected:
+ SQLite3Update() {
+ // Note: if "installing" the test file fails, some of the subsequent
+ // tests would fail.
+ const char *install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ "/test.sqlite3 " TEST_DATA_BUILDDIR
+ "/test.sqlite3.copied";
+ if (system(install_cmd) != 0) {
+ // any exception will do; this is a failure in test setup, but it's
+ // nice to show the command that failed, and it shouldn't be caught
+ isc_throw(isc::Exception,
+ "Error setting up; command failed: " << install_cmd);
+ };
+ initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", "IN");
+ zone_id = accessor->getZone("example.com.").second;
+ another_accessor.reset(new SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/test.sqlite3.copied",
+ "IN"));
+ expected_stored.push_back(common_expected_data);
+ }
+
+ int zone_id;
+ std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
+ std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
+ std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+ std::string diff_params[DatabaseAccessor::DIFF_PARAM_COUNT];
+
+ vector<const char* const*> expected_stored; // placeholder for checkRecords
+ vector<const char* const*> empty_stored; // indicate no corresponding data
+
+ // Another accessor, emulating one running on a different process/thread
+ shared_ptr<SQLite3Accessor> another_accessor;
+ DatabaseAccessor::IteratorContextPtr iterator;
+};
+
+void
+checkRecords(SQLite3Accessor& accessor, int zone_id, const std::string& name,
+ vector<const char* const*> expected_rows)
+{
+ DatabaseAccessor::IteratorContextPtr iterator =
+ accessor.getRecords(name, zone_id);
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ vector<const char* const*>::const_iterator it = expected_rows.begin();
+ while (iterator->getNext(columns)) {
+ ASSERT_TRUE(it != expected_rows.end());
+ checkRecordRow(columns, (*it)[3], (*it)[2], (*it)[4], (*it)[5], "");
+ ++it;
+ }
+ EXPECT_TRUE(it == expected_rows.end());
+}
+
+TEST_F(SQLite3Update, emptyUpdate) {
+ // If we do nothing between start and commit, the zone content
+ // should be intact.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, flushZone) {
+ // With 'replace' being true, startUpdateZone() will flush the existing
+ // zone content.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, readWhileUpdate) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Until commit is done, the other accessor should see the old data
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ // Once the changes are committed, the other accessor will see the new
+ // data.
+ accessor->commit();
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ empty_stored);
+}
+
+TEST_F(SQLite3Update, rollback) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback will revert the change made by startUpdateZone(, true).
+ accessor->rollback();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, rollbackFailure) {
+ // This test emulates a rare scenario where a rollback attempt fails.
+ // The iterator is paused in the middle of getting records, which prevents
+ // the rollback operation at the end of the test.
+
+ string columns[DatabaseAccessor::COLUMN_COUNT];
+ iterator = accessor->getRecords("example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(columns));
+
+ accessor->startUpdateZone("example.com.", true);
+ EXPECT_THROW(accessor->rollback(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitConflict) {
+ // Start reading the DB with another accessor. We stop after a single
+ // call to getNext() so that accessor keeps holding the read lock.
+ iterator = another_accessor->getRecords("foo.example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(get_columns));
+
+ // Due to the getNext() call above, the other accessor holds a DB lock,
+ // which will prevent commit.
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ EXPECT_THROW(accessor->commit(), DataSourceError);
+ accessor->rollback(); // rollback should still succeed
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, updateConflict) {
+ // Similar to the previous case, but this is a conflict with another
+ // update attempt. Note that these two accessors modify disjoint sets
+ // of data; sqlite3 only has a coarse-grained lock so we cannot allow
+ // these updates to run concurrently.
+ EXPECT_TRUE(another_accessor->startUpdateZone("sql1.example.com.",
+ true).first);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+ DataSourceError);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Once we rollback the other attempt of change, we should be able to
+ // start and commit the transaction using the main accessor.
+ another_accessor->rollback();
+ accessor->startUpdateZone("example.com.", true);
+ accessor->commit();
+}
+
+TEST_F(SQLite3Update, duplicateUpdate) {
+ accessor->startUpdateZone("example.com.", false);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", false),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitWithoutTransaction) {
+ EXPECT_THROW(accessor->commit(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, rollbackWithoutTransaction) {
+ EXPECT_THROW(accessor->rollback(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, addRecord) {
+ // Before update, there should be no record for this name
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ // Commit the change, and confirm the new data is still there.
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, addThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ accessor->rollback();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, duplicateAdd) {
+ const char* const dup_data[] = {
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+ };
+ expected_stored.clear();
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Adding exactly the same data. As this backend is "dumb", another
+ // row of the same content will be inserted.
+ copy(dup_data, dup_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ accessor->addRecordToZone(add_columns);
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidAdd) {
+ // An attempt to add before an explicit start of a transaction
+ EXPECT_THROW(accessor->addRecordToZone(add_columns), DataSourceError);
+}
+
+TEST_F(SQLite3Update, deleteRecord) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Commit the change, and confirm the deleted data still isn't there.
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, deleteThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback the change, and confirm the data still exists.
+ accessor->rollback();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, deleteNonexistent) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+
+ // Replace the name with a nonexistent one, then try to delete it.
+ // Nothing should happen.
+ del_params[DatabaseAccessor::DEL_NAME] = "no-such-name.example.com.";
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+
+ // The name exists but the RR type is different. The delete attempt
+ // shouldn't remove the record based on the name alone.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_TYPE] = "AAAA";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Similar to the previous case, but RDATA is different.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_RDATA] = "192.0.2.2";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidDelete) {
+ // An attempt to delete before an explicit start of a transaction
+ EXPECT_THROW(accessor->deleteRecordInZone(del_params), DataSourceError);
+}
+
+TEST_F(SQLite3Update, emptyTransaction) {
+ // A generic transaction without doing anything inside it. Just check
+ // it doesn't throw or break the database.
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->startTransaction();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->commit();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, duplicateTransaction) {
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, transactionInUpdate) {
+ accessor->startUpdateZone("example.com.", true);
+ EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateInTransaction) {
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateWithTransaction) {
+ // Start a read-only transaction, wherein we execute two reads.
+ // Meanwhile we start a write (update) transaction. The commit attempt
+ // for the write transaction will fail due to the lock held by the read
+ // transaction. The database should be intact.
+ another_accessor->startTransaction();
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+ EXPECT_THROW(accessor->commit(), DataSourceError);
+
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+ another_accessor->commit(); // this shouldn't throw
+}
+
+TEST_F(SQLite3Update, updateWithoutTransaction) {
+ // Similar to the previous test, but reads are not protected in a
+ // transaction. So the write transaction will succeed and flush the DB,
+ // and the result of the second read is different from the first.
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+ accessor->commit();
+
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ empty_stored);
+}
+
+TEST_F(SQLite3Update, concurrentTransactions) {
+ // Two read-only transactions can coexist (unlike the read vs. write case).
+ // Start one transaction.
+ accessor->startTransaction();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Start a new one.
+ another_accessor->startTransaction();
+
+ // The second transaction doesn't affect the first or vice versa.
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ // Commit should be successful for both transactions.
+ accessor->commit();
+ another_accessor->commit();
+}
+
+//
+// Commonly used data for diff related tests. The last two entries are
+// a textual representation of "version" and a textual representation of
+// the diff operation (either DIFF_ADD_TEXT or DIFF_DELETE_TEXT). We use this
+// format for the convenience of generating test data and checking the results.
+//
+const char* const DIFF_ADD_TEXT = "0";
+const char* const DIFF_DELETE_TEXT = "1";
+const char* const diff_begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_del_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.1", "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_end_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ "1300", DIFF_ADD_TEXT
+};
+const char* const diff_add_a_data[] = {
+ "dns01.example.com.", "A", "3600", "192.0.2.10", "1234", DIFF_ADD_TEXT
+};
+
+// The following two are helper functions to convert textual test data
+// to integral zone ID and diff operation.
+int
+getVersion(const char* const diff_data[]) {
+ return (lexical_cast<int>(diff_data[DatabaseAccessor::DIFF_PARAM_COUNT]));
+}
+
+DatabaseAccessor::DiffOperation
+getOperation(const char* const diff_data[]) {
+ return (static_cast<DatabaseAccessor::DiffOperation>(
+ lexical_cast<int>(
+ diff_data[DatabaseAccessor::DIFF_PARAM_COUNT + 1])));
+}
+
+// Common checker function that compares expected and actual sequence of
+// diffs.
+void
+checkDiffs(const vector<const char* const*>& expected,
+ const vector<vector<string> >& actual)
+{
+ EXPECT_EQ(expected.size(), actual.size());
+ const size_t n_diffs = std::min(expected.size(), actual.size());
+ for (size_t i = 0; i < n_diffs; ++i) {
+ for (size_t j = 0; j < actual[i].size(); ++j) {
+ EXPECT_EQ(expected[i][j], actual[i][j]);
+ }
+ }
+}
+
+TEST_F(SQLite3Update, addRecordDiff) {
+ // A simple case of adding diffs: just changing the SOA, and confirm
+ // the diffs are stored as expected.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Until the diffs are committed, they are not visible to other accessors.
+ EXPECT_TRUE(another_accessor->getRecordDiff(zone_id).empty());
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ // Now it should be visible to others, too.
+ checkDiffs(expected_stored, another_accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addRecordOfLargeSerial) {
+ // This is essentially the same as the previous test, but using a
+ // very large "version" (SOA serial), which is actually the largest
+ // possible value, to confirm the internal code doesn't have an overflow
+ // bug or other failure due to the large value.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ const char* const begin_data[] = {
+ "example.com.", "SOA", "3600",
+ "ns.example.com. admin.example.com. 4294967295 3600 1800 2419200 7200",
+ "4294967295", DIFF_DELETE_TEXT
+ };
+
+ copy(begin_data, begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ // For "serial" parameter, we intentionally hardcode the value rather
+ // than converting it from the data.
+ accessor->addRecordDiff(zone_id, 0xffffffff, getOperation(diff_begin_data),
+ diff_params);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(begin_data);
+ expected_stored.push_back(diff_end_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithoutUpdate) {
+ // Right now we require startUpdateZone() prior to performing
+ // addRecordDiff.
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+
+ // For now, we don't allow adding diffs in a general transaction either.
+ accessor->startTransaction();
+ EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffWithBadZoneID) {
+ // For now, we require the zone ID passed to addRecordDiff() to be equal
+ // to that of the zone being updated.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id + 1,
+ getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffRollback) {
+ // Rollback tentatively added diffs. This is no different from the
+ // update case, but we test it explicitly just in case.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+ accessor->rollback();
+
+ EXPECT_TRUE(accessor->getRecordDiff(zone_id).empty());
+}
+
+TEST_F(SQLite3Update, addDiffInBadOrder) {
+ // At this level, the API is naive, and doesn't care if the diff sequence
+ // is a valid IXFR order.
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Add diff of 'end', then 'begin'
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_begin_data);
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithUpdate) {
+ // A more realistic example: add corresponding diffs while updating zone.
+ // Implementation-wise, there should be no reason this could fail if
+ // the basic tests so far pass. But we check it in case we missed something.
+
+ const char* const old_a_record[] = {
+ "dns01.example.com.", "A", "192.0.2.1"
+ };
+ const char* const new_a_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "192.0.2.10"
+ };
+ const char* const old_soa_record[] = {
+ "example.com.", "SOA",
+ "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+ };
+ const char* const new_soa_record[] = {
+ "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+ "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+ };
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ // Delete SOA (and add that diff)
+ copy(old_soa_record, old_soa_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data), diff_params);
+
+ // Delete A
+ copy(old_a_record, old_a_record + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ copy(diff_del_a_data, diff_del_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_del_a_data),
+ getOperation(diff_del_a_data), diff_params);
+
+ // Add SOA
+ copy(new_soa_record, new_soa_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+ getOperation(diff_end_data), diff_params);
+
+ // Add A
+ copy(new_a_record, new_a_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+ copy(diff_add_a_data, diff_add_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ accessor->addRecordDiff(zone_id, getVersion(diff_add_a_data),
+ getOperation(diff_add_a_data), diff_params);
+
+ accessor->commit();
+
+ expected_stored.clear();
+ expected_stored.push_back(diff_begin_data);
+ expected_stored.push_back(diff_del_a_data);
+ expected_stored.push_back(diff_end_data);
+ expected_stored.push_back(diff_add_a_data);
+
+ checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithNoTable) {
+ // An attempt to add diffs to an old version of the database that doesn't
+ // have a diffs table. This will fail when preparing the statement.
+ initAccessor(SQLITE_DBFILE_EXAMPLE + ".nodiffs", "IN");
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+ diff_params);
+ EXPECT_THROW(accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+ getOperation(diff_begin_data),
+ diff_params),
+ SQLite3Error);
+}
+} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index a11e889..4c9fe42 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -53,6 +53,7 @@ protected:
// NOTE: in addition, the order of the following items matter.
authors_data.push_back("Chen Zhengzhang");
+ authors_data.push_back("Dmitriy Volodin");
authors_data.push_back("Evan Hunt");
authors_data.push_back("Haidong Wang");
authors_data.push_back("Han Feng");
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
new file mode 100644
index 0000000..64ae955
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -0,0 +1,6 @@
+CLEANFILES = *.copied
+BUILT_SOURCES = rwtest.sqlite3.copied
+
+# We use install-sh with the -m option to make sure it's writable
+rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
+ $(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
new file mode 100644
index 0000000..ce95a1d
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3 b/src/lib/datasrc/tests/testdata/test.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3 and b/src/lib/datasrc/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/datasrc/tests/zonetable_unittest.cc b/src/lib/datasrc/tests/zonetable_unittest.cc
index a117176..fa74c0e 100644
--- a/src/lib/datasrc/tests/zonetable_unittest.cc
+++ b/src/lib/datasrc/tests/zonetable_unittest.cc
@@ -18,7 +18,7 @@
#include <dns/rrclass.h>
#include <datasrc/zonetable.h>
-// We use MemoryZone to put something into the table
+// We use InMemoryZone to put something into the table
#include <datasrc/memory_datasrc.h>
#include <gtest/gtest.h>
@@ -28,31 +28,32 @@ using namespace isc::datasrc;
namespace {
TEST(ZoneTest, init) {
- MemoryZone zone(RRClass::IN(), Name("example.com"));
+ InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
EXPECT_EQ(Name("example.com"), zone.getOrigin());
EXPECT_EQ(RRClass::IN(), zone.getClass());
- MemoryZone ch_zone(RRClass::CH(), Name("example"));
+ InMemoryZoneFinder ch_zone(RRClass::CH(), Name("example"));
EXPECT_EQ(Name("example"), ch_zone.getOrigin());
EXPECT_EQ(RRClass::CH(), ch_zone.getClass());
}
TEST(ZoneTest, find) {
- MemoryZone zone(RRClass::IN(), Name("example.com"));
- EXPECT_EQ(Zone::NXDOMAIN,
+ InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
zone.find(Name("www.example.com"), RRType::A()).code);
}
class ZoneTableTest : public ::testing::Test {
protected:
- ZoneTableTest() : zone1(new MemoryZone(RRClass::IN(),
- Name("example.com"))),
- zone2(new MemoryZone(RRClass::IN(),
- Name("example.net"))),
- zone3(new MemoryZone(RRClass::IN(), Name("example")))
+ ZoneTableTest() : zone1(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example.com"))),
+ zone2(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example.net"))),
+ zone3(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example")))
{}
ZoneTable zone_table;
- ZonePtr zone1, zone2, zone3;
+ ZoneFinderPtr zone1, zone2, zone3;
};
TEST_F(ZoneTableTest, addZone) {
@@ -60,7 +61,8 @@ TEST_F(ZoneTableTest, addZone) {
EXPECT_EQ(result::EXIST, zone_table.addZone(zone1));
// names are compared in a case insensitive manner.
EXPECT_EQ(result::EXIST, zone_table.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("EXAMPLE.COM")))));
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("EXAMPLE.COM")))));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone2));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone3));
@@ -68,11 +70,11 @@ TEST_F(ZoneTableTest, addZone) {
// Zone table is indexed only by name. Duplicate origin name with
// different zone class isn't allowed.
EXPECT_EQ(result::EXIST, zone_table.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("example.com")))));
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("example.com")))));
/// Bogus zone (NULL)
- EXPECT_THROW(zone_table.addZone(ZonePtr()), isc::InvalidParameter);
+ EXPECT_THROW(zone_table.addZone(ZoneFinderPtr()), isc::InvalidParameter);
}
TEST_F(ZoneTableTest, DISABLED_removeZone) {
@@ -95,7 +97,7 @@ TEST_F(ZoneTableTest, findZone) {
EXPECT_EQ(result::NOTFOUND,
zone_table.findZone(Name("example.org")).code);
- EXPECT_EQ(ConstZonePtr(),
+ EXPECT_EQ(ConstZoneFinderPtr(),
zone_table.findZone(Name("example.org")).zone);
// there's no exact match. the result should be the longest match,
@@ -107,7 +109,7 @@ TEST_F(ZoneTableTest, findZone) {
// make sure the partial match is indeed the longest match by adding
// a zone with a shorter origin and query again.
- ZonePtr zone_com(new MemoryZone(RRClass::IN(), Name("com")));
+ ZoneFinderPtr zone_com(new InMemoryZoneFinder(RRClass::IN(), Name("com")));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone_com));
EXPECT_EQ(Name("example.com"),
zone_table.findZone(Name("www.example.com")).zone->getOrigin());
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 1252c94..fa1c744 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -15,59 +15,128 @@
#ifndef __ZONE_H
#define __ZONE_H 1
-#include <datasrc/result.h>
+#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#include <datasrc/result.h>
+
namespace isc {
namespace datasrc {
-/// \brief The base class for a single authoritative zone
+/// \brief The base class to search a zone for RRsets
///
-/// The \c Zone class is an abstract base class for representing
-/// a DNS zone as part of data source.
+/// The \c ZoneFinder class is an abstract base class for representing
+/// an object that performs DNS lookups in a specific zone accessible via
+/// a data source. In general, different types of data sources (in-memory,
+/// database-based, etc) define their own derived classes of \c ZoneFinder,
+/// implementing ways to retrieve the required data through the common
+/// interfaces declared in the base class. Each concrete \c ZoneFinder
+/// object is therefore (conceptually) associated with a specific zone
+/// of one specific data source instance.
///
-/// At the moment this is provided mainly for making the \c ZoneTable class
-/// and the authoritative query logic testable, and only provides a minimal
-/// set of features.
-/// This is why this class is defined in the same header file, but it may
-/// have to move to a separate header file when we understand what is
-/// necessary for this class for actual operation.
+/// The origin name and the RR class of the associated zone are available
+/// via the \c getOrigin() and \c getClass() methods, respectively.
///
-/// The idea is to provide a specific derived zone class for each data
-/// source, beginning with in memory one. At that point the derived classes
-/// will have more specific features. For example, they will maintain
-/// information about the location of a zone file, whether it's loaded in
-/// memory, etc.
+/// The most important method of this class is \c find(), which performs
+/// the lookup for a given domain and type. See the description of the
+/// method for details.
///
-/// It's not yet clear how the derived zone classes work with various other
-/// data sources when we integrate these components, but one possibility is
-/// something like this:
-/// - If the underlying database such as some variant of SQL doesn't have an
-/// explicit representation of zones (as part of public interface), we can
-/// probably use a "default" zone class that simply encapsulates the
-/// corresponding data source and calls a common "find" like method.
-/// - Some data source may want to specialize it by inheritance as an
-/// optimization. For example, in the current schema design of the sqlite3
-/// data source, its (derived) zone class would contain the information of
-/// the "zone ID".
-///
-/// <b>Note:</b> Unlike some other abstract base classes we don't name the
-/// class beginning with "Abstract". This is because we want to have
-/// commonly used definitions such as \c Result and \c ZonePtr, and we want
-/// to make them look more intuitive.
-class Zone {
+/// \note It's not clear whether we should request that a zone finder form a
+/// "transaction", that is, whether to ensure the finder is not susceptible
+/// to changes made by someone other than the creator of the finder. If we
+/// don't request that, for example, two different lookup results for the
+/// same name and type can be different if other threads or programs make
+/// updates to the zone between the lookups. We should revisit this point
+/// as we gain more experience.
+class ZoneFinder {
public:
/// Result codes of the \c find() method.
///
/// Note: the codes are tentative. We may need more, or we may find
/// some of them unnecessary as we implement more details.
+ ///
+ /// Some are synonyms of others in terms of the RCODE returned to the user.
+ /// But they help the logic decide whether it should ask for an NSEC
+ /// that covers something or not (for example, in case of NXRRSET,
+ /// the directly returned NSEC is sufficient, but with the wildcard one,
+ /// we need to add another one proving there's no exact match and that
+ /// this is actually the best wildcard we have). Data sources that don't
+ /// support DNSSEC don't need to distinguish them.
+ ///
+ /// In case of CNAME, if the CNAME is a wildcard (i.e., its owner name
+ /// starts with the label "*"), WILDCARD_CNAME will be returned instead
+ /// of CNAME.
+ ///
+ /// In case of NXDOMAIN, the returned NSEC covers the queried domain
+ /// and proves that the query name does not exist in the zone. Note that
+ /// this does not necessarily prove it doesn't even match a wildcard
+ /// (even though an NXDOMAIN result can only happen when there's no
+ /// matching wildcard either). It is the caller's responsibility to provide
+ /// a proof that there is no matching wildcard if that proof is necessary.
+ ///
+ /// The various "no data" cases are complicated when DNSSEC and
+ /// wildcard processing are involved. Referring to Section 3.1.3 of
+ /// RFC4035, we need to consider the following cases:
+ /// -# (Normal) no data: there is a matching non-wildcard name with a
+ /// different RR type. This is the "No Data" case of the RFC.
+ /// -# (Normal) empty non terminal: there is no matching (exact or
+ /// wildcard) name, but there is a subdomain with an RR of the query
+ /// name. This is one case of "Name Error" of the RFC.
+ /// -# Wildcard empty non terminal: similar to case 2, but the empty name
+ /// is a wildcard, and matches the query name by wildcard expansion.
+ /// This is a special case of "Name Error" of the RFC.
+ /// -# Wildcard no data: there is no exact match name, but there is a
+ /// wildcard name that matches the query name with a different type
+ /// of RR. This is the "Wildcard No Data" case of the RFC.
+ ///
+ /// In any case, \c find() will result in \c NXRRSET with no RRset
+ /// unless the \c FIND_DNSSEC option is specified. The rest of the
+ /// discussion only applies to the case where this option is specified.
+ ///
+ /// In case 1, \c find() will result in NXRRSET, and return NSEC of the
+ /// matching name.
+ ///
+ /// In case 2, \c find() will result in NXRRSET, and return NSEC for the
+ /// interval where the empty nonterminal lives. The end of the interval
+ /// is the subdomain causing the existence of the empty nonterminal (if
+ /// there's sub.x.example.com, and no record in x.example.com, then
+ /// x.example.com exists implicitly: it is the empty nonterminal, and
+ /// sub.x.example.com is the subdomain causing it). Note that this NSEC
+ /// proves not only the existence of the empty nonterminal name but also
+ /// the non-existence of a possibly matching wildcard name, because
+ /// there can be no better wildcard match than the exact matching empty
+ /// name.
+ ///
+ /// In case 3, \c find() will result in WILDCARD_NXRRSET, and return NSEC
+ /// for the interval where the wildcard empty nonterminal lives.
+ /// Cases 2 and 3 are especially complicated and confusing. See the
+ /// examples below.
+ ///
+ /// In case 4, \c find() will result in WILDCARD_NXRRSET, and return
+ /// NSEC of the matching wildcard name.
+ ///
+ /// Examples: if zone "example.com" has the following record:
+ /// \code
+ /// a.example.com. NSEC a.b.example.com.
+ /// \endcode
+ /// a call to \c find() for "b.example.com." with the FIND_DNSSEC option
+ /// will result in NXRRSET, and this NSEC will be returned.
+ /// Likewise, if zone "example.org" has the following record,
+ /// \code
+ /// a.example.org. NSEC x.*.b.example.org.
+ /// \endcode
+ /// a call to \c find() for "y.b.example.org" with FIND_DNSSEC will
+ /// result in WILDCARD_NXRRSET, and this NSEC will be returned.
enum Result {
SUCCESS, ///< An exact match is found.
DELEGATION, ///< The search encounters a zone cut.
NXDOMAIN, ///< There is no domain name that matches the search name
NXRRSET, ///< There is a matching name but no RRset of the search type
CNAME, ///< The search encounters and returns a CNAME RR
- DNAME ///< The search encounters and returns a DNAME RR
+ DNAME, ///< The search encounters and returns a DNAME RR
+ WILDCARD, ///< Success by wildcard match, for DNSSEC
+ WILDCARD_CNAME, ///< CNAME on wildcard, search returns CNAME, for DNSSEC
+ WILDCARD_NXRRSET ///< NXRRSET on wildcard, for DNSSEC
};
/// A helper structure to represent the search result of \c find().
@@ -107,7 +176,12 @@ public:
/// performed on these values to express compound options.
enum FindOptions {
FIND_DEFAULT = 0, ///< The default options
- FIND_GLUE_OK = 1 ///< Allow search under a zone cut
+ FIND_GLUE_OK = 1, ///< Allow search under a zone cut
+ FIND_DNSSEC = 2, ///< Require DNSSEC data in the answer
+ ///< (RRSIG, NSEC, etc.). The implementation
+ ///< is allowed to include it even if it is
+ ///< not set.
+ NO_WILDCARD = 4 ///< Do not try wildcard matching.
};
///
@@ -119,10 +193,10 @@ protected:
///
/// This is intentionally defined as \c protected as this base class should
/// never be instantiated (except as part of a derived class).
- Zone() {}
+ ZoneFinder() {}
public:
/// The destructor.
- virtual ~Zone() {}
+ virtual ~ZoneFinder() {}
//@}
///
@@ -131,14 +205,14 @@ public:
/// These methods should never throw an exception.
//@{
/// Return the origin name of the zone.
- virtual const isc::dns::Name& getOrigin() const = 0;
+ virtual isc::dns::Name getOrigin() const = 0;
/// Return the RR class of the zone.
- virtual const isc::dns::RRClass& getClass() const = 0;
+ virtual isc::dns::RRClass getClass() const = 0;
//@}
///
- /// \name Search Method
+ /// \name Search Methods
///
//@{
/// Search the zone for a given pair of domain name and RR type.
@@ -147,6 +221,7 @@ public:
/// for the data that best matches the given name and type.
/// This method is expected to be "intelligent", and identifies the
/// best possible answer for the search key. Specifically,
+ ///
/// - If the search name belongs under a zone cut, it returns the code
/// of \c DELEGATION and the NS RRset at the zone cut.
/// - If there is no matching name, it returns the code of \c NXDOMAIN,
@@ -165,13 +240,15 @@ public:
/// - If the target isn't NULL, all RRsets under the domain are inserted
/// there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned
/// instead of normal processing. This is intended to handle ANY query.
- /// \note: this behavior is controversial as we discussed in
- /// https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html
- /// We should revisit the interface before we heavily rely on it.
+ ///
+ /// \note This behavior is controversial as we discussed in
+ /// https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html
+ /// We should revisit the interface before we heavily rely on it.
///
/// The \c options parameter specifies customized behavior of the search.
- /// Their semantics is as follows:
- /// - \c GLUE_OK Allow search under a zone cut. By default the search
+ /// Their semantics are as follows (they are OR-able bit-field flags):
+ ///
+ /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
/// will stop once it encounters a zone cut. If this option is specified
/// it remembers information about the highest zone cut and continues
/// the search until it finds an exact match for the given name or it
@@ -179,6 +256,13 @@ public:
/// RRsets for that name are searched just like the normal case;
/// otherwise, if the search has encountered a zone cut, \c DELEGATION
/// with the information of the highest zone cut will be returned.
+ /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+ /// returned with the answer. It is allowed for the data source to
+ /// include them even when not requested.
+ /// - \c NO_WILDCARD Do not try wildcard matching. This option is of no
+ /// use for normal lookups; it's intended to be used to get a DNSSEC
+ /// proof of the non-existence of any matching wildcard or the non-existence
+ /// of an exact match when a wildcard match is found.
///
/// A derived version of this method may involve internal resource
/// allocation, especially for constructing the resulting RRset, and may
@@ -197,18 +281,273 @@ public:
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options
- = FIND_DEFAULT) const = 0;
+ = FIND_DEFAULT) = 0;
+
+ /// \brief Get previous name in the zone
+ ///
+ /// Gets the previous name in the DNSSEC order. This can be used
+ /// to find the correct NSEC records for proving nonexistence
+ /// of domains.
+ ///
+ /// The concrete implementation might throw anything it thinks appropriate,
+ /// however it is recommended to stick to the ones listed here. The user
+ /// of this method should be able to handle any exceptions.
+ ///
+ /// This method does not include under-zone-cut data (glue data).
+ ///
+ /// \param query The name for which we look for a previous one. The
+ /// queried name doesn't have to exist in the zone.
+ /// \return The preceding name
+ ///
+ /// \throw NotImplemented in case the data source backend doesn't support
+ /// DNSSEC or there is no previous name in the zone (NSEC records might be
+ /// missing in the DB, or the queried name is less than or equal to the apex).
+ /// \throw DataSourceError for low-level or internal datasource errors
+ /// (like broken connection to database, wrong data living there).
+ /// \throw std::bad_alloc For allocation errors.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const = 0;
//@}
};
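
A minimal illustrative sketch (hypothetical caller code, not part of the patch): this is one way the result codes and options documented above might be consumed. The header names and the helper function are assumptions for the sketch; only ZoneFinder, find(), FIND_DNSSEC and the .code member of the returned result structure (as also used in the zonetable tests below) are taken from this change.

#include <datasrc/zone.h>
#include <dns/name.h>
#include <dns/rrtype.h>

using namespace isc::datasrc;
using namespace isc::dns;

// Hypothetical caller code: decide whether an answer obtained with
// FIND_DNSSEC still needs an extra NSEC proof supplied by the caller.
bool
needsExtraProof(ZoneFinder& finder, const Name& qname, const RRType& qtype) {
    switch (finder.find(qname, qtype, NULL, ZoneFinder::FIND_DNSSEC).code) {
    case ZoneFinder::WILDCARD:
    case ZoneFinder::WILDCARD_CNAME:
    case ZoneFinder::WILDCARD_NXRRSET:
        // The answer came via wildcard matching; per the documentation
        // above the caller may still have to prove there is no exact match.
        return (true);
    case ZoneFinder::NXDOMAIN:
        // The returned NSEC proves the name doesn't exist, but not
        // necessarily that no wildcard matches it.
        return (true);
    default:
        // SUCCESS, DELEGATION, NXRRSET, CNAME, DNAME: the returned data
        // (plus NSEC, if any) is treated as sufficient in this sketch.
        return (false);
    }
}
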
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<Zone> ZonePtr;
+/// \brief Operator to combine FindOptions
+///
+/// We would need to manually static-cast the options if we ORed them
+/// together, which is undesirable with bit-flag options. Therefore
+/// we hide the cast here, which is the simplest solution and still
+/// provides a reasonable level of type safety.
+inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
+ ZoneFinder::FindOptions b)
+{
+ return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
+ static_cast<unsigned>(b)));
+}
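
For example (hypothetical caller code, not in the patch), two of the flags declared above can now be combined directly:

// Assuming: using isc::datasrc::ZoneFinder;
// Request DNSSEC data and suppress wildcard matching in one lookup;
// without the operator above this would require an explicit static_cast.
const ZoneFinder::FindOptions options =
    ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD;
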
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<const Zone> ConstZonePtr;
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
-}
-}
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
+
+/// The base class to make updates to a single zone.
+///
+/// On construction, each derived class object will start a "transaction"
+/// for making updates to a specific zone (this means a constructor of
+/// a derived class would normally take parameters to identify the zone
+/// to be updated). The underlying realization of a "transaction" will differ
+/// for different derived classes; if it uses a general purpose database
+/// as a backend, it will involve performing some form of "begin transaction"
+/// statement for the database.
+///
+/// Updates (adding or deleting RRs) are made via \c addRRset() and
+/// \c deleteRRset() methods. Until the \c commit() method is called the
+/// changes are local to the updater object. For example, they won't be
+/// visible via a \c ZoneFinder object except the one returned by the
+/// updater's own \c getFinder() method. The \c commit() completes the
+/// transaction and makes the changes visible to others.
+///
+/// This class does not provide an explicit "rollback" interface. If
+/// something wrong or unexpected happens during the updates and the
+/// caller wants to cancel the intermediate updates, the caller should
+/// simply destruct the updater object without calling \c commit().
+/// The destructor is supposed to perform the "rollback" operation,
+/// depending on the internal details of the derived class.
+///
+/// \note This initial implementation provides a quite simple interface for
+/// adding and deleting RRs (see the description of the related methods).
+/// It may be revisited as we gain more experience.
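
To make the calling pattern described above concrete, here is a rough, hypothetical sketch. How a concrete updater instance is obtained is outside this excerpt; deleteRRset() is assumed to take the RRset to delete, mirroring addRRset(), while commit() and getFinder() are as described in the comments above.

#include <datasrc/zone.h>
#include <dns/rrset.h>

// Hypothetical application code, not part of the patch.
void
applyChange(isc::datasrc::ZoneUpdater& updater,
            const isc::dns::RRset& rrset_to_delete,
            const isc::dns::RRset& rrset_to_add)
{
    // Changes are local to this updater until commit() is called.
    updater.deleteRRset(rrset_to_delete);
    updater.addRRset(rrset_to_add);

    // The updater's own finder already reflects the pending changes,
    // while other ZoneFinder objects still see the old data.
    isc::datasrc::ZoneFinder& finder = updater.getFinder();
    static_cast<void>(finder);  // the intermediate state could be checked here

    // Make the changes visible to others. Destroying the updater without
    // calling commit() would roll them back instead.
    updater.commit();
}
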
+class ZoneUpdater {
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as protected to ensure that this base
+ /// class is never instantiated directly.
+ ZoneUpdater() {}
+
+public:
+ /// The destructor
+ ///
+ /// Each derived class implementation must ensure that if \c commit()
+ /// has not been performed by the time the destructor is called, then it
+ /// "rolls back" the updates made via the updater so far.
+ virtual ~ZoneUpdater() {}
+
+ /// Return a finder for the zone being updated.
+ ///
+ /// The returned finder provides the functionalities of \c ZoneFinder
+ /// for the zone as updates are made via the updater. That is, before
+ /// making any update, the finder will be able to find all RRsets that
+ /// exist in the zone at the time the updater is created. If RRsets
+ /// are added or deleted via \c addRRset() or \c deleteRRset(),
+ /// this finder will find the added ones or miss the deleted ones
+ /// respectively.
+ ///
+ /// The finder returned by this method is effective only while the updates
+ /// are performed, i.e., from the construction of the corresponding
+ /// updater until \c commit() is performed or the updater is destructed
+ /// without commit. The result of a subsequent call to this method (or
+ /// the use of the result) after that is undefined.
+ ///
+ /// \return A reference to a \c ZoneFinder for the updated zone
+ virtual ZoneFinder& getFinder() = 0;
+
+ /// Add an RRset to a zone via the updater
+ ///
+ /// This may be revisited in a future version, but right now the intended
+ /// behavior of this method is simple: It "naively" adds the specified
+ /// RRset to the zone specified on creation of the updater.
+ /// It performs a minimal level of validation on the specified RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// and otherwise does not check any oddity. For example, it doesn't
+ /// check whether the owner name of the specified RRset is a subdomain
+ /// of the zone's origin; it doesn't care whether or not there is already
+ /// an RRset of the same name and RR type in the zone, and if there is,
+ /// whether any of the existing RRs have duplicate RDATA with the added
+ /// ones. If these conditions matter the calling application must examine
+ /// the existing data beforehand using the \c ZoneFinder returned by
+ /// \c getFinder().
+ ///
+ /// The validation requirement on the associated RRSIG is temporary.
+ /// If we find it more reasonable and useful to allow adding a pair of an
+ /// RRset and its RRSIG RRset as we gain experience with the interface,
+ /// we may remove this restriction. Until then we explicitly check it
+ /// to prevent accidental misuse.
+ ///
+ /// Conceptually, on successful call to this method, the zone will have
+ /// the specified RRset, and if there is already an RRset of the same
+ /// name and RR type, these two sets will be "merged". "Merged" means
+ /// that a subsequent call to \c ZoneFinder::find() for the name and type
+ /// will result in success and the returned RRset will contain all
+ /// previously existing and newly added RDATAs with the TTL being the
+ /// minimum of the two RRsets. The underlying representation of the
+ /// "merged" RRsets may vary depending on the characteristic of the
+ /// underlying data source. For example, if it uses a general purpose
+ /// database that stores each RR of the same RRset separately, it may
+ /// simply be a larger set of RRs based on both the existing and added
+ /// RRsets; the TTLs of the RRs may be different within the database, and
+ /// there may even be duplicate RRs in different database rows. As long
+ /// as the RRset returned via \c ZoneFinder::find() conforms to the
+ /// concept of "merge", the actual internal representation is up to the
+ /// implementation.
+ ///
+ /// This method must not be called once \c commit() is performed. If it
+ /// is called after \c commit(), the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experience:
+ ///
+ /// - we may want to check (and maybe reject) if there is already a
+ /// duplicate RR (that has the same RDATA).
+ /// - we may want to check (and maybe reject) if there is already an
+ /// RRset of the same name and RR type with different TTL
+ /// - we may even want to check if there is already any RRset of the
+ /// same name and RR type.
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there's a duplicate, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be added
+ virtual void addRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Delete an RRset from a zone via the updater
+ ///
+ /// Like \c addRRset(), the detailed semantics and behavior of this method
+ /// may have to be revisited in a future version. The following are
+ /// based on the initial implementation decisions.
+ ///
+ /// On successful completion of this method, it will remove from the zone
+ /// the RRs of the specified owner name and RR type that match one of
+ /// the RDATAs of the specified RRset. There are several points to be
+ /// noted:
+ /// - Existing RRs that don't match any of the specified RDATAs will
+ /// remain in the zone.
+ /// - Any RRs of the specified RRset that don't exist in the zone will
+ /// simply be ignored; the implementation of this method is not supposed
+ /// to check that condition.
+ /// - The TTL of the RRset is ignored; matching is only performed by
+ /// the owner name, RR type and RDATA
+ ///
+ /// Ignoring the TTL may not look sensible, but it's based on the
+ /// observation that it leads to a more intuitive result, especially
+ /// when the underlying data source is a general purpose database.
+ /// See also \c DatabaseAccessor::deleteRecordInZone() on this point.
+ /// It also matches the dynamic update protocol (RFC2136), where TTLs
+ /// are ignored when deleting RRs.
+ ///
+ /// \note Since the TTL is ignored, this method could take the RRset
+ /// to be deleted as a tuple of name, RR type, and a list of RDATAs.
+ /// But in practice, it's quite likely that the caller already has the
+ /// RRset in the form of an \c RRset object (e.g., extracted from a
+ /// dynamic update request message), so this interface is likely more
+ /// convenient. If that turns out not to be the case, we can change or
+ /// extend the method signature.
+ ///
+ /// This method performs a minimum level of validation on the specified
+ /// RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// This method must not be called once \c commit() is performed. If it
+ /// is called after \c commit(), the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experience:
+ ///
+ /// - we may want to check (and maybe reject) if some or all of the RRs
+ /// for the specified RRset don't exist in the zone
+ /// - we may want to allow an option to "delete everything" for specified
+ /// name and/or specified name + RR type.
+ /// - as mentioned above, we may want to include the TTL in matching the
+ /// deleted RRs
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there are any specified RRs that don't
+ /// exist, the number of actually deleted RRs, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be deleted
+ virtual void deleteRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Commit the updates made in the updater to the zone
+ ///
+ /// This method completes the "transaction" started at the creation
+ /// of the updater. After successful completion of this method, the
+ /// updates will be visible outside the scope of the updater.
+ /// The actual internal behavior will differ among derived classes.
+ /// For a derived class with a general purpose database as a backend,
+ /// for example, this method would perform a "commit" statement for the
+ /// database.
+ ///
+ /// This operation can be performed at most once. A duplicate call
+ /// must result in a \c DataSourceError exception.
+ ///
+ /// \exception DataSourceError Duplicate call of the method,
+ /// internal data source error
+ virtual void commit() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
+typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+
+} // end of datasrc
+} // end of isc
#endif // __ZONE_H
diff --git a/src/lib/datasrc/zonetable.cc b/src/lib/datasrc/zonetable.cc
index bc09286..644861c 100644
--- a/src/lib/datasrc/zonetable.cc
+++ b/src/lib/datasrc/zonetable.cc
@@ -28,8 +28,8 @@ namespace datasrc {
/// \short Private data and implementation of ZoneTable
struct ZoneTable::ZoneTableImpl {
// Type aliases to make it shorter
- typedef RBTree<Zone> ZoneTree;
- typedef RBNode<Zone> ZoneNode;
+ typedef RBTree<ZoneFinder> ZoneTree;
+ typedef RBNode<ZoneFinder> ZoneNode;
// The actual storage
ZoneTree zones_;
@@ -40,7 +40,7 @@ struct ZoneTable::ZoneTableImpl {
*/
// Implementation of ZoneTable::addZone
- result::Result addZone(ZonePtr zone) {
+ result::Result addZone(ZoneFinderPtr zone) {
// Sanity check
if (!zone) {
isc_throw(InvalidParameter,
@@ -85,12 +85,12 @@ struct ZoneTable::ZoneTableImpl {
break;
// We have no data there, so translate the pointer to NULL as well
case ZoneTree::NOTFOUND:
- return (FindResult(result::NOTFOUND, ZonePtr()));
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
// Can Not Happen
default:
assert(0);
// Because of warning
- return (FindResult(result::NOTFOUND, ZonePtr()));
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
}
// Can Not Happen (remember, NOTFOUND is handled)
@@ -108,7 +108,7 @@ ZoneTable::~ZoneTable() {
}
result::Result
-ZoneTable::addZone(ZonePtr zone) {
+ZoneTable::addZone(ZoneFinderPtr zone) {
return (impl_->addZone(zone));
}
diff --git a/src/lib/datasrc/zonetable.h b/src/lib/datasrc/zonetable.h
index 5b873d1..5a34480 100644
--- a/src/lib/datasrc/zonetable.h
+++ b/src/lib/datasrc/zonetable.h
@@ -41,11 +41,11 @@ namespace datasrc {
class ZoneTable {
public:
struct FindResult {
- FindResult(result::Result param_code, const ZonePtr param_zone) :
+ FindResult(result::Result param_code, const ZoneFinderPtr param_zone) :
code(param_code), zone(param_zone)
{}
const result::Result code;
- const ZonePtr zone;
+ const ZoneFinderPtr zone;
};
///
/// \name Constructors and Destructor.
@@ -83,7 +83,7 @@ public:
/// added to the zone table.
/// \return \c result::EXIST The zone table already contains
/// zone of the same origin.
- result::Result addZone(ZonePtr zone);
+ result::Result addZone(ZoneFinderPtr zone);
/// Remove a \c Zone of the given origin name from the \c ZoneTable.
///
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
new file mode 100644
index 0000000..64dda17
--- /dev/null
+++ b/src/lib/dhcp/Makefile.am
@@ -0,0 +1,26 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+CLEANFILES = *.gcno *.gcda
+
+lib_LTLIBRARIES = libdhcp.la
+libdhcp_la_SOURCES =
+libdhcp_la_SOURCES += libdhcp.cc libdhcp.h
+libdhcp_la_SOURCES += option.cc option.h
+libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
+libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
+libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp_la_SOURCES += dhcp6.h dhcp4.h
+libdhcp_la_SOURCES += pkt6.cc pkt6.h
+libdhcp_la_SOURCES += pkt4.cc pkt4.h
+
+EXTRA_DIST = README
+#EXTRA_DIST += log_messages.mes
+
+libdhcp_la_CXXFLAGS = $(AM_CXXFLAGS)
+libdhcp_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
+libdhcp_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/dhcp/README b/src/lib/dhcp/README
new file mode 100644
index 0000000..6c5353d
--- /dev/null
+++ b/src/lib/dhcp/README
@@ -0,0 +1,11 @@
+This directory holds the implementation of libdhcp.
+
+
+Basic Ideas
+===========
+
+
+Notes
+=====
+This work has just begun. Don't expect to see much useful code here.
+We are working on it.
\ No newline at end of file
diff --git a/src/lib/dhcp/dhcp4.h b/src/lib/dhcp/dhcp4.h
new file mode 100644
index 0000000..98381ac
--- /dev/null
+++ b/src/lib/dhcp/dhcp4.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2004-2011 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1995-2003 by Internet Software Consortium
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Internet Systems Consortium, Inc.
+ * 950 Charter Street
+ * Redwood City, CA 94063
+ * <info at isc.org>
+ * https://www.isc.org/
+ *
+ * This software has been written for Internet Systems Consortium
+ * by Ted Lemon in cooperation with Vixie Enterprises. To learn more
+ * about Internet Systems Consortium, see ``https://www.isc.org''.
+ * To learn more about Vixie Enterprises, see ``http://www.vix.com''.
+ */
+
+/*
+ * NOTE: This file is imported from ISC DHCP. It uses C notation.
+ * Format kept for easier merge.
+ */
+
+#ifndef DHCP_H
+#define DHCP_H
+
+#include <stdint.h>
+
+namespace isc {
+namespace dhcp {
+
+/* BOOTP (rfc951) message types */
+enum BOOTPTypes {
+ BOOTREQUEST = 1,
+ BOOTREPLY = 2
+};
+
+/* Possible values for flags field... */
+static const uint16_t BOOTP_BROADCAST = 32768L;
+
+/* Possible values for hardware type (htype) field... */
+enum HType {
+ HTYPE_ETHER = 1, /* Ethernet 10Mbps */
+ HTYPE_IEEE802 = 6, /* IEEE 802.2 Token Ring */
+ HTYPE_FDDI = 8 /* FDDI */
+ /// TODO Add infiniband here
+};
+
+/* DHCP Option codes: */
+enum DHCPOptionType {
+ DHO_PAD = 0,
+ DHO_SUBNET_MASK = 1,
+ DHO_TIME_OFFSET = 2,
+ DHO_ROUTERS = 3,
+ DHO_TIME_SERVERS = 4,
+ DHO_NAME_SERVERS = 5,
+ DHO_DOMAIN_NAME_SERVERS = 6,
+ DHO_LOG_SERVERS = 7,
+ DHO_COOKIE_SERVERS = 8,
+ DHO_LPR_SERVERS = 9,
+ DHO_IMPRESS_SERVERS = 10,
+ DHO_RESOURCE_LOCATION_SERVERS = 11,
+ DHO_HOST_NAME = 12,
+ DHO_BOOT_SIZE = 13,
+ DHO_MERIT_DUMP = 14,
+ DHO_DOMAIN_NAME = 15,
+ DHO_SWAP_SERVER = 16,
+ DHO_ROOT_PATH = 17,
+ DHO_EXTENSIONS_PATH = 18,
+ DHO_IP_FORWARDING = 19,
+ DHO_NON_LOCAL_SOURCE_ROUTING = 20,
+ DHO_POLICY_FILTER = 21,
+ DHO_MAX_DGRAM_REASSEMBLY = 22,
+ DHO_DEFAULT_IP_TTL = 23,
+ DHO_PATH_MTU_AGING_TIMEOUT = 24,
+ DHO_PATH_MTU_PLATEAU_TABLE = 25,
+ DHO_INTERFACE_MTU = 26,
+ DHO_ALL_SUBNETS_LOCAL = 27,
+ DHO_BROADCAST_ADDRESS = 28,
+ DHO_PERFORM_MASK_DISCOVERY = 29,
+ DHO_MASK_SUPPLIER = 30,
+ DHO_ROUTER_DISCOVERY = 31,
+ DHO_ROUTER_SOLICITATION_ADDRESS = 32,
+ DHO_STATIC_ROUTES = 33,
+ DHO_TRAILER_ENCAPSULATION = 34,
+ DHO_ARP_CACHE_TIMEOUT = 35,
+ DHO_IEEE802_3_ENCAPSULATION = 36,
+ DHO_DEFAULT_TCP_TTL = 37,
+ DHO_TCP_KEEPALIVE_INTERVAL = 38,
+ DHO_TCP_KEEPALIVE_GARBAGE = 39,
+ DHO_NIS_DOMAIN = 40,
+ DHO_NIS_SERVERS = 41,
+ DHO_NTP_SERVERS = 42,
+ DHO_VENDOR_ENCAPSULATED_OPTIONS = 43,
+ DHO_NETBIOS_NAME_SERVERS = 44,
+ DHO_NETBIOS_DD_SERVER = 45,
+ DHO_NETBIOS_NODE_TYPE = 46,
+ DHO_NETBIOS_SCOPE = 47,
+ DHO_FONT_SERVERS = 48,
+ DHO_X_DISPLAY_MANAGER = 49,
+ DHO_DHCP_REQUESTED_ADDRESS = 50,
+ DHO_DHCP_LEASE_TIME = 51,
+ DHO_DHCP_OPTION_OVERLOAD = 52,
+ DHO_DHCP_MESSAGE_TYPE = 53,
+ DHO_DHCP_SERVER_IDENTIFIER = 54,
+ DHO_DHCP_PARAMETER_REQUEST_LIST = 55,
+ DHO_DHCP_MESSAGE = 56,
+ DHO_DHCP_MAX_MESSAGE_SIZE = 57,
+ DHO_DHCP_RENEWAL_TIME = 58,
+ DHO_DHCP_REBINDING_TIME = 59,
+ DHO_VENDOR_CLASS_IDENTIFIER = 60,
+ DHO_DHCP_CLIENT_IDENTIFIER = 61,
+ DHO_NWIP_DOMAIN_NAME = 62,
+ DHO_NWIP_SUBOPTIONS = 63,
+ DHO_USER_CLASS = 77,
+ DHO_FQDN = 81,
+ DHO_DHCP_AGENT_OPTIONS = 82,
+ DHO_AUTHENTICATE = 90, /* RFC3118, was 210 */
+ DHO_CLIENT_LAST_TRANSACTION_TIME = 91,
+ DHO_ASSOCIATED_IP = 92,
+ DHO_SUBNET_SELECTION = 118, /* RFC3011! */
+ DHO_DOMAIN_SEARCH = 119, /* RFC3397 */
+ DHO_VIVCO_SUBOPTIONS = 124,
+ DHO_VIVSO_SUBOPTIONS = 125,
+
+ DHO_END = 255
+};
+
+/* DHCP message types. */
+enum DHCPMessageType {
+ DHCPDISCOVER = 1,
+ DHCPOFFER = 2,
+ DHCPREQUEST = 3,
+ DHCPDECLINE = 4,
+ DHCPACK = 5,
+ DHCPNAK = 6,
+ DHCPRELEASE = 7,
+ DHCPINFORM = 8,
+ DHCPLEASEQUERY = 10,
+ DHCPLEASEUNASSIGNED = 11,
+ DHCPLEASEUNKNOWN = 12,
+ DHCPLEASEACTIVE = 13
+};
+
+static const uint16_t DHCP4_CLIENT_PORT = 68;
+static const uint16_t DHCP4_SERVER_PORT = 67;
+
+/// Magic cookie validating dhcp options field (and bootp vendor
+/// extensions field).
+///static const char* DHCP_OPTIONS_COOKIE = "\143\202\123\143";
+
+// TODO: The following are leftovers from the dhcp.h import from ISC DHCP.
+// They will be converted to C++-style constants once they start
+// to be used.
+#if 0
+/* Relay Agent Information option subtypes: */
+#define RAI_CIRCUIT_ID 1
+#define RAI_REMOTE_ID 2
+#define RAI_AGENT_ID 3
+#define RAI_LINK_SELECT 5
+
+/* FQDN suboptions: */
+#define FQDN_NO_CLIENT_UPDATE 1
+#define FQDN_SERVER_UPDATE 2
+#define FQDN_ENCODED 3
+#define FQDN_RCODE1 4
+#define FQDN_RCODE2 5
+#define FQDN_HOSTNAME 6
+#define FQDN_DOMAINNAME 7
+#define FQDN_FQDN 8
+#define FQDN_SUBOPTION_COUNT 8
+
+/* Enterprise Suboptions: */
+#define VENDOR_ISC_SUBOPTIONS 2495
+
+#endif
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
+
+#endif /* DHCP_H */
diff --git a/src/lib/dhcp/dhcp6.h b/src/lib/dhcp/dhcp6.h
new file mode 100644
index 0000000..6012003
--- /dev/null
+++ b/src/lib/dhcp/dhcp6.h
@@ -0,0 +1,184 @@
+// Copyright (C) 2006-2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCP6_H
+#define DHCP6_H
+
+/* DHCPv6 Option codes: */
+
+#define D6O_CLIENTID 1 /* RFC3315 */
+#define D6O_SERVERID 2
+#define D6O_IA_NA 3
+#define D6O_IA_TA 4
+#define D6O_IAADDR 5
+#define D6O_ORO 6
+#define D6O_PREFERENCE 7
+#define D6O_ELAPSED_TIME 8
+#define D6O_RELAY_MSG 9
+/* Option code 10 unassigned. */
+#define D6O_AUTH 11
+#define D6O_UNICAST 12
+#define D6O_STATUS_CODE 13
+#define D6O_RAPID_COMMIT 14
+#define D6O_USER_CLASS 15
+#define D6O_VENDOR_CLASS 16
+#define D6O_VENDOR_OPTS 17
+#define D6O_INTERFACE_ID 18
+#define D6O_RECONF_MSG 19
+#define D6O_RECONF_ACCEPT 20
+#define D6O_SIP_SERVERS_DNS 21 /* RFC3319 */
+#define D6O_SIP_SERVERS_ADDR 22 /* RFC3319 */
+#define D6O_NAME_SERVERS 23 /* RFC3646 */
+#define D6O_DOMAIN_SEARCH 24 /* RFC3646 */
+#define D6O_IA_PD 25 /* RFC3633 */
+#define D6O_IAPREFIX 26 /* RFC3633 */
+#define D6O_NIS_SERVERS 27 /* RFC3898 */
+#define D6O_NISP_SERVERS 28 /* RFC3898 */
+#define D6O_NIS_DOMAIN_NAME 29 /* RFC3898 */
+#define D6O_NISP_DOMAIN_NAME 30 /* RFC3898 */
+#define D6O_SNTP_SERVERS 31 /* RFC4075 */
+#define D6O_INFORMATION_REFRESH_TIME 32 /* RFC4242 */
+#define D6O_BCMCS_SERVER_D 33 /* RFC4280 */
+#define D6O_BCMCS_SERVER_A 34 /* RFC4280 */
+/* 35 is unassigned */
+#define D6O_GEOCONF_CIVIC 36 /* RFC4776 */
+#define D6O_REMOTE_ID 37 /* RFC4649 */
+#define D6O_SUBSCRIBER_ID 38 /* RFC4580 */
+#define D6O_CLIENT_FQDN 39 /* RFC4704 */
+#define D6O_PANA_AGENT 40 /* paa-option */
+#define D6O_NEW_POSIX_TIMEZONE 41 /* RFC4833 */
+#define D6O_NEW_TZDB_TIMEZONE 42 /* RFC4833 */
+#define D6O_ERO 43 /* RFC4994 */
+#define D6O_LQ_QUERY 44 /* RFC5007 */
+#define D6O_CLIENT_DATA 45 /* RFC5007 */
+#define D6O_CLT_TIME 46 /* RFC5007 */
+#define D6O_LQ_RELAY_DATA 47 /* RFC5007 */
+#define D6O_LQ_CLIENT_LINK 48 /* RFC5007 */
+
+/*
+ * Status Codes, from RFC 3315 section 24.4, and RFC 3633, 5007.
+ */
+#define STATUS_Success 0
+#define STATUS_UnspecFail 1
+#define STATUS_NoAddrsAvail 2
+#define STATUS_NoBinding 3
+#define STATUS_NotOnLink 4
+#define STATUS_UseMulticast 5
+#define STATUS_NoPrefixAvail 6
+#define STATUS_UnknownQueryType 7
+#define STATUS_MalformedQuery 8
+#define STATUS_NotConfigured 9
+#define STATUS_NotAllowed 10
+
+/*
+ * DHCPv6 message types, defined in section 5.3 of RFC 3315
+ */
+#define DHCPV6_SOLICIT 1
+#define DHCPV6_ADVERTISE 2
+#define DHCPV6_REQUEST 3
+#define DHCPV6_CONFIRM 4
+#define DHCPV6_RENEW 5
+#define DHCPV6_REBIND 6
+#define DHCPV6_REPLY 7
+#define DHCPV6_RELEASE 8
+#define DHCPV6_DECLINE 9
+#define DHCPV6_RECONFIGURE 10
+#define DHCPV6_INFORMATION_REQUEST 11
+#define DHCPV6_RELAY_FORW 12
+#define DHCPV6_RELAY_REPL 13
+#define DHCPV6_LEASEQUERY 14
+#define DHCPV6_LEASEQUERY_REPLY 15
+
+extern const char *dhcpv6_type_names[];
+extern const int dhcpv6_type_name_max;
+
+/* DUID type definitions (RFC3315 section 9).
+ */
+#define DUID_LLT 1
+#define DUID_EN 2
+#define DUID_LL 3
+
+/* Offsets into IA_*'s where Option spaces commence. */
+#define IA_NA_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+#define IA_TA_OFFSET 4 /* IAID only, 4 octets */
+#define IA_PD_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+
+/* Offset into IAADDR's where Option spaces commence. */
+#define IAADDR_OFFSET 24
+
+/* Offset into IAPREFIX's where Option spaces commence. */
+#define IAPREFIX_OFFSET 25
+
+/* Offset into LQ_QUERY's where Option spaces commence. */
+#define LQ_QUERY_OFFSET 17
+
+/*
+ * DHCPv6 well-known multicast addresses, from section 5.1 of RFC 3315
+ */
+#define ALL_DHCP_RELAY_AGENTS_AND_SERVERS "ff02::1:2"
+#define ALL_DHCP_SERVERS "ff05::1:3"
+
+#define DHCP6_CLIENT_PORT 546
+#define DHCP6_SERVER_PORT 547
+
+/*
+ * DHCPv6 Retransmission Constants (RFC3315 section 5.5, RFC 5007)
+ */
+
+#define SOL_MAX_DELAY 1
+#define SOL_TIMEOUT 1
+#define SOL_MAX_RT 120
+#define REQ_TIMEOUT 1
+#define REQ_MAX_RT 30
+#define REQ_MAX_RC 10
+#define CNF_MAX_DELAY 1
+#define CNF_TIMEOUT 1
+#define CNF_MAX_RT 4
+#define CNF_MAX_RD 10
+#define REN_TIMEOUT 10
+#define REN_MAX_RT 600
+#define REB_TIMEOUT 10
+#define REB_MAX_RT 600
+#define INF_MAX_DELAY 1
+#define INF_TIMEOUT 1
+#define INF_MAX_RT 120
+#define REL_TIMEOUT 1
+#define REL_MAX_RC 5
+#define DEC_TIMEOUT 1
+#define DEC_MAX_RC 5
+#define REC_TIMEOUT 2
+#define REC_MAX_RC 8
+#define HOP_COUNT_LIMIT 32
+#define LQ6_TIMEOUT 1
+#define LQ6_MAX_RT 10
+#define LQ6_MAX_RC 5
+
+/* Leasequery query-types (RFC 5007) */
+
+#define LQ6QT_BY_ADDRESS 1
+#define LQ6QT_BY_CLIENTID 2
+
+/*
+ * DUID time starts 2000-01-01.
+ * This constant is the number of seconds since 1970-01-01,
+ * when the Unix epoch began.
+ */
+#define DUID_TIME_EPOCH 946684800
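+
+/* Illustrative only: a DUID timestamp is the current time expressed in
+ * seconds since 2000-01-01, e.g. (uint32_t)(time(NULL) - DUID_TIME_EPOCH). */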
+
+/* Information-Request Time option (RFC 4242) */
+
+#define IRT_DEFAULT 86400
+#define IRT_MINIMUM 600
+
+#endif
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
new file mode 100644
index 0000000..b95a427
--- /dev/null
+++ b/src/lib/dhcp/libdhcp.cc
@@ -0,0 +1,170 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
+#include "config.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+// static map with factories for DHCPv6 options
+std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
+
+unsigned int
+LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
+ unsigned int buf_len,
+ unsigned int offset, unsigned int parse_len,
+ isc::dhcp::Option::OptionCollection& options) {
+ if (offset + parse_len > buf_len) {
+ isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+ << parse_len << " bytes at offset " << offset
+ << ": out of buffer");
+ }
+ unsigned int end = offset + parse_len;
+
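+ // A DHCPv6 option header is 4 octets long: a 2-octet option code
+ // followed by a 2-octet option length (RFC 3315, section 22.1).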
+ while (offset + 4 <= end) {
+ uint16_t opt_type = buf[offset]*256 + buf[offset+1];
+ offset += 2;
+ uint16_t opt_len = buf[offset]*256 + buf[offset+1];
+ offset += 2;
+
+ if (offset + opt_len > end ) {
+ cout << "Option " << opt_type << " truncated." << endl;
+ return (offset);
+ }
+ boost::shared_ptr<Option> opt;
+ switch (opt_type) {
+ case D6O_IA_NA:
+ case D6O_IA_PD:
+ // cout << "Creating Option6IA" << endl;
+ opt = boost::shared_ptr<Option>(new Option6IA(opt_type,
+ buf, buf_len,
+ offset,
+ opt_len));
+ break;
+ case D6O_IAADDR:
+ // cout << "Creating Option6IAAddr" << endl;
+ opt = boost::shared_ptr<Option>(new Option6IAAddr(opt_type,
+ buf, buf_len,
+ offset, opt_len));
+ break;
+ default:
+ // cout << "Creating Option" << endl;
+ opt = boost::shared_ptr<Option>(new Option(Option::V6,
+ opt_type,
+ buf,
+ offset,
+ opt_len));
+ break;
+ }
+ // add option to options
+ options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+ offset += opt_len;
+ }
+
+ return (offset);
+}
+
+void
+LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options) {
+ size_t offset = 0;
+
+ // 2 - header of DHCPv4 option
+ while (offset + 2 <= buf.size()) {
+ uint8_t opt_type = buf[offset++];
+ uint8_t opt_len = buf[offset++];
+ if (offset + opt_len > buf.size() ) {
+ isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+ << offset + opt_len << " bytes from " << buf.size()
+ << "-byte long buffer.");
+ }
+
+ boost::shared_ptr<Option> opt;
+ switch(opt_type) {
+ default:
+ opt = boost::shared_ptr<Option>(new Option(Option::V4, opt_type,
+ buf.begin()+offset,
+ buf.begin()+offset+opt_len));
+ }
+
+ options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+ offset += opt_len;
+ }
+}
+
+unsigned int
+LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
+ unsigned int data_len,
+ unsigned int offset,
+ const isc::dhcp::Option::OptionCollection& options) {
+ try {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end();
+ ++it) {
+ unsigned short opt_len = (*it).second->len();
+ if (offset + opt_len > data_len) {
+ isc_throw(OutOfRange, "Failed to build option " <<
+ (*it).first << ": out of buffer");
+ }
+ offset = it->second->pack(data, data_len, offset);
+ }
+ }
+ catch (const Exception& e) {
+ cout << "Packet build failed (Option build failed)." << endl;
+ throw;
+ }
+ return (offset);
+}
+
+void
+LibDHCP::packOptions(isc::util::OutputBuffer& buf,
+ const Option::OptionCollection& options) {
+ for (Option::OptionCollection::const_iterator it = options.begin();
+ it != options.end();
+ ++it) {
+ it->second->pack4(buf);
+ }
+}
+
+
+bool
+LibDHCP::OptionFactoryRegister(Option::Universe u,
+ unsigned short opt_type,
+ Option::Factory * factory) {
+ switch (u) {
+ case Option::V6: {
+ if (v6factories_.find(opt_type) != v6factories_.end()) {
+ isc_throw(BadValue, "There is already a DHCPv6 factory registered "
+ << "for option type " << opt_type);
+ }
+ v6factories_[opt_type] = factory;
+ return true;
+ }
+ case Option::V4:
+ default:{
+ isc_throw(BadValue, "This universe type is not supported yet.");
+ return false; // never happens
+ }
+ }
+
+}
diff --git a/src/lib/dhcp/libdhcp.h b/src/lib/dhcp/libdhcp.h
new file mode 100644
index 0000000..468e6bb
--- /dev/null
+++ b/src/lib/dhcp/libdhcp.h
@@ -0,0 +1,103 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LIBDHCP_H_
+#define LIBDHCP_H_
+
+#include <iostream>
+#include <util/buffer.h>
+#include <dhcp/pkt6.h>
+
+namespace isc {
+namespace dhcp {
+
+class LibDHCP {
+
+public:
+ /// @brief Builds a collection of options into on-wire form.
+ ///
+ /// Builds raw (on-wire) data for the provided collection of DHCPv6 options.
+ ///
+ /// @param buf shared array the data will be stored in
+ /// @param buf_len buffer length, used for buffer overflow protection
+ /// @param offset offset from the beginning of the buffer at which the
+ /// options are stored
+ /// @param options collection of options to be stored
+ ///
+ /// @return offset to the first unused byte in the buffer (the one right
+ /// after the last used byte)
+ ///
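+ /// A short illustrative sketch (the buffer size and option content are
+ /// arbitrary assumptions):
+ /// @code
+ ///     boost::shared_array<uint8_t> buf(new uint8_t[512]);
+ ///     isc::dhcp::Option::OptionCollection opts;
+ ///     // ... insert boost::shared_ptr<Option> objects into opts ...
+ ///     unsigned int end = LibDHCP::packOptions6(buf, 512, 0, opts);
+ ///     // end is now the offset of the first unused byte in buf
+ /// @endcode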
+ static unsigned int
+ packOptions6(boost::shared_array<uint8_t> buf, unsigned int buf_len,
+ unsigned int offset,
+ const isc::dhcp::Option::OptionCollection& options);
+
+
+ /// @brief Stores options in a buffer.
+ ///
+ /// Stores all options from the options container in on-wire
+ /// format in the output buffer specified by buf.
+ ///
+ /// May throw different exceptions if option assembly fails. There
+ /// may be different reasons (option too large, option malformed,
+ /// too many options, etc.).
+ ///
+ /// @param buf output buffer the options will be stored in
+ /// @param options collection of options to be stored
+ static void
+ packOptions(isc::util::OutputBuffer& buf,
+ const isc::dhcp::Option::OptionCollection& options);
+
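+ /// @brief Parses DHCPv4 options in the provided buffer.
+ ///
+ /// Walks through the on-wire options contained in buf and stores the
+ /// created Option objects in the options container.
+ ///
+ /// @param buf vector with the on-wire option data to be parsed
+ /// @param options reference to the option container; parsed options
+ /// are put here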
+ static void
+ unpackOptions4(const std::vector<uint8_t>& buf,
+ isc::dhcp::Option::OptionCollection& options);
+ ///
+ /// @brief Parses DHCPv6 options in the provided buffer and creates
+ /// Option objects.
+ ///
+ /// Parses the provided buf array and stores the created Option objects
+ /// in the options container.
+ ///
+ /// @param buf buffer to be parsed
+ /// @param buf_len buffer length, used for buffer overflow protection
+ /// @param offset offset of the first option within the buffer
+ /// @param parse_len number of bytes to be parsed
+ /// @param options reference to the option container; parsed options
+ /// are put here
+ ///
+ /// @return offset to the first byte after the last parsed option
+ ///
+ static unsigned int
+ unpackOptions6(const boost::shared_array<uint8_t> buf, unsigned int buf_len,
+ unsigned int offset, unsigned int parse_len,
+ isc::dhcp::Option::OptionCollection& options);
+
+ ///
+ /// Registers a factory method that produces options of a specific option type.
+ ///
+ /// @param u universe of the option (V4 or V6)
+ /// @param opt_type option-type
+ /// @param factory function pointer
+ ///
+ /// @return true, if registration was successful, false otherwise
+ ///
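+ /// A short illustrative sketch (createMyOption is a hypothetical factory
+ /// function matching the Option::Factory prototype; D6O_IA_NA is defined
+ /// in dhcp6.h):
+ /// @code
+ ///     LibDHCP::OptionFactoryRegister(Option::V6, D6O_IA_NA,
+ ///                                    &createMyOption);
+ /// @endcode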
+ static bool
+ OptionFactoryRegister(Option::Universe u,
+ unsigned short opt_type,
+ Option::Factory * factory);
+protected:
+ // pointers to factories that produce DHCPv6 options
+ static std::map<unsigned short, Option::Factory*> v6factories_;
+};
+
+} // namespace dhcp
+} // namespace isc
+
+#endif
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
new file mode 100644
index 0000000..daef288
--- /dev/null
+++ b/src/lib/dhcp/option.cc
@@ -0,0 +1,333 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <boost/shared_array.hpp>
+#include "exceptions/exceptions.h"
+#include "util/io_utilities.h"
+
+#include "dhcp/option.h"
+#include "dhcp/libdhcp.h"
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+Option::Option(Universe u, unsigned short type)
+ :universe_(u), type_(type) {
+
+ if ((u == V4) && (type > 255)) {
+ isc_throw(BadValue, "Can't create V4 option of type "
+ << type << ", V4 options are in range 0..255");
+ }
+}
+
+Option::Option(Universe u, unsigned short type,
+ const boost::shared_array<uint8_t>& buf,
+ unsigned int offset, unsigned int len)
+ :universe_(u), type_(type),
+ offset_(offset)
+{
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + len);
+
+ check();
+}
+
+Option::Option(Universe u, unsigned short type, std::vector<uint8_t>& data)
+ :universe_(u), type_(type), data_(data) {
+ check();
+}
+
+Option::Option(Universe u, uint16_t type, vector<uint8_t>::const_iterator first,
+ vector<uint8_t>::const_iterator last)
+ :universe_(u), type_(type), data_(std::vector<uint8_t>(first,last)) {
+ check();
+}
+
+void
+Option::check() {
+ if ((universe_ != V4) && (universe_ != V6)) {
+ isc_throw(BadValue, "Invalid universe type specified. "
+ << "Only V4 and V6 are allowed.");
+ }
+
+ if (universe_ == V4) {
+
+ if (type_ > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option type " << type_ << " is too big."
+ << "For DHCPv4 allowed type range is 0..255");
+ } else if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+ }
+
+ // no need to check anything for DHCPv6. It allows full range (0-64k) of
+ // both types and data size.
+}
+
+unsigned int
+Option::pack(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (universe_ != V6) {
+ isc_throw(BadValue, "Failed to pack " << type_ << " option. Do not "
+ << "use this method for options other than DHCPv6.");
+ }
+ return pack6(buf, buf_len, offset);
+}
+
+void
+Option::pack4(isc::util::OutputBuffer& buf) {
+ switch (universe_) {
+ case V4: {
+ if (data_.size() > 255) {
+ isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big."
+ << "At most 255 bytes are supported.");
+ /// TODO Larger options can be stored as separate instances
+ /// of DHCPv4 options. Clients MUST concatenate them.
+ /// Fortunately, there are no such large options used today.
+ }
+
+ buf.writeUint8(type_);
+ buf.writeUint8(len() - getHeaderLen());
+
+ buf.writeData(&data_[0], data_.size());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
+ }
+ case V6:
+ /// TODO: Do we need a sanity check for option size here?
+ buf.writeUint16(type_);
+ buf.writeUint16(len() - getHeaderLen());
+
+ LibDHCP::packOptions(buf, options_);
+ return;
+ default:
+ isc_throw(OutOfRange, "Invalid universe type" << universe_);
+ }
+}
+
+unsigned int
+Option::pack4(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (offset + len() > buf_len) {
+ isc_throw(OutOfRange, "Failed to pack v4 option=" <<
+ type_ << ",len=" << len() << ": too small buffer.");
+ }
+ uint8_t *ptr = &buf[offset];
+ ptr[0] = type_;
+ ptr[1] = len() - getHeaderLen();
+ ptr += 2;
+ memcpy(ptr, &data_[0], data_.size());
+
+ return offset + len();
+}
+
+unsigned int
+Option::pack6(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (offset+len() > buf_len) {
+ isc_throw(OutOfRange, "Failed to pack v6 option=" <<
+ type_ << ",len=" << len() << ": too small buffer.");
+ }
+
+ uint8_t* ptr = &buf[offset];
+
+ ptr = writeUint16(type_, ptr);
+
+ ptr = writeUint16(len() - getHeaderLen(), ptr);
+
+ if (!data_.empty()) {
+ memcpy(ptr, &data_[0], data_.size());
+ }
+
+ // end of fixed part of this option
+ offset += OPTION6_HDR_LEN + data_.size();
+
+ return LibDHCP::packOptions6(buf, buf_len, offset, options_);
+}
+
+unsigned int
+Option::unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len) {
+ switch (universe_) {
+ case V4:
+ return unpack4(buf, buf_len, offset, parse_len);
+ case V6:
+ return unpack6(buf, buf_len, offset, parse_len);
+ default:
+ isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+ }
+
+ return 0; // should not happen
+}
+
+unsigned int
+Option::unpack4(const boost::shared_array<uint8_t>&,
+ unsigned int ,
+ unsigned int ,
+ unsigned int ) {
+ isc_throw(Unexpected, "IPv4 support not implemented yet.");
+ return 0;
+}
+
+unsigned int
+Option::unpack6(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len) {
+
+ if (buf_len < offset+parse_len) {
+ isc_throw(OutOfRange, "Failed to unpack DHCPv6 option len="
+ << parse_len << " offset=" << offset
+ << " from buffer (length=" << buf_len
+ << "): too small buffer.");
+ }
+
+ uint8_t* ptr = &buf[offset];
+ data_ = std::vector<uint8_t>(ptr, ptr + parse_len);
+
+ offset_ = offset;
+
+ return (offset+parse_len);
+
+ //return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
+ // options_);
+}
+
+/// Returns length of the complete option (data length + DHCPv4/DHCPv6
+/// option header)
+unsigned short
+Option::len() {
+
+ // length of the whole option is header and data stored in this option...
+ int length = getHeaderLen() + data_.size();
+
+ // ... and sum of lengths of all suboptions
+ for (Option::OptionCollection::iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
+ // Note that this is not equal to the on-wire length field: this value
+ // is the number of bytes required to store the whole option, while the
+ // length field should contain (len() - getHeaderLen()).
+ return (length);
+}
+
+bool
+Option::valid() {
+ if (universe_ != V4 &&
+ universe_ != V6) {
+ return (false);
+ }
+
+ return (true);
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Option::getOption(unsigned short opt_type) {
+ isc::dhcp::Option::OptionCollection::const_iterator x =
+ options_.find(opt_type);
+ if ( x != options_.end() ) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+bool
+Option::delOption(unsigned short opt_type) {
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(opt_type);
+ if ( x != options_.end() ) {
+ options_.erase(x);
+ return true; // delete successful
+ }
+ return (false); // option not found, can't delete
+}
+
+
+std::string Option::toText(int indent /* =0 */ ) {
+ std::stringstream tmp;
+
+ for (int i = 0; i < indent; i++)
+ tmp << " ";
+
+ tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ": ";
+
+ for (unsigned int i = 0; i < data_.size(); i++) {
+ if (i) {
+ tmp << ":";
+ }
+ tmp << setfill('0') << setw(2) << hex
+ << static_cast<unsigned short>(data_[i]);
+ }
+
+ // print suboptions
+ for (OptionCollection::const_iterator opt = options_.begin();
+ opt != options_.end();
+ ++opt) {
+ tmp << (*opt).second->toText(indent+2);
+ }
+ return tmp.str();
+}
+
+unsigned short
+Option::getType() {
+ return type_;
+}
+
+const std::vector<uint8_t>&
+Option::getData() {
+ return (data_);
+}
+
+unsigned short
+Option::getHeaderLen() {
+ switch (universe_) {
+ case V4:
+ return OPTION4_HDR_LEN; // header length for v4
+ case V6:
+ return OPTION6_HDR_LEN; // header length for v6
+ }
+ return 0; // should not happen
+}
+
+void
+Option::addOption(boost::shared_ptr<Option> opt) {
+ if (universe_ == V4) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+Option::~Option() {
+
+}
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
new file mode 100644
index 0000000..3822cf0
--- /dev/null
+++ b/src/lib/dhcp/option.h
@@ -0,0 +1,331 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION_H_
+#define OPTION_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dhcp {
+
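+/// @brief Base class representing a single DHCPv4 or DHCPv6 option.
+///
+/// An option consists of a universe (V4 or V6), a type (option code),
+/// raw data and, optionally, a collection of suboptions.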
+class Option {
+public:
+ /// length of the usual DHCPv4 option header (there are exceptions)
+ const static size_t OPTION4_HDR_LEN = 2;
+
+ /// length of any DHCPv6 option header
+ const static size_t OPTION6_HDR_LEN = 4;
+
+ /// defines option universe DHCPv4 or DHCPv6
+ enum Universe { V4, V6 };
+
+ /// a collection of DHCPv6 options
+ typedef std::multimap<unsigned int, boost::shared_ptr<Option> >
+ OptionCollection;
+
+ /// @brief a factory function prototype
+ ///
+ /// @param u option universe (DHCPv4 or DHCPv6)
+ /// @param type option type
+ /// @param buf pointer to a buffer
+ /// @param offset offset to first data byte in that buffer
+ /// @param len data length of this option
+ ///
+ /// @return a pointer to a created option object
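+ ///
+ /// A conforming factory could look like this (illustrative sketch only;
+ /// createMyOption is not part of the library):
+ /// @code
+ ///     boost::shared_ptr<Option>
+ ///     createMyOption(Option::Universe u, unsigned short type,
+ ///                    boost::shared_array<uint8_t>& buf,
+ ///                    unsigned int offset, unsigned int len) {
+ ///         return (boost::shared_ptr<Option>(new Option(u, type, buf,
+ ///                                                      offset, len)));
+ ///     }
+ /// @endcode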
+ typedef boost::shared_ptr<Option> Factory(Option::Universe u,
+ unsigned short type,
+ boost::shared_array<uint8_t>& buf,
+ unsigned int offset,
+ unsigned int len);
+
+ /// @brief ctor, used for options constructed, usually during transmission
+ ///
+ /// @param u option universe (DHCPv4 or DHCPv6)
+ /// @param type option type
+ Option(Universe u, unsigned short type);
+
+ /// @brief ctor, used for received options
+ ///
+ /// boost::shared_array allows sharing a buffer, but it requires that
+ /// different instances share a pointer to the whole array rather than
+ /// point to different elements of the shared array. Therefore we need
+ /// to share a pointer to the whole array and remember the offset where
+ /// the data for this option begins.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type
+ /// @param buf pointer to a buffer
+ /// @param offset offset in a buffer pointing to first byte of data
+ /// @param len length of the option data
+ Option(Universe u, unsigned short type,
+ const boost::shared_array<uint8_t>& buf, unsigned int offset,
+ unsigned int len);
+
+ /// @brief Constructor, used for received options.
+ ///
+ /// This constructor takes vector<uint8_t>& which is used in cases
+ /// when content of the option will be copied and stored within
+ /// option object. V4 Options follow that approach already.
+ /// TODO Migrate V6 options to that approach.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param data content of the option
+ Option(Universe u, unsigned short type, std::vector<uint8_t>& data);
+
+ /// @brief Constructor, used for received options.
+ ///
+ /// This constructor is similar to the previous one, but it does not take
+ /// the whole vector<uint8_t>, just a subset of it.
+ ///
+ /// TODO: This can be templated to use different containers, not just
+ /// vector. Prototype should look like this:
+ /// template<typename InputIterator> Option(Universe u, uint16_t type,
+ /// InputIterator first, InputIterator last);
+ ///
+ /// Example usage:
+ ///     std::vector<uint8_t> myData;
+ ///     new Option(V4, 123, myData.begin() + 1, myData.end() - 1);
+ /// This will create a DHCPv4 option of type 123 that contains the data
+ /// from the trimmed (first and last byte removed) myData vector.
+ ///
+ /// @param u specifies universe (V4 or V6)
+ /// @param type option type (0-255 for V4 and 0-65535 for V6)
+ /// @param first iterator to the first element that should be copied
+ /// @param last iterator to the next element after the last one
+ /// to be copied.
+ Option(Universe u, uint16_t type,
+ std::vector<uint8_t>::const_iterator first,
+ std::vector<uint8_t>::const_iterator last);
+
+ /// @brief returns option universe (V4 or V6)
+ ///
+ /// @return universe type
+ Universe
+ getUniverse() { return universe_; };
+
+ /// @brief Writes option in wire-format to a buffer.
+ ///
+ /// Writes option in wire-format to buffer, returns pointer to first unused
+ /// byte after stored option (that is useful for writing options one after
+ /// another). Used in DHCPv6 options.
+ ///
+ /// TODO: Migrate DHCPv6 code to pack(OutputBuffer& buf) version
+ ///
+ /// @param buf pointer to a buffer
+ /// @param buf_len length of the buffer
+ /// @param offset offset to the place where the option should be stored
+ ///
+ /// @return offset to first unused byte after stored option
+ ///
+ virtual unsigned int
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+ unsigned int offset);
+
+ /// @brief Writes option in a wire-format to a buffer.
+ ///
+ /// Method will throw if option storing fails for some reason.
+ ///
+ /// TODO Once old (DHCPv6) implementation is rewritten,
+ /// unify pack4() and pack6() and rename them to just pack().
+ ///
+ /// @param buf output buffer (option will be stored there)
+ virtual void
+ pack4(isc::util::OutputBuffer& buf);
+
+
+ /// @brief Parses buffer.
+ ///
+ /// Parses received buffer, returns offset to the first unused byte after
+ /// parsed option.
+ ///
+ /// @param buf pointer to buffer
+ /// @param buf_len length of buf
+ /// @param offset offset at which to start parsing the option
+ /// @param parse_len how many bytes should be parsed
+ ///
+ /// @return offset after last parsed octet
+ virtual unsigned int
+ unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len);
+
+ /// Returns string representation of the option.
+ ///
+ /// @param indent number of spaces before printing text
+ ///
+ /// @return string with text representation.
+ virtual std::string
+ toText(int indent = 0);
+
+ /// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
+ ///
+ /// @return option type
+ unsigned short
+ getType();
+
+ /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+ /// option header)
+ ///
+ /// @return length of the option
+ virtual unsigned short
+ len();
+
+ /// @brief Returns length of header (2 for v4, 4 for v6)
+ ///
+ /// @return length of option header
+ virtual unsigned short
+ getHeaderLen();
+
+ /// Returns whether the option is valid (an option may be invalid if,
+ /// for example, it is truncated)
+ ///
+ /// @return true, if the option is valid
+ virtual bool
+ valid();
+
+ /// Returns a reference to the option data.
+ ///
+ /// @return const reference to the option data (possibly empty)
+ virtual const std::vector<uint8_t>&
+ getData();
+
+ /// Adds a sub-option.
+ ///
+ /// Some DHCPv6 options can have suboptions. This method allows adding
+ /// options within options.
+ ///
+ /// Note: the option is passed by value. That is very convenient as it
+ /// allows an implicit conversion from shared pointers to derived classes,
+ /// e.g. a shared_ptr<Option6_IA> object can be passed directly, without
+ /// any casts. That would not be possible with passing by reference.
+ /// addOption() is expected to be used in many places, so requiring casts
+ /// is not feasible.
+ ///
+ /// @param opt shared pointer to a suboption that is going to be added.
+ void
+ addOption(boost::shared_ptr<Option> opt);
+
+ /// Returns shared_ptr to suboption of specific type
+ ///
+ /// @param type type of requested suboption
+ ///
+ /// @return shared_ptr to the requested suboption
+ boost::shared_ptr<isc::dhcp::Option>
+ getOption(unsigned short type);
+
+ /// Attempts to delete first suboption of requested type
+ ///
+ /// @param type Type of option to be deleted.
+ ///
+ /// @return true if option was deleted, false if no such option existed
+ bool
+ delOption(unsigned short type);
+
+ /// Ensures that every option has a virtual destructor.
+ virtual
+ ~Option();
+
+protected:
+
+ /// Builds raw (over-wire) buffer of this option, including all
+ /// defined suboptions. Version for building DHCPv4 options.
+ ///
+ /// @param buf output buffer (built options will be stored here)
+ /// @param buf_len buffer length (used for buffer overflow checks)
+ /// @param offset offset from start of the buf buffer
+ ///
+ /// @return offset to the next byte after last used byte
+ virtual unsigned int
+ pack4(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset);
+
+ /// Builds raw (over-wire) buffer of this option, including all
+ /// defined suboptions. Version for building DHCPv6 options.
+ ///
+ /// @param buf output buffer (built options will be stored here)
+ /// @param buf_len buffer length (used for buffer overflow checks)
+ /// @param offset offset from start of the buf buffer
+ ///
+ /// @return offset to the next byte after last used byte
+ virtual unsigned int
+ pack6(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset);
+
+ /// Parses provided buffer and creates DHCPv4 options.
+ ///
+ /// @param buf buffer that contains raw buffer to parse (on-wire format)
+ /// @param buf_len buffer length (used for buffer overflow checks)
+ /// @param offset offset from start of the buf buffer
+ ///
+ /// @return offset to the next byte after last parsed byte
+ virtual unsigned int
+ unpack4(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len);
+
+ /// Parses provided buffer and creates DHCPv6 options.
+ ///
+ /// @param buf buffer that contains raw buffer to parse (on-wire format)
+ /// @param buf_len buffer length (used for buffer overflow checks)
+ /// @param offset offset from start of the buf buffer
+ ///
+ /// @return offset to the next byte after last parsed byte
+ virtual unsigned int
+ unpack6(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len);
+
+ /// @brief A method that checks option correctness.
+ ///
+ /// It is used in constructors. If any problems are detected
+ /// (like specifying type > 255 for a DHCPv4 option), it will throw
+ /// BadValue or OutOfRange exceptions.
+ void check();
+
+ /// option universe (V4 or V6)
+ Universe universe_;
+
+ /// option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
+ unsigned short type_;
+
+ /// contains the data carried by this option
+ std::vector<uint8_t> data_;
+
+ /// TODO: Remove this field. vector<uint8_t> should be used
+ /// instead.
+ /// With the shared_array approach the buffer is a shared pointer to
+ /// the whole packet; offset_ specifies where the data for this
+ /// option begins within that buffer.
+ unsigned int offset_;
+
+ /// collection for storing suboptions
+ OptionCollection options_;
+
+ /// TODO: probably 2 different containers have to be used for v4 (unique
+ /// options) and v6 (options with the same type can repeat)
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
new file mode 100644
index 0000000..fc981fa
--- /dev/null
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -0,0 +1,134 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "asiolink/io_address.h"
+#include "util/io_utilities.h"
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_addrlst.h"
+#include "dhcp/dhcp6.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+ const AddressContainer& addrs)
+ :Option(V6, type), addrs_(addrs) {
+}
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+ const isc::asiolink::IOAddress& addr)
+ :Option(V6, type), addrs_(1,addr) {
+}
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+ boost::shared_array<uint8_t> buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int option_len)
+ :Option(V6, type) {
+ unpack(buf, buf_len, offset, option_len);
+}
+
+void
+Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+ addrs_.clear();
+ addrs_.push_back(addr);
+}
+
+void
+Option6AddrLst::setAddresses(const AddressContainer& addrs) {
+ addrs_ = addrs;
+}
+
+unsigned int
+Option6AddrLst::pack(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (len() > buf_len) {
+ isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
+ << ", buffer=" << buf_len << ": too small buffer.");
+ }
+
+ writeUint16(type_, &buf[offset]);
+ offset += sizeof(uint16_t);
+
+ // len() returns complete option length.
+ // len field contains length without 4-byte option header
+ writeUint16(len() - OPTION6_HDR_LEN, &buf[offset]);
+ offset += sizeof(uint16_t);
+
+ // This conversion chain is ugly; there does not seem to be a simpler
+ // way to get at the raw 16-octet address bytes.
+ for (AddressContainer::const_iterator addr=addrs_.begin();
+ addr!=addrs_.end();
+ ++addr) {
+ memcpy(&buf[offset],
+ addr->getAddress().to_v6().to_bytes().data(),
+ V6ADDRESS_LEN);
+ offset += V6ADDRESS_LEN;
+ }
+
+ return offset;
+}
+
+unsigned int
+Option6AddrLst::unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int option_len) {
+ if (offset+option_len > buf_len) {
+ isc_throw(OutOfRange, "Option " << type_
+ << " truncated.");
+ }
+
+ if (option_len%16) {
+ isc_throw(OutOfRange, "Option " << type_
+ << " malformed: len=" << option_len
+ << " is not divisible by 16.");
+ }
+ while (option_len > 0) {
+ addrs_.push_back(IOAddress::from_bytes(AF_INET6, &buf[offset]));
+ offset += 16;
+ option_len -= 16;
+ }
+
+ return offset;
+}
+
+std::string Option6AddrLst::toText(int indent /* =0 */) {
+ stringstream tmp;
+ for (int i=0; i<indent; i++)
+ tmp << " ";
+
+ tmp << "type=" << type_ << " " << addrs_.size() << "addr(s): ";
+
+ for (AddressContainer::const_iterator addr=addrs_.begin();
+ addr!=addrs_.end();
+ ++addr) {
+ tmp << addr->toText() << " ";
+ }
+ return tmp.str();
+}
+
+unsigned short Option6AddrLst::len() {
+
+ return (OPTION6_HDR_LEN + addrs_.size()*16);
+}
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
new file mode 100644
index 0000000..c5b32af
--- /dev/null
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -0,0 +1,127 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION6_ADDRLST_H_
+#define OPTION6_ADDRLST_H_
+
+#include <vector>
+#include "asiolink/io_address.h"
+#include "dhcp/option.h"
+
+namespace isc {
+namespace dhcp {
+
+/// @brief Option class for handling list of IPv6 addresses.
+///
+/// This class handles a list of IPv6 addresses. An example of such an
+/// option is the dns-servers option. It can also be used to handle a
+/// single address.
+///
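+/// A short illustrative sketch (the option code D6O_NAME_SERVERS comes from
+/// dhcp6.h and the address is an arbitrary example):
+/// @code
+///     Option6AddrLst::AddressContainer addrs;
+///     addrs.push_back(isc::asiolink::IOAddress("2001:db8::1"));
+///     Option6AddrLst dns_servers(D6O_NAME_SERVERS, addrs);
+/// @endcode
+///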
+class Option6AddrLst: public Option {
+
+public:
+ /// a container for (IPv6) addresses
+ typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+ /// @brief Constructor used during option generation.
+ ///
+ /// @param type option type
+ /// @param addrs vector of addresses to be stored
+ ///
+ Option6AddrLst(unsigned short type,
+ const AddressContainer& addrs);
+
+ /// @brief Simplified constructor for a single address
+ ///
+ /// @param type option type
+ /// @param addr a single address to be stored
+ ///
+ Option6AddrLst(unsigned short type,
+ const isc::asiolink::IOAddress& addr);
+
+ /// @brief Constructor used for parsing received option
+ ///
+ /// @param type option type
+ /// @param buf pointer to packet buffer
+ /// @param buf_len length of packet buffer
+ /// @param offset offset to beginning of option data
+ /// @param len length of option data
+ ///
+ Option6AddrLst(unsigned short type, boost::shared_array<uint8_t> buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int len);
+
+ /// @brief Assembles on-wire form of this option
+ ///
+ /// @param buf pointer to packet buffer
+ /// @param buf_len length of packet buffer
+ /// @param offset offset to place, where option is to be stored
+ ///
+ /// @return offset to the next unused char (just after stored option)
+ ///
+ unsigned int
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+ unsigned int offset);
+
+ /// @brief Parses received data
+ ///
+ /// @param buf pointer to packet buffer
+ /// @param buf_len length of packet buffer
+ /// @param offset offset to option data
+ /// @param parse_len specified option data length
+ ///
+ /// @return offset to the next unparsed char (just after parsed option)
+ ///
+ virtual unsigned int
+ unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len);
+
+ virtual std::string toText(int indent = 0);
+
+ /// @brief Sets a single address.
+ ///
+ /// @param addr a single address to be added
+ ///
+ void setAddress(const isc::asiolink::IOAddress& addr);
+
+ /// @brief Sets list of addresses.
+ ///
+ /// @param addrs a vector of addresses to be added
+ ///
+ void setAddresses(const AddressContainer& addrs);
+
+ /// @brief Returns vector with addresses.
+ ///
+ /// As user may want to use/modify this list, it is better to return
+ /// a copy rather than const reference to the original. This is
+ /// usually one or two addresses long, so it is not a big deal.
+ ///
+ /// @return vector with addresses
+ ///
+ AddressContainer
+ getAddresses() { return addrs_; };
+
+ /// returns complete option length (data length + DHCPv4/DHCPv6 option header)
+ virtual unsigned short len();
+
+protected:
+ AddressContainer addrs_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION6_ADDRLST_H_ */
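
As a quick illustration of the interface above, a minimal sketch (option code 23, i.e. DNS recursive name servers per RFC 3646, and the addresses are arbitrary example values):

    #include <boost/shared_array.hpp>
    #include <asiolink/io_address.h>
    #include <dhcp/option6_addrlst.h>

    using namespace isc::dhcp;
    using namespace isc::asiolink;

    void buildDnsServersOption() {
        // two DNS servers to be advertised to clients
        Option6AddrLst::AddressContainer servers;
        servers.push_back(IOAddress("2001:db8::1"));
        servers.push_back(IOAddress("2001:db8::2"));
        Option6AddrLst dns(23, servers); // 23 = DNS recursive name servers

        // serialize into a shared buffer
        boost::shared_array<uint8_t> buf(new uint8_t[128]);
        unsigned int end = dns.pack(buf, 128, 0);
        // end points just past the option: 4-byte header + 2 * 16 bytes
    }
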
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
new file mode 100644
index 0000000..46daee1
--- /dev/null
+++ b/src/lib/dhcp/option6_ia.cc
@@ -0,0 +1,136 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/dhcp6.h"
+#include "util/io_utilities.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+Option6IA::Option6IA(unsigned short type, unsigned int iaid)
+ :Option(Option::V6, type), iaid_(iaid) {
+}
+
+Option6IA::Option6IA(unsigned short type,
+ const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int option_len)
+ :Option(Option::V6, type) {
+ unpack(buf, buf_len, offset, option_len);
+}
+
+unsigned int
+Option6IA::pack(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (offset + len() > buf_len) {
+ isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
+ << ", buffer=" << buf_len << ": too small buffer.");
+ }
+
+ if (len() < 16 ) {
+ isc_throw(OutOfRange, "Attempt to build malformed IA option: len="
+ << len() << " is too small (at least 16 is required).");
+ }
+
+ uint8_t* ptr = &buf[offset];
+
+ ptr = writeUint16(type_, ptr);
+ ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
+ offset += OPTION6_HDR_LEN;
+
+ ptr = writeUint32(iaid_, ptr);
+ ptr = writeUint32(t1_, ptr);
+ ptr = writeUint32(t2_, ptr);
+ offset += OPTION6_IA_LEN;
+
+ offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
+ return offset;
+}
+
+unsigned int
+Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len) {
+ if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
+ isc_throw(OutOfRange, "Option " << type_ << " truncated");
+ }
+
+ iaid_ = readUint32(&buf[offset]);
+ offset += sizeof(uint32_t);
+
+ t1_ = readUint32(&buf[offset]);
+ offset += sizeof(uint32_t);
+
+ t2_ = readUint32(&buf[offset]);
+ offset += sizeof(uint32_t);
+
+ offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
+ parse_len - OPTION6_IA_LEN, options_);
+
+ return (offset);
+}
+
+std::string Option6IA::toText(int indent /* = 0*/) {
+ stringstream tmp;
+
+ for (int i=0; i<indent; i++)
+ tmp << " ";
+ tmp << "type=" << type_;
+
+ switch (type_) {
+ case D6O_IA_NA:
+ tmp << "(IA_NA)";
+ break;
+ case D6O_IA_PD:
+ tmp << "(IA_PD)";
+ break;
+ default:
+ tmp << "(unknown)";
+ }
+ tmp << " iaid=" << iaid_ << ", t1=" << t1_ << ", t2=" << t2_
+ << " " << options_.size() << " sub-options:" << endl;
+
+ for (OptionCollection::const_iterator opt=options_.begin();
+ opt!=options_.end();
+ ++opt) {
+ tmp << (*opt).second->toText(indent+2);
+ }
+ return tmp.str();
+}
+
+unsigned short Option6IA::len() {
+
+ unsigned short length = OPTION6_HDR_LEN /*header (4)*/ +
+ OPTION6_IA_LEN /* option content (12) */;
+
+ // length of all suboptions
+ for (Option::OptionCollection::iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+ return (length);
+}
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
new file mode 100644
index 0000000..516b2fc
--- /dev/null
+++ b/src/lib/dhcp/option6_ia.h
@@ -0,0 +1,137 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION_IA_H_
+#define OPTION_IA_H_
+
+#include <stdint.h>
+#include "option.h"
+
+namespace isc {
+namespace dhcp {
+
+class Option6IA: public Option {
+
+public:
+ /// Length of IA_NA and IA_PD content
+ const static size_t OPTION6_IA_LEN = 12;
+
+ /// @brief ctor, used for options constructed, usually during transmission
+ ///
+ /// @param type option type (usually 3 for IA_NA, 25 for IA_PD)
+ /// @param iaid identity association identifier (id of IA)
+ Option6IA(uint16_t type, unsigned int iaid);
+
+ /// @brief ctor, used for received options
+ ///
+ /// boost::shared_array allows sharing a buffer, but it requires that
+ /// different instances share pointer to the whole array, not point
+ /// to different elements in shared array. Therefore we need to share
+ /// pointer to the whole array and remember offset where data for
+ /// this option begins
+ ///
+ /// @param type option type (usually 3 for IA_NA, 25 for IA_PD)
+ /// @param buf buffer to be parsed
+ /// @param buf_len buffer length
+ /// @param offset offset in buffer
+ /// @param len number of bytes to parse
+ Option6IA(uint16_t type, const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len, unsigned int offset, unsigned int len);
+
+ /// Writes option in wire-format to buf, returns pointer to first unused
+ /// byte after stored option.
+ ///
+ /// @param buf buffer (option will be stored here)
+ /// @param buf_len (buffer length)
+ /// @param offset offset place where option should be stored
+ ///
+ /// @return offset to the first unused byte after stored option
+ unsigned int
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+ unsigned int offset);
+
+ /// @brief Parses received buffer
+ ///
+ /// Parses received buffer and returns offset to the first unused byte after
+ /// parsed option.
+ ///
+ /// @param buf pointer to buffer
+ /// @param buf_len length of buf
+ /// @param offset offset, where start parsing option
+ /// @param parse_len how many bytes should be parsed
+ ///
+ /// @return offset after last parsed octet
+ virtual unsigned int
+ unpack(const boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+ unsigned int offset, unsigned int parse_len);
+
+ /// Provides human readable text representation
+ ///
+ /// @param indent number of leading space characters
+ ///
+ /// @return string with text representation
+ virtual std::string
+ toText(int indent = 0);
+
+ /// Sets T1 timer.
+ ///
+ /// @param t1 t1 value to be set
+ void setT1(unsigned int t1) { t1_=t1; }
+
+
+ /// Sets T2 timer.
+ ///
+ /// @param t2 t2 value to be set
+ void setT2(unsigned int t2) { t2_=t2; }
+
+ /// Returns IA identifier.
+ ///
+ /// @return IAID value.
+ ///
+ unsigned int getIAID() const { return iaid_; }
+
+ /// Returns T1 timer.
+ ///
+ /// @return T1 value.
+ unsigned int getT1() const { return t1_; }
+
+ /// Returns T2 timer.
+ ///
+ /// @return T2 value.
+ unsigned int getT2() const { return t2_; }
+
+ /// @brief returns complete length of option
+ ///
+ /// Returns length of this option, including option header and suboptions
+ ///
+ /// @return length of this option
+ virtual unsigned short
+ len();
+
+protected:
+
+ /// keeps IA identifier
+ unsigned int iaid_;
+
+ /// keeps T1 timer value
+ unsigned int t1_;
+
+ /// keeps T2 timer value
+ unsigned int t2_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION_IA_H_ */
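
A minimal usage sketch for the class above (IAID and timer values are arbitrary examples; D6O_IA_NA comes from dhcp/dhcp6.h):

    #include <boost/shared_array.hpp>
    #include <dhcp/dhcp6.h>
    #include <dhcp/option6_ia.h>

    using namespace isc::dhcp;

    void buildIaNa() {
        Option6IA ia(D6O_IA_NA, 1234); // IA_NA with IAID=1234
        ia.setT1(900);                 // renew timer
        ia.setT2(1440);                // rebind timer

        boost::shared_array<uint8_t> buf(new uint8_t[128]);
        unsigned int end = ia.pack(buf, 128, 0);
        // end == 16: 4-byte option header + 12 bytes of IA content
    }
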
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
new file mode 100644
index 0000000..4177714
--- /dev/null
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -0,0 +1,132 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_iaaddr.h"
+#include "dhcp/dhcp6.h"
+#include "asiolink/io_address.h"
+#include "util/io_utilities.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+Option6IAAddr::Option6IAAddr(unsigned short type,
+ const isc::asiolink::IOAddress& addr,
+ unsigned int pref, unsigned int valid)
+ :Option(V6, type), addr_(addr), preferred_(pref),
+ valid_(valid) {
+}
+
+Option6IAAddr::Option6IAAddr(unsigned short type,
+ boost::shared_array<uint8_t> buf,
+ unsigned int buf_len, unsigned int offset,
+ unsigned int option_len)
+ :Option(V6, type), addr_("::") {
+ unpack(buf, buf_len, offset, option_len);
+}
+
+unsigned int
+Option6IAAddr::pack(boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset) {
+ if (offset + len() > buf_len) {
+ isc_throw(OutOfRange, "Failed to pack IAADDR option: len=" << len()
+ << ", buffer=" << buf_len << ": too small buffer.");
+ }
+
+ uint8_t* ptr = &buf[offset];
+
+ ptr = writeUint16(type_, ptr);
+
+ // len() returns complete option length. len field contains
+ // length without 4-byte option header
+ ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
+ offset += OPTION6_HDR_LEN;
+
+ memcpy(ptr, addr_.getAddress().to_v6().to_bytes().data(), 16);
+ ptr += V6ADDRESS_LEN;
+
+ ptr = writeUint32(preferred_, ptr);
+
+ ptr = writeUint32(valid_, ptr);
+ offset += OPTION6_IAADDR_LEN;
+
+ // pack suboptions (there shouldn't be any)
+ offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
+ return offset;
+}
+
+unsigned int
+Option6IAAddr::unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len) {
+ if ( parse_len < OPTION6_IAADDR_LEN || offset + OPTION6_IAADDR_LEN > buf_len) {
+ isc_throw(OutOfRange, "Option " << type_ << " truncated");
+ }
+
+ // 16 bytes: IPv6 address
+ addr_ = IOAddress::from_bytes(AF_INET6, &buf[offset]);
+ offset += V6ADDRESS_LEN;
+
+ preferred_ = readUint32(&buf[offset]);
+ offset += sizeof(uint32_t);
+
+ valid_ = readUint32(&buf[offset]);
+ offset += sizeof(uint32_t);
+ offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
+ parse_len - OPTION6_IAADDR_LEN, options_);
+
+ return offset;
+}
+
+std::string Option6IAAddr::toText(int indent /* =0 */) {
+ stringstream tmp;
+ for (int i=0; i<indent; i++)
+ tmp << " ";
+
+ tmp << "type=" << type_ << "(IAADDR) addr=" << addr_.toText()
+ << ", preferred-lft=" << preferred_ << ", valid-lft="
+ << valid_ << endl;
+
+ for (OptionCollection::const_iterator opt=options_.begin();
+ opt!=options_.end();
+ ++opt) {
+ tmp << (*opt).second->toText(indent+2);
+ }
+ return tmp.str();
+}
+
+unsigned short Option6IAAddr::len() {
+
+ unsigned short length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+
+ // length of all suboptions
+ // TODO implement:
+ // protected: unsigned short Option::lenHelper(int header_size);
+ for (Option::OptionCollection::iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+ return (length);
+}
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
new file mode 100644
index 0000000..60c5c48
--- /dev/null
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -0,0 +1,146 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION6_IAADDR_H_
+#define OPTION6_IAADDR_H_
+
+#include "asiolink/io_address.h"
+#include "dhcp/option.h"
+
+namespace isc {
+namespace dhcp {
+
+class Option6IAAddr: public Option {
+
+public:
+ /// length of the fixed part of the IAADDR option
+ static const size_t OPTION6_IAADDR_LEN = 24;
+
+ /// @brief ctor, used for options constructed (during transmission)
+ ///
+ /// @param type option type
+ /// @param addr reference to an address
+ /// @param preferred address preferred lifetime (in seconds)
+ /// @param valid address valid lifetime (in seconds)
+ Option6IAAddr(unsigned short type, const isc::asiolink::IOAddress& addr,
+ unsigned int preferred, unsigned int valid);
+
+ /// ctor, used for received options
+ /// boost::shared_array allows sharing a buffer, but it requires that
+ /// different instances share a pointer to the whole array rather than
+ /// point to different elements of it, so we share a pointer to the
+ /// whole array and remember the offset where this option's data begins.
+ ///
+ /// @param type option type
+ /// @param buf pointer to a buffer
+ /// @param buf_len buffer length
+ /// @param offset offset to first data byte in that buffer
+ /// @param len data length of this option
+ Option6IAAddr(unsigned short type, boost::shared_array<uint8_t> buf,
+ unsigned int buf_len, unsigned int offset, unsigned int len);
+
+ /// @brief Writes option in wire-format.
+ ///
+ /// Writes option in wire-format to buf, returns pointer to first unused
+ /// byte after stored option.
+ ///
+ /// @param buf pointer to a buffer
+ /// @param buf_len length of the buffer
+ /// @param offset offset to the place where the option should be stored
+ ///
+ /// @return offset to first unused byte after stored option
+ unsigned int
+ pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+ unsigned int offset);
+
+ /// @brief Parses buffer.
+ ///
+ /// Parses received buffer, returns offset to the first unused byte after
+ /// parsed option.
+ ///
+ /// @param buf pointer to buffer
+ /// @param buf_len length of buf
+ /// @param offset offset, where start parsing option
+ /// @param parse_len how many bytes should be parsed
+ ///
+ /// @return offset after last parsed octet
+ virtual unsigned int
+ unpack(const boost::shared_array<uint8_t>& buf,
+ unsigned int buf_len,
+ unsigned int offset,
+ unsigned int parse_len);
+
+ /// Returns string representation of the option.
+ ///
+ /// @param indent number of spaces before printing text
+ ///
+ /// @return string with text representation.
+ virtual std::string
+ toText(int indent = 0);
+
+
+ /// sets address in this option.
+ ///
+ /// @param addr address to be sent in this option
+ void setAddress(const isc::asiolink::IOAddress& addr) { addr_ = addr; }
+
+ /// Sets preferred lifetime (in seconds)
+ ///
+ /// @param pref address preferred lifetime (in seconds)
+ ///
+ void setPreferred(unsigned int pref) { preferred_=pref; }
+
+ /// Sets valid lifetime (in seconds).
+ ///
+ /// @param valid address valid lifetime (in seconds)
+ ///
+ void setValid(unsigned int valid) { valid_=valid; }
+
+ /// Returns address contained within this option.
+ ///
+ /// @return address
+ isc::asiolink::IOAddress
+ getAddress() const { return addr_; }
+
+ /// Returns preferred lifetime of an address.
+ ///
+ /// @return preferred lifetime (in seconds)
+ unsigned int
+ getPreferred() const { return preferred_; }
+
+ /// Returns valid lifetime of an address.
+ ///
+ /// @return valid lifetime (in seconds)
+ unsigned int
+ getValid() const { return valid_; }
+
+ /// returns complete option length (data length + DHCPv4/DHCPv6 option header)
+ virtual unsigned short
+ len();
+
+protected:
+ /// contains an IPv6 address
+ isc::asiolink::IOAddress addr_;
+
+ /// contains preferred-lifetime timer (in seconds)
+ unsigned int preferred_;
+
+ /// contains valid-lifetime timer (in seconds)
+ unsigned int valid_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION6_IAADDR_H_ */
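
A minimal usage sketch (option code 5 is IAADDR per RFC 3315; the address and lifetimes are arbitrary example values):

    #include <boost/shared_array.hpp>
    #include <asiolink/io_address.h>
    #include <dhcp/option6_iaaddr.h>

    using namespace isc::dhcp;
    using namespace isc::asiolink;

    void buildIaAddr() {
        Option6IAAddr lease(5, IOAddress("2001:db8::1234"),
                            3600 /* preferred */, 7200 /* valid */);

        boost::shared_array<uint8_t> buf(new uint8_t[64]);
        unsigned int end = lease.pack(buf, 64, 0);
        // end == 28: 4-byte header + 16-byte address + two 32-bit lifetimes
    }
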
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
new file mode 100644
index 0000000..ba07a10
--- /dev/null
+++ b/src/lib/dhcp/pkt4.cc
@@ -0,0 +1,255 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dhcp/pkt4.h>
+#include <dhcp/libdhcp.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <iostream>
+#include <sstream>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace isc {
+namespace dhcp {
+
+const IOAddress DEFAULT_ADDRESS("0.0.0.0");
+
+Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
+ :local_addr_(DEFAULT_ADDRESS),
+ remote_addr_(DEFAULT_ADDRESS),
+ iface_(""),
+ ifindex_(0),
+ local_port_(DHCP4_SERVER_PORT),
+ remote_port_(DHCP4_CLIENT_PORT),
+ op_(DHCPTypeToBootpType(msg_type)),
+ htype_(HTYPE_ETHER),
+ hlen_(0),
+ hops_(0),
+ transid_(transid),
+ secs_(0),
+ flags_(0),
+ ciaddr_(DEFAULT_ADDRESS),
+ yiaddr_(DEFAULT_ADDRESS),
+ siaddr_(DEFAULT_ADDRESS),
+ giaddr_(DEFAULT_ADDRESS),
+ bufferIn_(NULL, 0), // not used, this is TX packet
+ bufferOut_(DHCPV4_PKT_HDR_LEN),
+ msg_type_(msg_type)
+{
+ /// TODO: fixed fields, uncomment in ticket #1224
+ memset(chaddr_, 0, MAX_CHADDR_LEN);
+ memset(sname_, 0, MAX_SNAME_LEN);
+ memset(file_, 0, MAX_FILE_LEN);
+}
+
+Pkt4::Pkt4(const uint8_t* data, size_t len)
+ :local_addr_(DEFAULT_ADDRESS),
+ remote_addr_(DEFAULT_ADDRESS),
+ iface_(""),
+ ifindex_(-1),
+ local_port_(DHCP4_SERVER_PORT),
+ remote_port_(DHCP4_CLIENT_PORT),
+ /// TODO Fixed fields, uncomment in ticket #1224
+ op_(BOOTREQUEST),
+ transid_(0),
+ secs_(0),
+ flags_(0),
+ ciaddr_(DEFAULT_ADDRESS),
+ yiaddr_(DEFAULT_ADDRESS),
+ siaddr_(DEFAULT_ADDRESS),
+ giaddr_(DEFAULT_ADDRESS),
+ bufferIn_(data, len),
+ bufferOut_(0), // not used, this is RX packet
+ msg_type_(DHCPDISCOVER)
+{
+ if (len < DHCPV4_PKT_HDR_LEN) {
+ isc_throw(OutOfRange, "Truncated DHCPv4 packet (len=" << len
+ << " received, at least " << DHCPV4_PKT_HDR_LEN
+ << "is expected");
+ }
+}
+
+size_t
+Pkt4::len() {
+ size_t length = DHCPV4_PKT_HDR_LEN; // DHCPv4 header
+
+ // ... and sum of lengths of all options
+ for (Option::OptionCollection::const_iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
+ return (length);
+}
+
+bool
+Pkt4::pack() {
+ bufferOut_.writeUint8(op_);
+ bufferOut_.writeUint8(htype_);
+ bufferOut_.writeUint8(hlen_);
+ bufferOut_.writeUint8(hops_);
+ bufferOut_.writeUint32(transid_);
+ bufferOut_.writeUint16(secs_);
+ bufferOut_.writeUint16(flags_);
+ bufferOut_.writeUint32(ciaddr_);
+ bufferOut_.writeUint32(yiaddr_);
+ bufferOut_.writeUint32(siaddr_);
+ bufferOut_.writeUint32(giaddr_);
+ bufferOut_.writeData(chaddr_, MAX_CHADDR_LEN);
+ bufferOut_.writeData(sname_, MAX_SNAME_LEN);
+ bufferOut_.writeData(file_, MAX_FILE_LEN);
+
+ LibDHCP::packOptions(bufferOut_, options_);
+
+ return (true);
+}
+bool
+Pkt4::unpack() {
+ if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+ isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
+ << bufferIn_.getLength() << " received, at least "
+ << DHCPV4_PKT_HDR_LEN << "is expected");
+ }
+
+ op_ = bufferIn_.readUint8();
+ htype_ = bufferIn_.readUint8();
+ hlen_ = bufferIn_.readUint8();
+ hops_ = bufferIn_.readUint8();
+ transid_ = bufferIn_.readUint32();
+ secs_ = bufferIn_.readUint16();
+ flags_ = bufferIn_.readUint16();
+ ciaddr_ = IOAddress(bufferIn_.readUint32());
+ yiaddr_ = IOAddress(bufferIn_.readUint32());
+ siaddr_ = IOAddress(bufferIn_.readUint32());
+ giaddr_ = IOAddress(bufferIn_.readUint32());
+ bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
+ bufferIn_.readData(sname_, MAX_SNAME_LEN);
+ bufferIn_.readData(file_, MAX_FILE_LEN);
+
+ size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+ vector<uint8_t> optsBuffer;
+ // first use of readVector
+ bufferIn_.readVector(optsBuffer, opts_len);
+ LibDHCP::unpackOptions4(optsBuffer, options_);
+
+ return (true);
+}
+
+std::string
+Pkt4::toText() {
+ stringstream tmp;
+ tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
+ << " remoteAddr=[" << remote_addr_.toText()
+ << "]:" << remote_port_ << endl;
+ tmp << "msgtype=" << msg_type_
+ << ", transid=0x" << hex << transid_ << dec
+ << endl;
+
+ return tmp.str();
+}
+
+void
+Pkt4::setHWAddr(uint8_t hType, uint8_t hlen,
+ const std::vector<uint8_t>& macAddr) {
+ /// TODO Rewrite this once support for client-identifier option
+ /// is implemented (ticket 1228?)
+ if (hlen>MAX_CHADDR_LEN) {
+ isc_throw(OutOfRange, "Hardware address (len=" << hlen
+ << " too long. Max " << MAX_CHADDR_LEN << " supported.");
+ }
+ if ( (macAddr.size() == 0) && (hlen > 0) ) {
+ isc_throw(OutOfRange, "Invalid HW Address specified");
+ }
+
+ htype_ = hType;
+ hlen_ = hlen;
+ memset(chaddr_, 0, MAX_CHADDR_LEN);
+ memcpy(chaddr_, &macAddr[0], hlen);
+}
+
+void
+Pkt4::setSname(const uint8_t* sname, size_t snameLen /*= MAX_SNAME_LEN*/) {
+ if (snameLen > MAX_SNAME_LEN) {
+ isc_throw(OutOfRange, "sname field (len=" << snameLen
+ << ") too long, Max " << MAX_SNAME_LEN << " supported.");
+ }
+ memset(sname_, 0, MAX_SNAME_LEN);
+ memcpy(sname_, sname, snameLen);
+
+ // no need to store snameLen as any empty space is filled with 0s
+}
+
+void
+Pkt4::setFile(const uint8_t* file, size_t fileLen /*= MAX_FILE_LEN*/) {
+ if (fileLen > MAX_FILE_LEN) {
+ isc_throw(OutOfRange, "file field (len=" << fileLen
+ << ") too long, Max " << MAX_FILE_LEN << " supported.");
+ }
+ memset(file_, 0, MAX_FILE_LEN);
+ memcpy(file_, file, fileLen);
+
+ // no need to store fileLen as any empty space is filled with 0s
+}
+
+uint8_t
+Pkt4::DHCPTypeToBootpType(uint8_t dhcpType) {
+ switch (dhcpType) {
+ case DHCPDISCOVER:
+ case DHCPREQUEST:
+ case DHCPDECLINE:
+ case DHCPRELEASE:
+ case DHCPINFORM:
+ case DHCPLEASEQUERY:
+ return (BOOTREQUEST);
+ case DHCPACK:
+ case DHCPNAK:
+ case DHCPOFFER:
+ case DHCPLEASEUNASSIGNED:
+ case DHCPLEASEUNKNOWN:
+ case DHCPLEASEACTIVE:
+ return (BOOTREPLY);
+ default:
+ isc_throw(OutOfRange, "Invalid message type: "
+ << static_cast<int>(dhcpType) );
+ }
+}
+
+void
+Pkt4::addOption(boost::shared_ptr<Option> opt) {
+ // check for uniqueness (DHCPv4 options must be unique)
+ if (getOption(opt->getType())) {
+ isc_throw(BadValue, "Option " << opt->getType()
+ << " already present in this message.");
+ }
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt4::getOption(uint8_t type) {
+ Option::OptionCollection::const_iterator x = options_.find(type);
+ if (x!=options_.end()) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+
+} // end of namespace isc::dhcp
+
+} // end of namespace isc
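For reference, the 236-byte fixed header written by Pkt4::pack() above is laid out as follows (field sizes in octets):

    op(1) htype(1) hlen(1) hops(1) xid(4) secs(2) flags(2)
    ciaddr(4) yiaddr(4) siaddr(4) giaddr(4)
    chaddr(16) sname(64) file(128)        -- total 236, options follow
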
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
new file mode 100644
index 0000000..8517091
--- /dev/null
+++ b/src/lib/dhcp/pkt4.h
@@ -0,0 +1,409 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT4_H
+#define PKT4_H
+
+#include <iostream>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include "asiolink/io_address.h"
+#include "util/buffer.h"
+#include "dhcp/option.h"
+
+namespace isc {
+
+namespace dhcp {
+
+class Pkt4 {
+public:
+
+ /// length of the CHADDR field in DHCPv4 message
+ const static size_t MAX_CHADDR_LEN = 16;
+
+ /// length of the SNAME field in DHCPv4 message
+ const static size_t MAX_SNAME_LEN = 64;
+
+ /// length of the FILE field in DHCPv4 message
+ const static size_t MAX_FILE_LEN = 128;
+
+ /// specifies DHCPv4 packet header length (fixed part)
+ const static size_t DHCPV4_PKT_HDR_LEN = 236;
+
+ /// Constructor, used in replying to a message.
+ ///
+ /// @param msg_type type of message (e.g. DHCPDISCOVER=1)
+ /// @param transid transaction-id
+ Pkt4(uint8_t msg_type, uint32_t transid);
+
+ /// @brief Constructor, used in message reception.
+ ///
+ /// Creates new message. Pkt4 will copy data to bufferIn_
+ /// buffer on creation.
+ ///
+ /// @param data pointer to received data
+ /// @param len length of the received data
+ Pkt4(const uint8_t* data, size_t len);
+
+ /// @brief Prepares on-wire format of DHCPv4 packet.
+ ///
+ /// Prepares on-wire format of message and all its options.
+ /// Options must be stored in options_ field.
+ /// Output buffer will be stored in bufferOut_.
+ ///
+ /// @return true if packing procedure was successful
+ bool
+ pack();
+
+ /// @brief Parses on-wire form of DHCPv4 packet.
+ ///
+ /// Parses received packet, stored in on-wire format in bufferIn_.
+ ///
+ /// Will create a collection of option objects that will
+ /// be stored in options_ container.
+ ///
+ /// @return true, if parsing was successful
+ bool
+ unpack();
+
+ /// @brief Returns text representation of the packet.
+ ///
+ /// This function is useful mainly for debugging.
+ ///
+ /// @return string with text representation
+ std::string
+ toText();
+
+ /// @brief Returns the size of the required buffer to build the packet.
+ ///
+ /// Returns the size of the required buffer to build the packet with
+ /// the current set of packet options.
+ ///
+ /// @return number of bytes required to build this packet
+ size_t
+ len();
+
+ /// Sets hops field
+ ///
+ /// @param hops value to be set
+ void
+ setHops(uint8_t hops) { hops_ = hops; };
+
+ /// Returns hops field
+ ///
+ /// @return hops field
+ uint8_t
+ getHops() const { return (hops_); };
+
+ // Note: There's no need to manipulate OP field directly,
+ // thus no setOp() method. See op_ comment.
+
+ /// Returns op field
+ ///
+ /// @return op field
+ uint8_t
+ getOp() const { return (op_); };
+
+ /// Sets secs field
+ ///
+ /// @param secs value to be set
+ void
+ setSecs(uint16_t secs) { secs_ = secs; };
+
+ /// Returns secs field
+ ///
+ /// @return secs field
+ uint16_t
+ getSecs() const { return (secs_); };
+
+ /// Sets flags field
+ ///
+ /// @param flags value to be set
+ void
+ setFlags(uint16_t flags) { flags_ = flags; };
+
+ /// Returns flags field
+ ///
+ /// @return flags field
+ uint16_t
+ getFlags() const { return (flags_); };
+
+
+ /// Returns ciaddr field
+ ///
+ /// @return ciaddr field
+ const isc::asiolink::IOAddress&
+ getCiaddr() const { return (ciaddr_); };
+
+ /// Sets ciaddr field
+ ///
+ /// @param ciaddr value to be set
+ void
+ setCiaddr(const isc::asiolink::IOAddress& ciaddr) { ciaddr_ = ciaddr; };
+
+
+ /// Returns siaddr field
+ ///
+ /// @return siaddr field
+ const isc::asiolink::IOAddress&
+ getSiaddr() const { return (siaddr_); };
+
+ /// Sets siaddr field
+ ///
+ /// @param siaddr value to be set
+ void
+ setSiaddr(const isc::asiolink::IOAddress& siaddr) { siaddr_ = siaddr; };
+
+
+ /// Returns yiaddr field
+ ///
+ /// @return yiaddr field
+ const isc::asiolink::IOAddress&
+ getYiaddr() const { return (yiaddr_); };
+
+ /// Sets yiaddr field
+ ///
+ /// @param yiaddr value to be set
+ void
+ setYiaddr(const isc::asiolink::IOAddress& yiaddr) { yiaddr_ = yiaddr; };
+
+
+ /// Returns giaddr field
+ ///
+ /// @return giaddr field
+ const isc::asiolink::IOAddress&
+ getGiaddr() const { return (giaddr_); };
+
+ /// Sets giaddr field
+ ///
+ /// @param giaddr value to be set
+ void
+ setGiaddr(const isc::asiolink::IOAddress& giaddr) { giaddr_ = giaddr; };
+
+ /// Returns value of transaction-id field
+ ///
+ /// @return transaction-id
+ uint32_t getTransid() const { return (transid_); };
+
+ /// Returns message type (e.g. 1 = DHCPDISCOVER)
+ ///
+ /// @return message type
+ uint8_t
+ getType() const { return (msg_type_); }
+
+ /// Sets message type (e.g. 1 = DHCPDISCOVER)
+ ///
+ /// @param type message type to be set
+ void setType(uint8_t type) { msg_type_=type; };
+
+ /// @brief Returns sname field
+ ///
+ /// Note: This is 64 bytes long field. It doesn't have to be
+ /// null-terminated. Do not use strlen() or similar on it.
+ ///
+ /// @return sname field
+ const std::vector<uint8_t>
+ getSname() const { return (std::vector<uint8_t>(sname_, &sname_[MAX_SNAME_LEN])); };
+
+ /// Sets sname field
+ ///
+ /// @param sname value to be set
+ void
+ setSname(const uint8_t* sname, size_t snameLen = MAX_SNAME_LEN);
+
+ /// @brief Returns file field
+ ///
+ /// Note: This is 128 bytes long field. It doesn't have to be
+ /// null-terminated. Do not use strlen() or similar on it.
+ ///
+ /// @return file field
+ const std::vector<uint8_t>
+ getFile() const { return (std::vector<uint8_t>(file_, &file_[MAX_FILE_LEN])); };
+
+ /// Sets file field
+ ///
+ /// @param file value to be set
+ void
+ setFile(const uint8_t* file, size_t fileLen = MAX_FILE_LEN);
+
+ /// @brief Sets hardware address.
+ ///
+ /// Sets parameters of hardware address. hlen specifies
+ /// length of macAddr buffer. Content of macAddr buffer
+ /// will be copied to appropriate field.
+ ///
+ /// Note: macAddr must be a buffer of at least hlen bytes.
+ ///
+ /// @param hType hardware type (will be sent in htype field)
+ /// @param hlen hardware length (will be sent in hlen field)
+ /// @param macAddr pointer to hardware address
+ void setHWAddr(uint8_t hType, uint8_t hlen,
+ const std::vector<uint8_t>& macAddr);
+
+ /// Returns htype field
+ ///
+ /// @return hardware type
+ uint8_t
+ getHtype() const { return (htype_); };
+
+ /// Returns hlen field
+ ///
+ /// @return hardware address length
+ uint8_t
+ getHlen() const { return (hlen_); };
+
+ /// @brief Returns chaddr field.
+ ///
+ /// Note: This is 16 bytes long field. It doesn't have to be
+ /// null-terminated. Do not use strlen() or similar on it.
+ ///
+ /// @return pointer to hardware address
+ const uint8_t*
+ getChaddr() const { return (chaddr_); };
+
+
+ /// @brief Returns reference to output buffer.
+ ///
+ /// Returned buffer will contain reasonable data only for an
+ /// output (TX) packet and only after pack() was called. The buffer
+ /// is valid only as long as the Pkt4 object itself is valid.
+ ///
+ /// For an RX packet, or a TX packet before pack() is called,
+ /// the returned buffer has zero length.
+ ///
+ /// @return reference to output buffer
+ const isc::util::OutputBuffer&
+ getBuffer() const { return (bufferOut_); };
+
+ /// @brief Add an option.
+ ///
+ /// Throws BadValue if option with that type is already present.
+ ///
+ /// @param opt option to be added
+ void
+ addOption(boost::shared_ptr<Option> opt);
+
+ /// @brief Returns an option of specified type.
+ ///
+ /// @param opt_type type of the requested option
+ ///
+ /// @return option of the requested type (or NULL if no such option is present)
+ boost::shared_ptr<Option>
+ getOption(uint8_t opt_type);
+
+protected:
+
+ /// converts DHCP message type to BOOTP op type
+ ///
+ /// @param dhcpType DHCP message type (e.g. DHCPDISCOVER)
+ ///
+ /// @return BOOTP type (BOOTREQUEST or BOOTREPLY)
+ uint8_t
+ DHCPTypeToBootpType(uint8_t dhcpType);
+
+ /// local address (dst if receiving packet, src if sending packet)
+ isc::asiolink::IOAddress local_addr_;
+
+ /// remote address (src if receiving packet, dst if sending packet)
+ isc::asiolink::IOAddress remote_addr_;
+
+ /// name of the network interface the packet was received/to be sent over
+ std::string iface_;
+
+ /// @brief interface index
+ ///
+ /// Each network interface has a unique ifindex assigned. It is a
+ /// functional equivalent of the name, but sometimes more useful, e.g.
+ /// on systems that allow spaces in interface names (e.g. MS Windows).
+ int ifindex_;
+
+ /// local UDP port
+ int local_port_;
+
+ /// remote UDP port
+ int remote_port_;
+
+ /// @brief message operation code
+ ///
+ /// Note: This is legacy BOOTP field. There's no need to manipulate it
+ /// directly. Its value is set based on DHCP message type. Note that
+ /// DHCPv4 protocol reuses BOOTP message format, so this field is
+ /// kept due to BOOTP format. This is NOT DHCPv4 type (DHCPv4 message
+ /// type is kept in message type option).
+ uint8_t op_;
+
+ /// link-layer address type
+ uint8_t htype_;
+
+ /// link-layer address length
+ uint8_t hlen_;
+
+ /// Number of relay agents traversed
+ uint8_t hops_;
+
+ /// DHCPv4 transaction-id (32 bits, not 24 bits as in DHCPv6)
+ uint32_t transid_;
+
+ /// elapsed (number of seconds since beginning of transmission)
+ uint16_t secs_;
+
+ /// flags
+ uint16_t flags_;
+
+ /// ciaddr field (32 bits): Client's IP address
+ isc::asiolink::IOAddress ciaddr_;
+
+ /// yiaddr field (32 bits): Client's IP address ("your"), set by server
+ isc::asiolink::IOAddress yiaddr_;
+
+ /// siaddr field (32 bits): next server IP address in boot process (e.g. TFTP)
+ isc::asiolink::IOAddress siaddr_;
+
+ /// giaddr field (32 bits): Gateway IP address
+ isc::asiolink::IOAddress giaddr_;
+
+ /// Hardware address field (16 bytes)
+ uint8_t chaddr_[MAX_CHADDR_LEN];
+
+ /// sname field (64 bytes)
+ uint8_t sname_[MAX_SNAME_LEN];
+
+ /// file field (128 bytes)
+ uint8_t file_[MAX_FILE_LEN];
+
+ // end of real DHCPv4 fields
+
+ /// input buffer (used during message reception)
+ /// Note that hooks may need to modify the incoming buffer, so it
+ /// may eventually have to be stored in a modifiable buffer type.
+ isc::util::InputBuffer bufferIn_;
+
+ /// output buffer (used during message transmission)
+ isc::util::OutputBuffer bufferOut_;
+
+ /// message type (e.g. 1=DHCPDISCOVER)
+ /// TODO: this will eventually be replaced with DHCP Message Type
+ /// option (option 53)
+ uint8_t msg_type_;
+
+ /// collection of options present in this message
+ isc::dhcp::Option::OptionCollection options_;
+}; // Pkt4 class
+
+} // isc::dhcp namespace
+
+} // isc namespace
+
+#endif
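
A minimal sketch of the transmit path described above (addresses and transaction-id are arbitrary example values; DHCPOFFER comes from dhcp/dhcp4.h):

    #include <asiolink/io_address.h>
    #include <dhcp/dhcp4.h>
    #include <dhcp/pkt4.h>

    using namespace isc::dhcp;
    using namespace isc::asiolink;

    void buildOffer() {
        // server-side reply reusing the client's transaction-id
        Pkt4 offer(DHCPOFFER, 0x12345678);
        offer.setYiaddr(IOAddress("192.0.2.100")); // address offered to client
        offer.setSiaddr(IOAddress("192.0.2.1"));   // next server (e.g. TFTP)
        offer.pack();

        // on-wire form: 236-byte fixed header followed by options
        const isc::util::OutputBuffer& wire = offer.getBuffer();
        size_t wire_len = wire.getLength(); // == offer.len()
    }
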
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
new file mode 100644
index 0000000..84c5729
--- /dev/null
+++ b/src/lib/dhcp/pkt6.cc
@@ -0,0 +1,232 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/pkt6.h"
+#include "dhcp/libdhcp.h"
+#include "exceptions/exceptions.h"
+#include <iostream>
+#include <sstream>
+
+using namespace std;
+using namespace isc::dhcp;
+
+namespace isc {
+
+Pkt6::Pkt6(unsigned int dataLen, DHCPv6Proto proto /* = UDP */)
+ :data_len_(dataLen),
+ local_addr_("::"),
+ remote_addr_("::"),
+ iface_(""),
+ ifindex_(-1),
+ local_port_(-1),
+ remote_port_(-1),
+ proto_(proto),
+ msg_type_(-1),
+ transid_(rand()%0xffffff)
+{
+
+ data_ = boost::shared_array<uint8_t>(new uint8_t[dataLen]);
+ data_len_ = dataLen;
+}
+
+Pkt6::Pkt6(uint8_t msg_type,
+ unsigned int transid,
+ DHCPv6Proto proto /*= UDP*/)
+ :local_addr_("::"),
+ remote_addr_("::"),
+ iface_(""),
+ ifindex_(-1),
+ local_port_(-1),
+ remote_port_(-1),
+ proto_(proto),
+ msg_type_(msg_type),
+ transid_(transid) {
+
+ data_ = boost::shared_array<uint8_t>(new uint8_t[4]);
+ data_len_ = 4;
+}
+
+unsigned short
+Pkt6::len() {
+ unsigned int length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
+
+ for (Option::OptionCollection::iterator it = options_.begin();
+ it != options_.end();
+ ++it) {
+ length += (*it).second->len();
+ }
+
+ return (length);
+}
+
+
+bool
+Pkt6::pack() {
+ switch (proto_) {
+ case UDP:
+ return packUDP();
+ case TCP:
+ return packTCP();
+ default:
+ isc_throw(BadValue, "Invalid protocol specified (non-TCP, non-UDP)");
+ }
+ return (false); // never happens
+}
+
+bool
+Pkt6::packUDP() {
+
+ // TODO: Once OutputBuffer is used here, something like this
+ // will be used. Yikes! That's ugly.
+ // bufferOut_.writeData(ciaddr_.getAddress().to_v6().to_bytes().data(), 16);
+ // It is better to implement a method in IOAddress that extracts
+ // vector<uint8_t>
+
+ unsigned short length = len();
+ if (data_len_ < length) {
+ cout << "Previous len=" << data_len_ << ", allocating new buffer: len="
+ << length << endl;
+
+ // May throw exception if out of memory. That is rather fatal,
+ // so we don't catch this
+ data_ = boost::shared_array<uint8_t>(new uint8_t[length]);
+ data_len_ = length;
+ }
+
+ data_len_ = length;
+ try {
+ // DHCPv6 header: message-type (1 octet) + transaction id (3 octets)
+ data_[0] = msg_type_;
+
+ // store 3-octet transaction-id
+ data_[1] = (transid_ >> 16) & 0xff;
+ data_[2] = (transid_ >> 8) & 0xff;
+ data_[3] = (transid_) & 0xff;
+
+ // the rest are options
+ unsigned short offset = LibDHCP::packOptions6(data_, length,
+ 4/*offset*/,
+ options_);
+
+ // sanity check
+ if (offset != length) {
+ isc_throw(OutOfRange, "Packet build failed: expected size="
+ << length << ", actual len=" << offset);
+ }
+ }
+ catch (const Exception& e) {
+ cout << "Packet build failed:" << e.what() << endl;
+ return (false);
+ }
+ // Limited verbosity of this method
+ // cout << "Packet built, len=" << len() << endl;
+ return (true);
+}
+
+bool
+Pkt6::packTCP() {
+ /// TODO Implement this function.
+ isc_throw(Unexpected, "DHCPv6 over TCP (bulk leasequery and failover)"
+ "not implemented yet.");
+}
+
+bool
+Pkt6::unpack() {
+ switch (proto_) {
+ case UDP:
+ return unpackUDP();
+ case TCP:
+ return unpackTCP();
+ default:
+ isc_throw(BadValue, "Invalid protocol specified (non-TCP, non-UDP)");
+ }
+ return (false); // never happens
+}
+
+bool
+Pkt6::unpackUDP() {
+ if (data_len_ < 4) {
+ std::cout << "DHCPv6 packet truncated. Only " << data_len_
+ << " bytes. Need at least 4." << std::endl;
+ return (false);
+ }
+ msg_type_ = data_[0];
+ transid_ = ( (data_[1]) << 16 ) +
+ ((data_[2]) << 8) + (data_[3]);
+ transid_ = transid_ & 0xffffff;
+
+ unsigned int offset = LibDHCP::unpackOptions6(data_,
+ data_len_,
+ 4, //offset
+ data_len_ - 4,
+ options_);
+ if (offset != data_len_) {
+ cout << "DHCPv6 packet contains trailing garbage. Parsed "
+ << offset << " bytes, packet is " << data_len_ << " bytes."
+ << endl;
+ // just a warning. Ignore trailing garbage and continue
+ }
+ return (true);
+}
+
+bool
+Pkt6::unpackTCP() {
+ isc_throw(Unexpected, "DHCPv6 over TCP (bulk leasequery and failover) "
+ "not implemented yet.");
+}
+
+
+std::string
+Pkt6::toText() {
+ stringstream tmp;
+ tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
+ << " remoteAddr=[" << remote_addr_.toText()
+ << "]:" << remote_port_ << endl;
+ tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
+ << dec << endl;
+ for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
+ opt != options_.end();
+ ++opt) {
+ tmp << opt->second->toText() << std::endl;
+ }
+ return tmp.str();
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt6::getOption(unsigned short opt_type) {
+ isc::dhcp::Option::OptionCollection::const_iterator x = options_.find(opt_type);
+ if (x!=options_.end()) {
+ return (*x).second;
+ }
+ return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+void
+Pkt6::addOption(boost::shared_ptr<Option> opt) {
+ options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+bool
+Pkt6::delOption(unsigned short type) {
+ isc::dhcp::Option::OptionCollection::iterator x = options_.find(type);
+ if (x!=options_.end()) {
+ options_.erase(x);
+ return (true); // delete successful
+ }
+ return (false); // can't find option to be deleted
+}
+
+};
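As a tiny worked example of the header written by packUDP() above (msg-type 1, i.e. SOLICIT, and an arbitrary transaction-id of 0x010203):

    01              msg-type
    01 02 03        transaction-id (3 octets)
    ...             packed options follow
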
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
new file mode 100644
index 0000000..019eeb2
--- /dev/null
+++ b/src/lib/dhcp/pkt6.h
@@ -0,0 +1,234 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT6_H
+#define PKT6_H
+
+#include <iostream>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include "asiolink/io_address.h"
+#include "dhcp/option.h"
+
+namespace isc {
+
+namespace dhcp {
+
+class Pkt6 {
+public:
+ /// specifies DHCPv6 packet header length
+ const static size_t DHCPV6_PKT_HDR_LEN = 4;
+
+ /// DHCPv6 transport protocol
+ enum DHCPv6Proto {
+ UDP = 0, // most packets are UDP
+ TCP = 1 // there are TCP DHCPv6 packets (bulk leasequery, failover)
+ };
+
+ /// Constructor, used in replying to a message
+ ///
+ /// @param msg_type type of message (SOLICIT=1, ADVERTISE=2, ...)
+ /// @param transid transaction-id
+ /// @param proto protocol (TCP or UDP)
+ Pkt6(unsigned char msg_type,
+ unsigned int transid,
+ DHCPv6Proto proto = UDP);
+
+ /// Constructor, used in message transmission
+ ///
+ /// Creates new message. Transaction-id will be randomized.
+ ///
+ /// @param len size of buffer to be allocated for this packet.
+ /// @param proto protocol (usually UDP, but TCP will be supported eventually)
+ Pkt6(unsigned int len, DHCPv6Proto proto = UDP);
+
+ /// @brief Prepares on-wire format.
+ ///
+ /// Prepares on-wire format of message and all its options.
+ /// Options must be stored in options_ field.
+ /// Output buffer will be stored in data_. Length
+ /// will be set in data_len_.
+ ///
+ /// @return true if packing procedure was successful
+ bool
+ pack();
+
+ /// @brief Dispatch method that handles binary packet parsing.
+ ///
+ /// This method calls appropriate dispatch function (unpackUDP or
+ /// unpackTCP).
+ ///
+ /// @return true if parsing was successful
+ bool
+ unpack();
+
+ /// Returns protocol of this packet (UDP or TCP)
+ ///
+ /// @return protocol type
+ DHCPv6Proto
+ getProto();
+
+ /// Sets protocol of this packet.
+ ///
+ /// @param proto protocol (UDP or TCP)
+ ///
+ void
+ setProto(DHCPv6Proto proto = UDP) { proto_ = proto; }
+
+ /// @brief Returns text representation of the packet.
+ ///
+ /// This function is useful mainly for debugging.
+ ///
+ /// @return string with text representation
+ std::string
+ toText();
+
+ /// @brief Returns calculated length of the packet.
+ ///
+ /// This function returns the size of the buffer required to build this packet.
+ /// To use that function, options_ field must be set.
+ ///
+ /// @return number of bytes required to build this packet
+ unsigned short
+ len();
+
+ /// Returns message type (e.g. 1 = SOLICIT)
+ ///
+ /// @return message type
+ unsigned char
+ getType() { return (msg_type_); }
+
+ /// Sets message type (e.g. 1 = SOLICIT)
+ ///
+ /// @param type message type to be set
+ void setType(unsigned char type) { msg_type_=type; };
+
+ /// Returns value of transaction-id field
+ ///
+ /// @return transaction-id
+ unsigned int getTransid() { return (transid_); };
+
+ /// Adds an option to this packet.
+ ///
+ /// @param opt option to be added.
+ void addOption(boost::shared_ptr<isc::dhcp::Option> opt);
+
+ /// @brief Returns the first option of specified type.
+ ///
+ /// Returns the first option of specified type. Note that in DHCPv6 several
+ /// instances of the same option are allowed (and frequently used).
+ /// See getOptions().
+ ///
+ /// @param opt_type option type we are looking for
+ ///
+ /// @return pointer to found option (or NULL)
+ boost::shared_ptr<isc::dhcp::Option>
+ getOption(unsigned short opt_type);
+
+ /// Attempts to delete first suboption of requested type
+ ///
+ /// @param type Type of option to be deleted.
+ ///
+ /// @return true if option was deleted, false if no such option existed
+ bool
+ delOption(unsigned short type);
+
+ /// TODO need getter/setter wrappers
+ /// and hide following fields as protected
+
+ /// buffer that holds memory. It is shared_array as options may
+ /// share pointer to this buffer
+ boost::shared_array<uint8_t> data_;
+
+ /// length of the data
+ unsigned int data_len_;
+
+ /// local address (dst if receiving packet, src if sending packet)
+ isc::asiolink::IOAddress local_addr_;
+
+ /// remote address (src if receiving packet, dst if sending packet)
+ isc::asiolink::IOAddress remote_addr_;
+
+ /// name of the network interface the packet was received/to be sent over
+ std::string iface_;
+
+ /// @brief interface index
+ ///
+ /// interface index (each network interface has a unique ifindex assigned;
+ /// it is a functional equivalent of the name, but sometimes more useful,
+ /// e.g. on systems that allow spaces in interface names, such as
+ /// MS Windows)
+ int ifindex_;
+
+ /// local TCP or UDP port
+ int local_port_;
+
+ /// remote TCP or UDP port
+ int remote_port_;
+
+ /// TODO Need to implement getOptions() as well
+
+ /// collection of options present in this message
+ isc::dhcp::Option::OptionCollection options_;
+
+protected:
+ /// Builds on wire packet for TCP transmission.
+ ///
+ /// TODO This function is not implemented yet.
+ ///
+ /// @return true, if build was successful
+ bool packTCP();
+
+ /// Builds on wire packet for UDP transmission.
+ ///
+ /// @return true, if build was successful
+ bool packUDP();
+
+ /// @brief Parses on-wire form of TCP DHCPv6 packet.
+ ///
+ /// Parses received packet, stored in on-wire format in data_.
+ /// data_len_ must be set to indicate data length.
+ /// Will create a collection of option objects that will
+ /// be stored in options_ container.
+ ///
+ /// TODO This function is not implemented yet.
+ ///
+ /// @return true, if build was successful
+ bool unpackTCP();
+
+ /// @brief Parses on-wire form of UDP DHCPv6 packet.
+ ///
+ /// Parses received packet, stored in on-wire format in data_.
+ /// data_len_ must be set to indicate data length.
+ /// Will create a collection of option objects that will
+ /// be stored in options_ container.
+ ///
+ /// @return true, if build was successful
+ bool unpackUDP();
+
+ /// UDP (usually) or TCP (bulk leasequery or failover)
+ DHCPv6Proto proto_;
+
+ /// DHCPv6 message type
+ int msg_type_;
+
+ /// DHCPv6 transaction-id
+ unsigned int transid_;
+}; // Pkt6 class
+
+} // isc::dhcp namespace
+
+} // isc namespace
+
+#endif
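
A minimal sketch tying Pkt6 together with the IA option above (message type 1 is SOLICIT; IAID and transaction-id are arbitrary example values):

    #include <boost/shared_ptr.hpp>
    #include <dhcp/dhcp6.h>
    #include <dhcp/option6_ia.h>
    #include <dhcp/pkt6.h>

    using namespace isc::dhcp;

    void buildSolicit() {
        Pkt6 solicit(1 /* SOLICIT */, 0x010203);

        boost::shared_ptr<Option6IA> ia(new Option6IA(D6O_IA_NA, 42));
        ia->setT1(0); // let the server pick T1/T2
        ia->setT2(0);
        solicit.addOption(ia);

        if (solicit.pack()) {
            // wire form is now in solicit.data_, length in solicit.data_len_
        }
    }
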
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
new file mode 100644
index 0000000..01799da
--- /dev/null
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -0,0 +1,41 @@
+SUBDIRS = .
+
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += libdhcp_unittests
+libdhcp_unittests_SOURCES = run_unittests.cc
+libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
+libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
+libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
+
+libdhcp_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
+libdhcp_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+libdhcp_unittests_CXXFLAGS = $(AM_CXXFLAGS)
+if USE_CLANGPP
+# This is to workaround unused variables tcout and tcerr in
+# log4cplus's streams.h.
+libdhcp_unittests_CXXFLAGS += -Wno-unused-variable
+endif
+libdhcp_unittests_LDADD = $(GTEST_LDADD)
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
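Assuming gtest was enabled at configure time (the HAVE_GTEST conditional above), the new tests hook into the standard automake test driver and can be run with, for example:

    $ cd src/lib/dhcp/tests
    $ make check
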
diff --git a/src/lib/dhcp/tests/libdhcp_unittest.cc b/src/lib/dhcp/tests/libdhcp_unittest.cc
new file mode 100644
index 0000000..11b618c
--- /dev/null
+++ b/src/lib/dhcp/tests/libdhcp_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
+#include "config.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+namespace {
+class LibDhcpTest : public ::testing::Test {
+public:
+ LibDhcpTest() {
+ }
+};
+
+static const uint8_t packed[] = {
+ 0, 12, 0, 5, 100, 101, 102, 103, 104, // opt1 (9 bytes)
+ 0, 13, 0, 3, 105, 106, 107, // opt2 (7 bytes)
+ 0, 14, 0, 2, 108, 109, // opt3 (6 bytes)
+ 1, 0, 0, 4, 110, 111, 112, 113, // opt4 (8 bytes)
+ 1, 1, 0, 1, 114 // opt5 (5 bytes)
+};
+
+TEST(LibDhcpTest, packOptions6) {
+ boost::shared_array<uint8_t> buf(new uint8_t[512]);
+ isc::dhcp::Option::OptionCollection opts; // list of options
+
+ // generate content for options
+ for (int i = 0; i < 64; i++) {
+ buf[i]=i+100;
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V6, 12, buf, 0, 5));
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 13, buf, 5, 3));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 14, buf, 8, 2));
+ boost::shared_ptr<Option> opt4(new Option(Option::V6,256, buf,10, 4));
+ boost::shared_ptr<Option> opt5(new Option(Option::V6,257, buf,14, 1));
+
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+ unsigned int offset;
+ EXPECT_NO_THROW ({
+ offset = LibDHCP::packOptions6(buf, 512, 100, opts);
+ });
+ EXPECT_EQ(135, offset); // options should take 35 bytes
+ EXPECT_EQ(0, memcmp(&buf[100], packed, 35) );
+}
+
+TEST(LibDhcpTest, unpackOptions6) {
+
+ // just couple of random options
+ // Option is used as a simple option implementation
+ // More advanced uses are validated in tests dedicated for
+ // specific derived classes.
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ // we can't use packed directly, as shared_array would try to
+ // free it eventually
+ boost::shared_array<uint8_t> buf(new uint8_t[512]);
+ memcpy(&buf[0], packed, 35);
+
+ unsigned int offset;
+ EXPECT_NO_THROW ({
+ offset = LibDHCP::unpackOptions6(buf, 512, 0, 35, options);
+ });
+
+ EXPECT_EQ(35, offset); // parsed first 35 bytes (offset 0..34)
+ EXPECT_EQ(options.size(), 5); // there should be 5 options
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+ ASSERT_FALSE(x == options.end()); // option 12 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(9, x->second->len()); // it should be of length 9
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+4, 5)); // data len=5
+
+ x = options.find(13);
+ ASSERT_FALSE(x == options.end()); // option 13 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(7, x->second->len()); // it should be of length 7
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+13, 3)); // data len=3
+
+ x = options.find(14);
+ ASSERT_FALSE(x == options.end()); // option 14 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(6, x->second->len()); // it should be of length 6
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+20, 2)); // data len=2
+
+ x = options.find(256);
+ ASSERT_FALSE(x == options.end()); // option 256 should exist
+ EXPECT_EQ(256, x->second->getType()); // this should be option 256
+ ASSERT_EQ(8, x->second->len()); // it should be of length 8
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+26, 4)); // data len=4
+
+ x = options.find(257);
+ ASSERT_FALSE(x == options.end()); // option 257 should exist
+ EXPECT_EQ(257, x->second->getType()); // this should be option 257
+ ASSERT_EQ(5, x->second->len()); // it should be of length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+34, 1)); // data len=1
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1); // 1 is htons(256) on little-endian hosts, so worth checking
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+
+ x = options.find(32000);
+ EXPECT_TRUE(x == options.end()); // option 32000 not found
+}
+
+
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 254, 3, 30, 31, 32,
+ 128, 3, 40, 41, 42
+};
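+
+// Illustrative sketch only (not LibDHCP API and not used by the tests below):
+// a DHCPv4 option is a 1-byte code and a 1-byte length followed by the value,
+// so v4Opts above is five code/len/value triplets. Walking such a buffer
+// looks roughly like this (the helper name is ours):
+inline size_t countV4Options(const uint8_t* buf, size_t total) {
+ size_t count = 0;
+ size_t i = 0;
+ while (i + 2 <= total) {
+ size_t len = buf[i + 1]; // length of the value part only
+ i += 2 + len; // skip code, length and value
+ if (i > total) {
+ break; // truncated option
+ }
+ ++count;
+ }
+ return (count); // 5 for v4Opts above
+}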
+
+TEST(LibDhcpTest, packOptions4) {
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].resize(3);
+ payload[i][0] = i*10;
+ payload[i][1] = i*10+1;
+ payload[i][2] = i*10+2;
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+ boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[3]));
+ boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[4]));
+
+ isc::dhcp::Option::OptionCollection opts; // list of options
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+ opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+ vector<uint8_t> expVect(v4Opts, v4Opts + sizeof(v4Opts));
+
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW (
+ LibDHCP::packOptions(buf, opts);
+ );
+ ASSERT_EQ(buf.getLength(), sizeof(v4Opts));
+ EXPECT_EQ(0, memcmp(v4Opts, buf.getData(), sizeof(v4Opts)));
+
+}
+
+TEST(LibDhcpTest, unpackOptions4) {
+
+ vector<uint8_t> packed(v4Opts, v4Opts + sizeof(v4Opts));
+ isc::dhcp::Option::OptionCollection options; // list of options
+
+ ASSERT_NO_THROW(
+ LibDHCP::unpackOptions4(packed, options);
+ );
+
+ isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+ ASSERT_FALSE(x == options.end()); // option 12 should exist
+ EXPECT_EQ(12, x->second->getType()); // this should be option 12
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = options.find(13);
+ ASSERT_FALSE(x == options.end()); // option 13 should exist
+ EXPECT_EQ(13, x->second->getType()); // this should be option 13
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = options.find(14);
+ ASSERT_FALSE(x == options.end()); // option 14 should exist
+ EXPECT_EQ(14, x->second->getType()); // this should be option 14
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = options.find(254);
+ ASSERT_FALSE(x == options.end()); // option 254 should exist
+ EXPECT_EQ(254, x->second->getType()); // this should be option 254
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = options.find(128);
+ ASSERT_FALSE(x == options.end()); // option 128 should exist
+ EXPECT_EQ(128, x->second->getType()); // this should be option 128
+ ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->second->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+22, 3)); // data len=3
+
+ x = options.find(0);
+ EXPECT_TRUE(x == options.end()); // option 0 not found
+
+ x = options.find(1);
+ EXPECT_TRUE(x == options.end()); // option 1 not found
+
+ x = options.find(2);
+ EXPECT_TRUE(x == options.end()); // option 2 not found
+}
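+
+// A small illustrative sketch (ours, not LibDHCP API) of the length
+// arithmetic the assertions above rely on: getData().size() is the value
+// length only, while len() also counts the option header, which is 2 bytes
+// for DHCPv4 and 4 bytes for DHCPv6.
+inline size_t expectedOptionLen(bool isV6, size_t dataLen) {
+ return ((isV6 ? 4 : 2) + dataLen); // e.g. 2 + 3 = 5 for the v4 cases above
+}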
+
+}
diff --git a/src/lib/dhcp/tests/option6_addrlst_unittest.cc b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
new file mode 100644
index 0000000..60b618b
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_addrlst.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace {
+class Option6AddrLstTest : public ::testing::Test {
+public:
+ Option6AddrLstTest() {
+ }
+};
+
+TEST_F(Option6AddrLstTest, basic) {
+
+ // Limiting tests to just the 2001:db8::/32 prefix is *wrong*.
+ // Good tests check corner cases as well.
+ // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff checks
+ // for integer overflow.
+ // ff02::face:b00c checks if multicast addresses
+ // can be represented properly.
+
+ uint8_t sampledata[] = {
+ // 2001:db8:1::dead:beef
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+ 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+ // ff02::face:b00c
+ 0xff, 0x02, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+ // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ uint8_t expected1[] = {
+ D6O_NAME_SERVERS/256, D6O_NAME_SERVERS%256,//type
+ 0, 16, // len = 16 (1 address)
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+ 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+ };
+
+ uint8_t expected2[] = {
+ D6O_SIP_SERVERS_ADDR/256, D6O_SIP_SERVERS_ADDR%256,
+ 0, 32, // len = 32 (2 addresses)
+ // 2001:db8:1::dead:beef
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+ 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+ // ff02::face:b00c
+ 0xff, 0x02, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+ // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ uint8_t expected3[] = {
+ D6O_NIS_SERVERS/256, D6O_NIS_SERVERS%256,
+ 0, 48,
+ // 2001:db8:1::dead:beef
+ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+ 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+ // ff02::face:b00c
+ 0xff, 0x02, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+ // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ boost::shared_array<uint8_t> buf(new uint8_t[300]);
+ for (int i = 0; i < 300; i++)
+ buf[i] = 0;
+
+ memcpy(&buf[0], sampledata, 48);
+
+ // just a single address
+ Option6AddrLst* opt1 = 0;
+ EXPECT_NO_THROW(
+ opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf, 128, 0, 16);
+ );
+
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+
+ EXPECT_EQ(D6O_NAME_SERVERS, opt1->getType());
+ EXPECT_EQ(20, opt1->len());
+ Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+ ASSERT_EQ(1, addrs.size());
+ IOAddress addr = addrs[0];
+ EXPECT_EQ("2001:db8:1::dead:beef", addr.toText());
+
+ // pack this option again in the same buffer, but at a
+ // different place
+ int offset = opt1->pack(buf,300, 100);
+
+ EXPECT_EQ(120, offset);
+ EXPECT_EQ( 0, memcmp(expected1, &buf[100], 20) );
+
+ // two addresses
+ Option6AddrLst* opt2 = 0;
+ EXPECT_NO_THROW(
+ opt2 = new Option6AddrLst(D6O_SIP_SERVERS_ADDR, buf, 128, 0, 32);
+ );
+ EXPECT_EQ(D6O_SIP_SERVERS_ADDR, opt2->getType());
+ EXPECT_EQ(36, opt2->len());
+ addrs = opt2->getAddresses();
+ ASSERT_EQ(2, addrs.size());
+ EXPECT_EQ("2001:db8:1::dead:beef", addrs[0].toText());
+ EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
+
+ // pack this option again in the same buffer, but at a
+ // different place
+ offset = opt2->pack(buf,300, 150);
+
+ EXPECT_EQ(150+36, offset);
+ EXPECT_EQ( 0, memcmp(expected2, &buf[150], 36));
+
+ // three addresses
+ Option6AddrLst* opt3 = 0;
+ EXPECT_NO_THROW(
+ opt3 = new Option6AddrLst(D6O_NIS_SERVERS, buf, 128, 0, 48);
+ );
+
+ EXPECT_EQ(D6O_NIS_SERVERS, opt3->getType());
+ EXPECT_EQ(52, opt3->len());
+ addrs = opt3->getAddresses();
+ ASSERT_EQ(3, addrs.size());
+ EXPECT_EQ("2001:db8:1::dead:beef", addrs[0].toText());
+ EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
+ EXPECT_EQ("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", addrs[2].toText());
+
+ // pack this option again in the same buffer, but at a
+ // different place
+ offset = opt3->pack(buf,300, 200);
+
+ EXPECT_EQ(252, offset);
+ EXPECT_EQ( 0, memcmp(expected3, &buf[200], 52) );
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ delete opt2;
+ delete opt3;
+ );
+}
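+
+// Illustrative sketch (ours, not part of Option6AddrLst): the len() values
+// checked above follow from a 4-byte DHCPv6 option header plus 16 bytes per
+// IPv6 address, i.e. 20, 36 and 52 bytes for 1, 2 and 3 addresses.
+inline size_t addrLstOptionLen(size_t addrCount) {
+ return (4 + 16 * addrCount);
+}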
+
+TEST_F(Option6AddrLstTest, constructors) {
+
+ Option6AddrLst* opt1 = 0;
+ EXPECT_NO_THROW(
+ opt1 = new Option6AddrLst(1234, IOAddress("::1"));
+ );
+ EXPECT_EQ(Option::V6, opt1->getUniverse());
+ EXPECT_EQ(1234, opt1->getType());
+
+ Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("::1", addrs[0].toText());
+
+ addrs.clear();
+ addrs.push_back(IOAddress(string("fe80::1234")));
+ addrs.push_back(IOAddress(string("2001:db8:1::baca")));
+
+ Option6AddrLst* opt2 = 0;
+ EXPECT_NO_THROW(
+ opt2 = new Option6AddrLst(5678, addrs);
+ );
+
+ Option6AddrLst::AddressContainer check = opt2->getAddresses();
+ ASSERT_EQ(2, check.size() );
+ EXPECT_EQ("fe80::1234", check[0].toText());
+ EXPECT_EQ("2001:db8:1::baca", check[1].toText());
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ delete opt2;
+ );
+}
+
+TEST_F(Option6AddrLstTest, setAddress) {
+ Option6AddrLst* opt1 = 0;
+ EXPECT_NO_THROW(
+ opt1 = new Option6AddrLst(1234, IOAddress("::1"));
+ );
+ opt1->setAddress(IOAddress("2001:db8:1::2"));
+ /// TODO The address used to be ::2, but io_address represents it as
+ /// ::0.0.0.2. The purpose of this test is to verify that setAddress()
+ /// works, not to deal with the subtleties of how io_address renders
+ /// such addresses, so a more common address is used instead. Anyone
+ /// interested in pursuing this further is encouraged to look at
+ /// section 2.5.5 of RFC4291 (and possibly implement a test for
+ /// IOAddress).
+
+ Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+ ASSERT_EQ(1, addrs.size() );
+ EXPECT_EQ("2001:db8:1::2", addrs[0].toText());
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option6_ia_unittest.cc b/src/lib/dhcp/tests/option6_ia_unittest.cc
new file mode 100644
index 0000000..3fd52f5
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_ia_unittest.cc
@@ -0,0 +1,266 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/option6_iaaddr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace {
+class Option6IATest : public ::testing::Test {
+public:
+ Option6IATest() {
+ }
+};
+
+TEST_F(Option6IATest, basic) {
+
+ boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+ for (int i = 0; i < 128; i++)
+ simple_buf[i] = 0;
+ simple_buf[0] = 0xa1; // iaid
+ simple_buf[1] = 0xa2;
+ simple_buf[2] = 0xa3;
+ simple_buf[3] = 0xa4;
+
+ simple_buf[4] = 0x81; // T1
+ simple_buf[5] = 0x02;
+ simple_buf[6] = 0x03;
+ simple_buf[7] = 0x04;
+
+ simple_buf[8] = 0x84; // T2
+ simple_buf[9] = 0x03;
+ simple_buf[10] = 0x02;
+ simple_buf[11] = 0x01;
+
+ // create an option
+ // unpack() is called from constructor
+ Option6IA* opt = new Option6IA(D6O_IA_NA,
+ simple_buf,
+ 128,
+ 0,
+ 12);
+
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+ EXPECT_EQ(D6O_IA_NA, opt->getType());
+ EXPECT_EQ(0xa1a2a3a4, opt->getIAID());
+ EXPECT_EQ(0x81020304, opt->getT1());
+ EXPECT_EQ(0x84030201, opt->getT2());
+
+ // pack this option again in the same buffer, but at a
+ // different place
+
+ // test for pack()
+ int offset = opt->pack(simple_buf, 128, 60);
+
+ // 4-byte header + 12 bytes of content (IAID, T1 and T2)
+ EXPECT_EQ(12, opt->len() - 4);
+ EXPECT_EQ(D6O_IA_NA, opt->getType());
+
+ EXPECT_EQ(offset, 76); // 60 + len(IA_NA) = 60 + 16 = 76
+
+ // check if pack worked properly:
+ // if option type is correct
+ EXPECT_EQ(D6O_IA_NA, simple_buf[60]*256 + simple_buf[61]);
+
+ // if option length is correct
+ EXPECT_EQ(12, simple_buf[62]*256 + simple_buf[63]);
+
+ // if iaid is correct
+ unsigned int iaid = htonl(*(unsigned int*)&simple_buf[64]);
+ EXPECT_EQ(0xa1a2a3a4, iaid );
+
+ // if T1 is correct
+ EXPECT_EQ(0x81020304, (simple_buf[68] << 24) +
+ (simple_buf[69] << 16) +
+ (simple_buf[70] << 8) +
+ (simple_buf[71]) );
+
+ // if T2 is correct
+ EXPECT_EQ(0x84030201, (simple_buf[72] << 24) +
+ (simple_buf[73] << 16) +
+ (simple_buf[74] << 8) +
+ (simple_buf[75]) );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(Option6IATest, simple) {
+ boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+ for (int i = 0; i < 128; i++)
+ simple_buf[i] = 0;
+
+ Option6IA * ia = new Option6IA(D6O_IA_NA, 1234);
+ ia->setT1(2345);
+ ia->setT2(3456);
+
+ EXPECT_EQ(Option::V6, ia->getUniverse());
+ EXPECT_EQ(D6O_IA_NA, ia->getType());
+ EXPECT_EQ(1234, ia->getIAID());
+ EXPECT_EQ(2345, ia->getT1());
+ EXPECT_EQ(3456, ia->getT2());
+
+ EXPECT_NO_THROW(
+ delete ia;
+ );
+}
+
+// test if option can build suboptions
+TEST_F(Option6IATest, suboptions_pack) {
+ boost::shared_array<uint8_t> buf(new uint8_t[128]);
+ for (int i=0; i<128; i++)
+ buf[i] = 0;
+ buf[0] = 0xff;
+ buf[1] = 0xfe;
+ buf[2] = 0xfc;
+
+ Option6IA * ia = new Option6IA(D6O_IA_NA, 0x13579ace);
+ ia->setT1(0x2345);
+ ia->setT2(0x3456);
+
+ boost::shared_ptr<Option> sub1(new Option(Option::V6,
+ 0xcafe));
+
+ boost::shared_ptr<Option6IAAddr> addr1(
+ new Option6IAAddr(D6O_IAADDR, IOAddress("2001:db8:1234:5678::abcd"),
+ 0x5000, 0x7000));
+
+ ia->addOption(sub1);
+ ia->addOption(addr1);
+
+ ASSERT_EQ(28, addr1->len());
+ ASSERT_EQ(4, sub1->len());
+ ASSERT_EQ(48, ia->len());
+
+ uint8_t expected[] = {
+ D6O_IA_NA/256, D6O_IA_NA%256, // type
+ 0, 44, // length
+ 0x13, 0x57, 0x9a, 0xce, // iaid
+ 0, 0, 0x23, 0x45, // T1
+ 0, 0, 0x34, 0x56, // T2
+
+ // iaaddr suboption
+ D6O_IAADDR/256, D6O_IAADDR%256, // type
+ 0, 24, // len
+ 0x20, 0x01, 0xd, 0xb8, 0x12,0x34, 0x56, 0x78,
+ 0, 0, 0, 0, 0, 0, 0xab, 0xcd, // IP address
+ 0, 0, 0x50, 0, // preferred-lifetime
+ 0, 0, 0x70, 0, // valid-lifetime
+
+ // suboption
+ 0xca, 0xfe, // type
+ 0, 0 // len
+ };
+
+ int offset = ia->pack(buf, 128, 10);
+ ASSERT_EQ(offset, 10 + 48);
+
+ EXPECT_EQ(0, memcmp(&buf[10], expected, 48));
+
+ EXPECT_NO_THROW(
+ delete ia;
+ );
+}
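+
+// Illustrative sketch (ours, not part of Option6IA): the 48-byte figure
+// asserted above comes from a 4-byte option header, a 12-byte fixed part
+// (IAID, T1 and T2) and the suboptions appended after it, each serialized
+// with its own header.
+inline size_t iaNaOptionLen(size_t suboptionsLen) {
+ return (4 + 12 + suboptionsLen); // 4 + 12 + (28 + 4) = 48 in this test
+}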
+
+// test if option can parse suboptions
+TEST_F(Option6IATest, suboptions_unpack) {
+
+
+ uint8_t expected[] = {
+ D6O_IA_NA/256, D6O_IA_NA%256, // type
+ 0, 28, // length
+ 0x13, 0x57, 0x9a, 0xce, // iaid
+ 0, 0, 0x23, 0x45, // T1
+ 0, 0, 0x34, 0x56, // T2
+
+ // iaaddr suboption
+ D6O_IAADDR/256, D6O_IAADDR%256, // type
+ 0, 24, // len
+ 0x20, 0x01, 0xd, 0xb8, 0x12,0x34, 0x56, 0x78,
+ 0, 0, 0, 0, 0, 0, 0xab, 0xcd, // IP address
+ 0, 0, 0x50, 0, // preferred-lifetime
+ 0, 0, 0x70, 0, // valid-lifetime
+
+ // suboption
+ 0xca, 0xfe, // type
+ 0, 0 // len
+ };
+
+ boost::shared_array<uint8_t> buf(new uint8_t[128]);
+ for (int i = 0; i < 128; i++)
+ buf[i] = 0;
+ memcpy(&buf[0], expected, 48);
+
+ Option6IA* ia = 0;
+ EXPECT_NO_THROW({
+ ia = new Option6IA(D6O_IA_NA, buf, 128, 4, 44);
+
+ // let's limit verbosity of this test
+ // cout << "Parsed option:" << endl << ia->toText() << endl;
+ });
+ ASSERT_TRUE(ia);
+
+ EXPECT_EQ(D6O_IA_NA, ia->getType());
+ EXPECT_EQ(0x13579ace, ia->getIAID());
+ EXPECT_EQ(0x2345, ia->getT1());
+ EXPECT_EQ(0x3456, ia->getT2());
+
+ boost::shared_ptr<Option> subopt = ia->getOption(D6O_IAADDR);
+ ASSERT_NE(boost::shared_ptr<Option>(), subopt); // non-NULL
+
+ // checks for address option
+ Option6IAAddr * addr = dynamic_cast<Option6IAAddr*>(subopt.get());
+ ASSERT_TRUE(NULL != addr);
+
+ EXPECT_EQ(D6O_IAADDR, addr->getType());
+ EXPECT_EQ(28, addr->len());
+ EXPECT_EQ(0x5000, addr->getPreferred());
+ EXPECT_EQ(0x7000, addr->getValid());
+ EXPECT_EQ("2001:db8:1234:5678::abcd", addr->getAddress().toText());
+
+ // checks for dummy option
+ subopt = ia->getOption(0xcafe);
+ ASSERT_TRUE(subopt); // should be non-NULL
+
+ EXPECT_EQ(0xcafe, subopt->getType());
+ EXPECT_EQ(4, subopt->len());
+ // there should be no data at all
+ EXPECT_EQ(0, subopt->getData().size());
+
+ subopt = ia->getOption(1); // get option 1
+ ASSERT_FALSE(subopt); // should be NULL
+
+ EXPECT_NO_THROW(
+ delete ia;
+ );
+}
+
+}
diff --git a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
new file mode 100644
index 0000000..81c3eb3
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
@@ -0,0 +1,105 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+#include "dhcp/option6_iaaddr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+
+namespace {
+class Option6IAAddrTest : public ::testing::Test {
+public:
+ Option6IAAddrTest() {
+ }
+};
+
+/// TODO reenable this once ticket #1313 is implemented.
+TEST_F(Option6IAAddrTest, basic) {
+
+ boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+ for (int i = 0; i < 128; i++)
+ simple_buf[i] = 0;
+
+ simple_buf[0] = 0x20;
+ simple_buf[1] = 0x01;
+ simple_buf[2] = 0x0d;
+ simple_buf[3] = 0xb8;
+ simple_buf[4] = 0x00;
+ simple_buf[5] = 0x01;
+ simple_buf[12] = 0xde;
+ simple_buf[13] = 0xad;
+ simple_buf[14] = 0xbe;
+ simple_buf[15] = 0xef; // 2001:db8:1::dead:beef
+
+ simple_buf[16] = 0x00;
+ simple_buf[17] = 0x00;
+ simple_buf[18] = 0x03;
+ simple_buf[19] = 0xe8; // 1000
+
+ simple_buf[20] = 0xb2;
+ simple_buf[21] = 0xd0;
+ simple_buf[22] = 0x5e;
+ simple_buf[23] = 0x00; // 3,000,000,000
+
+ // create an option (unpack content)
+ Option6IAAddr* opt = new Option6IAAddr(D6O_IAADDR,
+ simple_buf,
+ 128,
+ 0,
+ 24);
+
+ // pack this option again in the same buffer, but at a
+ // different place
+ int offset = opt->pack(simple_buf, 128, 50);
+
+ EXPECT_EQ(78, offset);
+
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+
+ // check that the option content was parsed properly
+ EXPECT_EQ("2001:db8:1::dead:beef", opt->getAddress().toText());
+ EXPECT_EQ(1000, opt->getPreferred());
+ EXPECT_EQ(3000000000U, opt->getValid());
+
+ EXPECT_EQ(D6O_IAADDR, opt->getType());
+
+ EXPECT_EQ(Option::OPTION6_HDR_LEN + Option6IAAddr::OPTION6_IAADDR_LEN,
+ opt->len());
+
+ // check if pack worked properly:
+ // if option type is correct
+ EXPECT_EQ(D6O_IAADDR, simple_buf[50]*256 + simple_buf[51]);
+
+ // if option length is correct
+ EXPECT_EQ(24, simple_buf[52]*256 + simple_buf[53]);
+
+ // if option content is correct
+ EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[54],24));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+}
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
new file mode 100644
index 0000000..db3ee3b
--- /dev/null
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -0,0 +1,419 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <boost/shared_ptr.hpp>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+namespace {
+class OptionTest : public ::testing::Test {
+public:
+ OptionTest() {
+ }
+};
+
+// v4 is not really implemented yet. A simple test will do for now
+TEST_F(OptionTest, v4_basic) {
+
+ Option* opt = 0;
+ EXPECT_NO_THROW(
+ opt = new Option(Option::V4, 17);
+ );
+
+ EXPECT_EQ(Option::V4, opt->getUniverse());
+ EXPECT_EQ(17, opt->getType());
+ EXPECT_EQ(0, opt->getData().size());
+ EXPECT_EQ(2, opt->len()); // just v4 header
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+ opt = 0;
+
+ // V4 options have type 0...255
+ EXPECT_THROW(
+ opt = new Option(Option::V4, 256),
+ BadValue
+ );
+ if (opt) {
+ delete opt;
+ opt = 0;
+ }
+}
+
+const uint8_t dummyPayload[] =
+{ 1, 2, 3, 4};
+
+TEST_F(OptionTest, v4_data1) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ Option* opt = 0;
+
+ // create DHCPv4 option of type 123
+ // that contains 4 bytes of data
+ ASSERT_NO_THROW(
+ opt= new Option(Option::V4,
+ 123, // type
+ data);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), data.size());
+ EXPECT_TRUE(optData == data);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it will be implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// this is almost the same test as v4_data1, but it uses
+// a different constructor
+TEST_F(OptionTest, v4_data2) {
+
+ vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+ vector<uint8_t> expData = data;
+
+ // Add fake data in front and end. Main purpose of this test is to check
+ // that only subset of the whole vector can be used for creating option.
+ data.insert(data.begin(), 56);
+ data.push_back(67);
+
+ // Data contains extra garbage at the beginning and at the end. It should be
+ // ignored, as we pass iterators that delimit the proper data; only the
+ // subset of the vector bounded by those iterators should be used.
+ // expData contains the expected content (just valid data, without garbage).
+
+ Option* opt = 0;
+
+ // Create DHCPv4 option of type 123 that contains
+ // 4 bytes (sizeof(dummyPayload)).
+ ASSERT_NO_THROW(
+ opt= new Option(Option::V4,
+ 123, // type
+ data.begin() + 1,
+ data.end() - 1);
+ );
+
+ // check that content is reported properly
+ EXPECT_EQ(123, opt->getType());
+ vector<uint8_t> optData = opt->getData();
+ ASSERT_EQ(optData.size(), expData.size());
+ EXPECT_TRUE(optData == expData);
+ EXPECT_EQ(2, opt->getHeaderLen());
+ EXPECT_EQ(6, opt->len());
+
+ // now store that option into a buffer
+ OutputBuffer buf(100);
+ EXPECT_NO_THROW(
+ opt->pack4(buf);
+ );
+
+ // check content of that buffer
+
+ // 2 byte header + 4 bytes data
+ ASSERT_EQ(6, buf.getLength());
+
+ // that's how this option is supposed to look like
+ uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+ /// TODO: use vector<uint8_t> getData() when it will be implemented
+ EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+ // check that we can destroy that option
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+TEST_F(OptionTest, v4_toText) {
+
+ vector<uint8_t> buf(3);
+ buf[0] = 0;
+ buf[1] = 0xf;
+ buf[2] = 0xff;
+
+ Option opt(Option::V4, 253, buf);
+
+ EXPECT_EQ("type=253, len=3: 00:0f:ff", opt.toText());
+}
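+
+// Illustrative sketch (ours, not the actual Option::toText() code): the
+// string checked above follows a "type=<t>, len=<n>: aa:bb:cc" pattern,
+// which could be produced by hand like this:
+inline std::string formatOptionText(uint16_t type, const std::vector<uint8_t>& data) {
+ static const char hexDigits[] = "0123456789abcdef";
+ std::ostringstream out;
+ out << "type=" << type << ", len=" << data.size() << ":";
+ for (size_t i = 0; i < data.size(); ++i) {
+ out << (i == 0 ? " " : ":")
+ << hexDigits[data[i] >> 4] << hexDigits[data[i] & 0x0f];
+ }
+ return (out.str());
+}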
+
+// tests simple constructor
+TEST_F(OptionTest, v6_basic) {
+
+ Option* opt = new Option(Option::V6, 1);
+
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+ EXPECT_EQ(1, opt->getType());
+ EXPECT_EQ(0, opt->getData().size());
+ EXPECT_EQ(4, opt->len()); // just v6 header
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// tests the constructor used in packet reception;
+// the option contains actual data
+TEST_F(OptionTest, v6_data1) {
+ boost::shared_array<uint8_t> buf(new uint8_t[32]);
+ for (int i = 0; i < 32; i++)
+ buf[i] = 100+i;
+ Option* opt = new Option(Option::V6, 333, //type
+ buf,
+ 3, // offset
+ 7); // 7 bytes of data
+ EXPECT_EQ(333, opt->getType());
+
+ ASSERT_EQ(11, opt->len());
+ ASSERT_EQ(7, opt->getData().size());
+ EXPECT_EQ(0, memcmp(&buf[3], &opt->getData()[0], 7) );
+
+ int offset = opt->pack(buf, 32, 20);
+ EXPECT_EQ(31, offset);
+
+ EXPECT_EQ(buf[20], 333/256); // type
+ EXPECT_EQ(buf[21], 333%256);
+
+ EXPECT_EQ(buf[22], 0); // len
+ EXPECT_EQ(buf[23], 7);
+
+ // payload
+ EXPECT_EQ(0, memcmp(&buf[3], &buf[24], 7) );
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// another test that checks the same thing, just
+// with different input parameters
+TEST_F(OptionTest, v6_data2) {
+
+ boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+ for (int i = 0; i < 128; i++)
+ simple_buf[i] = 0;
+ simple_buf[0] = 0xa1;
+ simple_buf[1] = 0xa2;
+ simple_buf[2] = 0xa3;
+ simple_buf[3] = 0xa4;
+
+ // create an option (unpack content)
+ Option* opt = new Option(Option::V6,
+ D6O_CLIENTID,
+ simple_buf,
+ 0,
+ 4);
+
+ // pack this option again in the same buffer, but at a
+ // different place
+ int offset18 = opt->pack(simple_buf, 128, 10);
+
+ // 4 bytes header + 4 bytes content
+ EXPECT_EQ(8, opt->len());
+ EXPECT_EQ(D6O_CLIENTID, opt->getType());
+
+ EXPECT_EQ(offset18, 18);
+
+ // check if pack worked properly:
+ // if option type is correct
+ EXPECT_EQ(D6O_CLIENTID, simple_buf[10]*256 + simple_buf[11]);
+
+ // if option length is correct
+ EXPECT_EQ(4, simple_buf[12]*256 + simple_buf[13]);
+
+ // if option content is correct
+ EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[14],4));
+
+ EXPECT_NO_THROW(
+ delete opt;
+ );
+}
+
+// check that an option can contain 2 suboptions:
+// opt1
+// +----opt2
+// |
+// +----opt3
+//
+TEST_F(OptionTest, v6_suboptions1) {
+ boost::shared_array<uint8_t> buf(new uint8_t[128]);
+ for (int i=0; i<128; i++)
+ buf[i] = 100+i;
+ Option* opt1 = new Option(Option::V6, 65535, //type
+ buf,
+ 0, // offset
+ 3); // 3 bytes of data
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
+ buf,
+ 3, // offset
+ 5)); // 5 bytes of data
+ opt1->addOption(opt2);
+ opt1->addOption(opt3);
+ // opt2 len = 4 (just header)
+ // opt3 len = 9 4(header)+5(data)
+ // opt1 len = 7 + suboptions() = 7 + 4 + 9 = 20
+
+ EXPECT_EQ(4, opt2->len());
+ EXPECT_EQ(9, opt3->len());
+ EXPECT_EQ(20, opt1->len());
+
+ uint8_t expected[] = {
+ 0xff, 0xff, 0, 16, 100, 101, 102,
+ 0, 7, 0, 5, 103, 104, 105, 106, 107,
+ 0, 13, 0, 0 // no data at all
+ };
+
+ int offset = opt1->pack(buf, 128, 20);
+ EXPECT_EQ(40, offset);
+
+ // payload
+ EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ );
+}
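+
+// Illustrative sketch (ours): the 20-byte expected buffer above is the
+// parent option's 4-byte header and 3 bytes of its own data, followed by
+// each suboption serialized with its own header:
+inline size_t packedLenWithSuboptions(size_t ownDataLen, size_t suboptionsLen) {
+ return (4 + ownDataLen + suboptionsLen); // 4 + 3 + (9 + 4) = 20 here
+}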
+
+// check that an option can contain nested suboptions:
+// opt1
+// +----opt2
+// |
+// +----opt3
+//
+TEST_F(OptionTest, v6_suboptions2) {
+ boost::shared_array<uint8_t> buf(new uint8_t[128]);
+ for (int i=0; i<128; i++)
+ buf[i] = 100+i;
+ Option* opt1 = new Option(Option::V6, 65535, //type
+ buf,
+ 0, // offset
+ 3); // 3 bytes of data
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
+ buf,
+ 3, // offset
+ 5)); // 5 bytes of data
+ opt1->addOption(opt2);
+ opt2->addOption(opt3);
+ // opt3 len = 9 4(header)+5(data)
+ // opt2 len = 4 (just header) + len(opt3)
+ // opt1 len = 7 + len(opt2)
+
+ uint8_t expected[] = {
+ 0xff, 0xff, 0, 16, 100, 101, 102,
+ 0, 13, 0, 9,
+ 0, 7, 0, 5, 103, 104, 105, 106, 107,
+ };
+
+ int offset = opt1->pack(buf, 128, 20);
+ EXPECT_EQ(40, offset);
+
+ // payload
+ EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+
+ EXPECT_NO_THROW(
+ delete opt1;
+ );
+}
+
+TEST_F(OptionTest, v6_addgetdel) {
+ boost::shared_array<uint8_t> buf(new uint8_t[128]);
+ for (int i=0; i<128; i++)
+ buf[i] = 100+i;
+ Option* parent = new Option(Option::V6, 65535); //type
+ boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+
+ parent->addOption(opt1);
+ parent->addOption(opt2);
+
+ // getOption() test
+ EXPECT_EQ(opt1, parent->getOption(1));
+ EXPECT_EQ(opt2, parent->getOption(2));
+
+ // expect NULL
+ EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+
+ // now there are 2 options of type 2
+ parent->addOption(opt3);
+
+ // let's delete one of them
+ EXPECT_EQ(true, parent->delOption(2));
+
+ // there still should be the other option 2
+ EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+
+ // let's delete the other option 2
+ EXPECT_EQ(true, parent->delOption(2));
+
+ // no more options with type=2
+ EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+
+ // let's try to delete - should fail
+ EXPECT_TRUE(false == parent->delOption(2));
+}
+
+TEST_F(OptionTest, v6_toText) {
+ boost::shared_array<uint8_t> buf(new uint8_t[3]);
+ buf[0] = 0;
+ buf[1] = 0xf;
+ buf[2] = 0xff;
+
+ boost::shared_ptr<Option> opt(new Option(Option::V6, 258,
+ buf, 0, 3));
+
+ EXPECT_EQ("type=258, len=3: 00:0f:ff", opt->toText());
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
new file mode 100644
index 0000000..c89743f
--- /dev/null
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -0,0 +1,562 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <boost/static_assert.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace boost;
+
+namespace {
+
+TEST(Pkt4Test, constructor) {
+
+ ASSERT_EQ(236U, static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) );
+ Pkt4* pkt = 0;
+
+ // Just some dummy payload.
+ uint8_t testData[250];
+ for (int i = 0; i < 250; i++) {
+ testData[i]=i;
+ }
+
+ // Positive case1. Normal received packet.
+ EXPECT_NO_THROW(
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN);
+ );
+
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+ EXPECT_NO_THROW(
+ delete pkt;
+ pkt = 0;
+ );
+
+ // Positive case2. Normal outgoing packet.
+ EXPECT_NO_THROW(
+ pkt = new Pkt4(DHCPDISCOVER, 0xffffffff);
+ );
+
+ // DHCPv4 packet must be at least 236 bytes long
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+ EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+ EXPECT_EQ(0xffffffff, pkt->getTransid());
+ EXPECT_NO_THROW(
+ delete pkt;
+ pkt = 0;
+ );
+
+ // Negative case. Should drop truncated messages.
+ EXPECT_THROW(
+ pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN-1),
+ OutOfRange
+ );
+ if (pkt) {
+ // Test failed. Exception should have been thrown, but
+ // object was created instead. Let's clean this up.
+ delete pkt;
+ pkt = 0;
+ }
+}
+
+// a sample data
+const uint8_t dummyOp = BOOTREQUEST;
+const uint8_t dummyHtype = 6;
+const uint8_t dummyHlen = 6;
+const uint8_t dummyHops = 13;
+const uint32_t dummyTransid = 0x12345678;
+const uint16_t dummySecs = 42;
+const uint16_t dummyFlags = BOOTP_BROADCAST;
+
+const IOAddress dummyCiaddr("192.0.2.1");
+const IOAddress dummyYiaddr("1.2.3.4");
+const IOAddress dummySiaddr("192.0.2.255");
+const IOAddress dummyGiaddr("255.255.255.255");
+
+// a dummy MAC address
+const uint8_t dummyMacAddr[] = {0, 1, 2, 3, 4, 5};
+
+// a dummy MAC address, padded with 0s
+const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+// let's use some creative test content here (128 chars + \0)
+const uint8_t dummyFile[] = "Lorem ipsum dolor sit amet, consectetur "
+ "adipiscing elit. Proin mollis placerat metus, at "
+ "lacinia orci ornare vitae. Mauris amet.";
+
+// yet another type of test content (64 chars + \0)
+const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
+ "adipiscing elit posuere.";
+
+BOOST_STATIC_ASSERT(sizeof(dummyFile) == Pkt4::MAX_FILE_LEN + 1);
+BOOST_STATIC_ASSERT(sizeof(dummySname) == Pkt4::MAX_SNAME_LEN + 1);
+
+/// @brief Generates test packet.
+///
+/// Allocates and generates test packet, with all fixed
+/// fields set to non-zero values. Content is not always
+/// reasonable.
+///
+/// See generateTestPacket2() function that returns
+/// exactly the same packet in on-wire format.
+///
+/// @return pointer to allocated Pkt4 object.
+boost::shared_ptr<Pkt4>
+generateTestPacket1() {
+
+ boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPDISCOVER, dummyTransid));
+
+ vector<uint8_t> vectorMacAddr(dummyMacAddr, dummyMacAddr
+ +sizeof(dummyMacAddr));
+
+ // hwType = 6(ETHERNET), hlen = 6(MAC address len)
+ pkt->setHWAddr(dummyHtype, dummyHlen, vectorMacAddr);
+ pkt->setHops(dummyHops); // 13 relays. Wow!
+ // Transaction-id is already set.
+ pkt->setSecs(dummySecs);
+ pkt->setFlags(dummyFlags); // all flags set
+ pkt->setCiaddr(dummyCiaddr);
+ pkt->setYiaddr(dummyYiaddr);
+ pkt->setSiaddr(dummySiaddr);
+ pkt->setGiaddr(dummyGiaddr);
+ // Chaddr already set with setHWAddr().
+ pkt->setSname(dummySname, 64);
+ pkt->setFile(dummyFile, 128);
+
+ return (pkt);
+}
+
+/// @brief Generates test packet.
+///
+/// Allocates and generates on-wire buffer that represents
+/// test packet, with all fixed fields set to non-zero values.
+/// Content is not always reasonable.
+///
+/// See generateTestPacket1() function that returns
+/// exactly the same packet as Pkt4 object.
+///
+/// @return vector containing the on-wire form of the test packet header
+vector<uint8_t>
+generateTestPacket2() {
+
+ // That is only part of the header. It contains all "short" fields,
+ // larger fields are constructed separately.
+ uint8_t hdr[] = {
+ 1, 6, 6, 13, // op, htype, hlen, hops,
+ 0x12, 0x34, 0x56, 0x78, // transaction-id
+ 0, 42, 0x80, 0x00, // 42 secs, BROADCAST flags
+ 192, 0, 2, 1, // ciaddr
+ 1, 2, 3, 4, // yiaddr
+ 192, 0, 2, 255, // siaddr
+ 255, 255, 255, 255, // giaddr
+ };
+
+ // Initialize the vector with the header fields defined above.
+ vector<uint8_t> buf(hdr, hdr + sizeof(hdr));
+
+ // Append the large header fields.
+ copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN, back_inserter(buf));
+ copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN, back_inserter(buf));
+ copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN, back_inserter(buf));
+
+ // Should now have all the header, so check. The "static_cast" is used
+ // to get round an odd bug whereby the linker appears not to find the
+ // definition of DHCPV4_PKT_HDR_LEN if it appears within an EXPECT_EQ().
+ EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), buf.size());
+
+ return (buf);
+}
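+
+// Illustrative reference (ours, mirroring RFC 2131 rather than any Pkt4
+// constant): offsets of the fixed BOOTP/DHCPv4 fields that the tests below
+// poke at directly in the packed buffer.
+enum Dhcp4FixedOffsets {
+ OFFSET_CHADDR = 28, // after 28 bytes of short fields
+ OFFSET_SNAME = OFFSET_CHADDR + 16, // 44: after the 16-byte chaddr
+ OFFSET_FILE = OFFSET_SNAME + 64, // 108: after the 64-byte sname
+ OFFSET_HDR_END = OFFSET_FILE + 128 // 236 == DHCPV4_PKT_HDR_LEN
+};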
+
+TEST(Pkt4Test, fixedFields) {
+
+ shared_ptr<Pkt4> pkt = generateTestPacket1();
+
+ // ok, let's check packet values
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
+ EXPECT_EQ(dummyTransid, pkt->getTransid());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(dummyYiaddr.toText(), pkt->getYiaddr().toText());
+ EXPECT_EQ(dummySiaddr.toText(), pkt->getSiaddr().toText());
+ EXPECT_EQ(dummyGiaddr.toText(), pkt->getGiaddr().toText());
+
+ // chaddr is always 16 bytes long and contains link-layer addr (MAC)
+ EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), 16));
+
+ EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], 64));
+
+ EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], 128));
+
+ EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+}
+
+TEST(Pkt4Test, fixedFieldsPack) {
+ shared_ptr<Pkt4> pkt = generateTestPacket1();
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+ // redundant, but MUCH easier to inspect in gdb
+ const uint8_t* exp = &expectedFormat[0];
+ const uint8_t* got = static_cast<const uint8_t*>(pkt->getBuffer().getData());
+
+ EXPECT_EQ(0, memcmp(exp, got, Pkt4::DHCPV4_PKT_HDR_LEN));
+}
+
+/// TODO Uncomment when ticket #1226 is implemented
+TEST(Pkt4Test, fixedFieldsUnpack) {
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+ shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ Pkt4::DHCPV4_PKT_HDR_LEN));
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
+ // ok, let's check packet values
+ EXPECT_EQ(dummyOp, pkt->getOp());
+ EXPECT_EQ(dummyHtype, pkt->getHtype());
+ EXPECT_EQ(dummyHlen, pkt->getHlen());
+ EXPECT_EQ(dummyHops, pkt->getHops());
+ EXPECT_EQ(dummyTransid, pkt->getTransid());
+ EXPECT_EQ(dummySecs, pkt->getSecs());
+ EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+ EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+ EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
+ EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
+ EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
+
+ // chaddr is always 16 bytes long and contains link-layer addr (MAC)
+ EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), Pkt4::MAX_CHADDR_LEN));
+
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_SNAME_LEN), pkt->getSname().size());
+ EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
+
+ ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_FILE_LEN), pkt->getFile().size());
+ EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
+
+ EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+}
+
+// this test is for hardware addresses (htype, hlen and chaddr fields)
+TEST(Pkt4Test, hwAddr) {
+
+ vector<uint8_t> mac;
+ uint8_t expectedChaddr[Pkt4::MAX_CHADDR_LEN];
+
+ // We resize the vector to the specified length. That is more natural for a
+ // fixed-length field than clearing it (shrinking its size to 0) and then
+ // push_back()ing each element (growing it back to MAX_CHADDR_LEN).
+ mac.resize(Pkt4::MAX_CHADDR_LEN);
+
+ Pkt4* pkt = 0;
+ // let's test each hlen, from 0 till 16
+ for (int macLen = 0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
+ for (int i = 0; i < Pkt4::MAX_CHADDR_LEN; i++) {
+ mac[i] = 0;
+ expectedChaddr[i] = 0;
+ }
+ for (int i = 0; i < macLen; i++) {
+ mac[i] = 128 + i;
+ expectedChaddr[i] = 128 + i;
+ }
+
+ // type and transaction-id don't matter in this test
+ pkt = new Pkt4(DHCPOFFER, 1234);
+ pkt->setHWAddr(255-macLen*10, // just weird htype
+ macLen,
+ mac);
+ EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(),
+ Pkt4::MAX_CHADDR_LEN));
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ // CHADDR starts at offset 28 in DHCP packet
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+28;
+
+ EXPECT_EQ(0, memcmp(ptr, expectedChaddr, Pkt4::MAX_CHADDR_LEN));
+
+ delete pkt;
+ }
+
+ /// TODO: extend this test once options support is implemented. HW address
+ /// longer than 16 bytes should be stored in client-identifier option
+}
+
+TEST(Pkt4Test, msgTypes) {
+
+ struct msgType {
+ uint8_t dhcp;
+ uint8_t bootp;
+ };
+
+ msgType types[] = {
+ {DHCPDISCOVER, BOOTREQUEST},
+ {DHCPOFFER, BOOTREPLY},
+ {DHCPREQUEST, BOOTREQUEST},
+ {DHCPDECLINE, BOOTREQUEST},
+ {DHCPACK, BOOTREPLY},
+ {DHCPNAK, BOOTREPLY},
+ {DHCPRELEASE, BOOTREQUEST},
+ {DHCPINFORM, BOOTREQUEST},
+ {DHCPLEASEQUERY, BOOTREQUEST},
+ {DHCPLEASEUNASSIGNED, BOOTREPLY},
+ {DHCPLEASEUNKNOWN, BOOTREPLY},
+ {DHCPLEASEACTIVE, BOOTREPLY}
+ };
+
+ Pkt4* pkt = 0;
+ for (int i = 0; i < sizeof(types) / sizeof(msgType); i++) {
+
+ pkt = new Pkt4(types[i].dhcp, 0);
+ EXPECT_EQ(types[i].dhcp, pkt->getType());
+
+ EXPECT_EQ(types[i].bootp, pkt->getOp());
+
+ delete pkt;
+ pkt = 0;
+ }
+
+ EXPECT_THROW(
+ pkt = new Pkt4(100, 0), // there's no message type 100
+ OutOfRange
+ );
+ if (pkt) {
+ delete pkt;
+ }
+}
+
+// this test verifies handling of sname field
+TEST(Pkt4Test, sname) {
+
+ uint8_t sname[Pkt4::MAX_SNAME_LEN];
+
+ Pkt4* pkt = 0;
+ // let's test each sname length, from 0 till 64
+ for (int snameLen=0; snameLen < Pkt4::MAX_SNAME_LEN; snameLen++) {
+ for (int i = 0; i < Pkt4::MAX_SNAME_LEN; i++) {
+ sname[i] = 0;
+ }
+ for (int i = 0; i < snameLen; i++) {
+ sname[i] = i;
+ }
+
+ // type and transaction-id don't matter in this test
+ pkt = new Pkt4(DHCPOFFER, 1234);
+ pkt->setSname(sname, snameLen);
+
+ EXPECT_EQ(0, memcmp(sname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ // SNAME starts at offset 44 in DHCP packet
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+44;
+ EXPECT_EQ(0, memcmp(ptr, sname, Pkt4::MAX_SNAME_LEN));
+
+ delete pkt;
+ }
+}
+
+TEST(Pkt4Test, file) {
+
+ uint8_t file[Pkt4::MAX_FILE_LEN];
+
+ Pkt4* pkt = 0;
+ // Let's test each file length, from 0 till 128.
+ for (int fileLen = 0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
+ for (int i = 0; i < Pkt4::MAX_FILE_LEN; i++) {
+ file[i] = 0;
+ }
+ for (int i = 0; i < fileLen; i++) {
+ file[i] = i;
+ }
+
+ // Type and transaction-id don't matter in this test.
+ pkt = new Pkt4(DHCPOFFER, 1234);
+ pkt->setFile(file, fileLen);
+
+ EXPECT_EQ(0, memcmp(file, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
+
+ //
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ // FILE starts at offset 108 in DHCP packet.
+ const uint8_t* ptr =
+ static_cast<const uint8_t*>(pkt->getBuffer().getData())+108;
+ EXPECT_EQ(0, memcmp(ptr, file, Pkt4::MAX_FILE_LEN));
+
+ delete pkt;
+ }
+
+}
+
+static uint8_t v4Opts[] = {
+ 12, 3, 0, 1, 2,
+ 13, 3, 10, 11, 12,
+ 14, 3, 20, 21, 22,
+ 128, 3, 30, 31, 32,
+ 254, 3, 40, 41, 42
+};
+
+TEST(Pkt4Test, options) {
+ Pkt4* pkt = new Pkt4(DHCPOFFER, 0);
+
+ vector<uint8_t> payload[5];
+ for (int i = 0; i < 5; i++) {
+ payload[i].push_back(i*10);
+ payload[i].push_back(i*10+1);
+ payload[i].push_back(i*10+2);
+ }
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+ boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+ boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+ boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[3]));
+ boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[4]));
+
+ pkt->addOption(opt1);
+ pkt->addOption(opt2);
+ pkt->addOption(opt3);
+ pkt->addOption(opt4);
+ pkt->addOption(opt5);
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+ EXPECT_FALSE(pkt->getOption(127)); // no such option
+
+ // options are unique in DHCPv4. It should not be possible
+ // to add more than one option of the same type.
+ EXPECT_THROW(
+ pkt->addOption(opt1),
+ BadValue
+ );
+
+ EXPECT_NO_THROW(
+ pkt->pack();
+ );
+
+ const OutputBuffer& buf = pkt->getBuffer();
+ // check that all options are stored, they should take sizeof(v4Opts)
+ ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+ buf.getLength());
+
+ // check that this extra data actually contains our options
+ const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
+ ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
+ EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+
+ EXPECT_NO_THROW(
+ delete pkt;
+ );
+}
+
+TEST(Pkt4Test, unpackOptions) {
+
+ vector<uint8_t> expectedFormat = generateTestPacket2();
+
+ for (int i=0; i < sizeof(v4Opts); i++) {
+ expectedFormat.push_back(v4Opts[i]);
+ }
+
+ // now expectedFormat contains fixed format and 5 options
+
+ shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+ expectedFormat.size()));
+
+ EXPECT_NO_THROW(
+ pkt->unpack()
+ );
+
+ EXPECT_TRUE(pkt->getOption(12));
+ EXPECT_TRUE(pkt->getOption(13));
+ EXPECT_TRUE(pkt->getOption(14));
+ EXPECT_TRUE(pkt->getOption(128));
+ EXPECT_TRUE(pkt->getOption(254));
+
+ shared_ptr<Option> x = pkt->getOption(12);
+ ASSERT_TRUE(x); // option 12 should exist
+ EXPECT_EQ(12, x->getType()); // this should be option 12
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+2, 3)); // data len=3
+
+ x = pkt->getOption(13);
+ ASSERT_TRUE(x); // option 13 should exist
+ EXPECT_EQ(13, x->getType()); // this should be option 13
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+7, 3)); // data len=3
+
+ x = pkt->getOption(14);
+ ASSERT_TRUE(x); // option 14 should exist
+ EXPECT_EQ(14, x->getType()); // this should be option 14
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+12, 3)); // data len=3
+
+ x = pkt->getOption(128);
+ ASSERT_TRUE(x); // option 128 should exist
+ EXPECT_EQ(128, x->getType()); // this should be option 128
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+17, 3)); // data len=3
+
+ x = pkt->getOption(254);
+ ASSERT_TRUE(x); // option 254 should exist
+ EXPECT_EQ(254, x->getType()); // this should be option 254
+ ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+ EXPECT_EQ(5, x->len()); // total option length 5
+ EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
+}
+
+} // end of anonymous namespace
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
new file mode 100644
index 0000000..968b24c
--- /dev/null
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -0,0 +1,207 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/dhcp6.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace {
+// empty class for now, but may be extended once Addr6 becomes bigger
+class Pkt6Test : public ::testing::Test {
+public:
+ Pkt6Test() {
+ }
+};
+
+TEST_F(Pkt6Test, constructor) {
+ Pkt6 * pkt1 = new Pkt6(17);
+
+ EXPECT_EQ(pkt1->data_len_, 17);
+
+ delete pkt1;
+}
+
+// captured actual SOLICIT packet: transid=0x3d79fb
+// options: client-id, in_na, dns-server, elapsed-time, option-request
+// this code is autogenerated (see src/bin/dhcp6/tests/iface_mgr_unittest.c)
+Pkt6 *capture1() {
+ Pkt6* pkt;
+ pkt = new Pkt6(98);
+ pkt->remote_port_ = 546;
+ pkt->remote_addr_ = IOAddress("fe80::21e:8cff:fe9b:7349");
+ pkt->local_port_ = 0;
+ pkt->local_addr_ = IOAddress("ff02::1:2");
+ pkt->ifindex_ = 2;
+ pkt->iface_ = "eth0";
+ pkt->data_[0]=1;
+ pkt->data_[1]=01; pkt->data_[2]=02; pkt->data_[3]=03; pkt->data_[4]=0;
+ pkt->data_[5]=1; pkt->data_[6]=0; pkt->data_[7]=14; pkt->data_[8]=0;
+ pkt->data_[9]=1; pkt->data_[10]=0; pkt->data_[11]=1; pkt->data_[12]=21;
+ pkt->data_[13]=158; pkt->data_[14]=60; pkt->data_[15]=22; pkt->data_[16]=0;
+ pkt->data_[17]=30; pkt->data_[18]=140; pkt->data_[19]=155; pkt->data_[20]=115;
+ pkt->data_[21]=73; pkt->data_[22]=0; pkt->data_[23]=3; pkt->data_[24]=0;
+ pkt->data_[25]=40; pkt->data_[26]=0; pkt->data_[27]=0; pkt->data_[28]=0;
+ pkt->data_[29]=1; pkt->data_[30]=255; pkt->data_[31]=255; pkt->data_[32]=255;
+ pkt->data_[33]=255; pkt->data_[34]=255; pkt->data_[35]=255; pkt->data_[36]=255;
+ pkt->data_[37]=255; pkt->data_[38]=0; pkt->data_[39]=5; pkt->data_[40]=0;
+ pkt->data_[41]=24; pkt->data_[42]=32; pkt->data_[43]=1; pkt->data_[44]=13;
+ pkt->data_[45]=184; pkt->data_[46]=0; pkt->data_[47]=1; pkt->data_[48]=0;
+ pkt->data_[49]=0; pkt->data_[50]=0; pkt->data_[51]=0; pkt->data_[52]=0;
+ pkt->data_[53]=0; pkt->data_[54]=0; pkt->data_[55]=0; pkt->data_[56]=18;
+ pkt->data_[57]=52; pkt->data_[58]=255; pkt->data_[59]=255; pkt->data_[60]=255;
+ pkt->data_[61]=255; pkt->data_[62]=255; pkt->data_[63]=255; pkt->data_[64]=255;
+ pkt->data_[65]=255; pkt->data_[66]=0; pkt->data_[67]=23; pkt->data_[68]=0;
+ pkt->data_[69]=16; pkt->data_[70]=32; pkt->data_[71]=1; pkt->data_[72]=13;
+ pkt->data_[73]=184; pkt->data_[74]=0; pkt->data_[75]=1; pkt->data_[76]=0;
+ pkt->data_[77]=0; pkt->data_[78]=0; pkt->data_[79]=0; pkt->data_[80]=0;
+ pkt->data_[81]=0; pkt->data_[82]=0; pkt->data_[83]=0; pkt->data_[84]=221;
+ pkt->data_[85]=221; pkt->data_[86]=0; pkt->data_[87]=8; pkt->data_[88]=0;
+ pkt->data_[89]=2; pkt->data_[90]=0; pkt->data_[91]=100; pkt->data_[92]=0;
+ pkt->data_[93]=6; pkt->data_[94]=0; pkt->data_[95]=2; pkt->data_[96]=0;
+ pkt->data_[97]=23;
+ return (pkt);
+}
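+
+// Illustrative sketch (ours, not Pkt6 API): in the raw buffer built above,
+// byte 0 is the DHCPv6 message type (1 == SOLICIT) and bytes 1..3 are the
+// 3-byte transaction id; options in type/len/value form follow immediately.
+inline uint32_t readTransid6(const uint8_t* data) {
+ return ((static_cast<uint32_t>(data[1]) << 16) |
+ (static_cast<uint32_t>(data[2]) << 8) |
+ data[3]);
+}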
+
+TEST_F(Pkt6Test, unpack_solicit1) {
+ Pkt6 * sol = capture1();
+
+ ASSERT_EQ(true, sol->unpack());
+
+ // check for length
+ EXPECT_EQ(98, sol->len() );
+
+ // check for type
+ EXPECT_EQ(DHCPV6_SOLICIT, sol->getType() );
+
+ // check that all present options are returned
+ EXPECT_TRUE(sol->getOption(D6O_CLIENTID)); // client-id is present
+ EXPECT_TRUE(sol->getOption(D6O_IA_NA)); // IA_NA is present
+ EXPECT_TRUE(sol->getOption(D6O_ELAPSED_TIME)); // elapsed is present
+ EXPECT_TRUE(sol->getOption(D6O_NAME_SERVERS));
+ EXPECT_TRUE(sol->getOption(D6O_ORO));
+
+ // let's check that non-present options are not returned
+ EXPECT_FALSE(sol->getOption(D6O_SERVERID)); // server-id is missing
+ EXPECT_FALSE(sol->getOption(D6O_IA_TA));
+ EXPECT_FALSE(sol->getOption(D6O_IAADDR));
+
+ // let's limit verbosity of this test
+ // std::cout << sol->toText();
+
+ delete sol;
+}
+
+TEST_F(Pkt6Test, packUnpack) {
+
+ Pkt6 * parent = new Pkt6(100);
+
+ parent->setType(DHCPV6_SOLICIT);
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 100));
+ // let's not use zero-length option type 3 as it is IA_NA
+
+ parent->addOption(opt1);
+ parent->addOption(opt2);
+ parent->addOption(opt3);
+
+ EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
+ int transid = parent->getTransid();
+ // transaction-id was randomized, let's remember it
+
+ // calculated length should be 16
+ EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN,
+ parent->len() );
+
+ EXPECT_TRUE( parent->pack() );
+
+ //
+ EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN,
+ parent->len() );
+
+ // let's delete options from the options_ collection;
+ // they are still present in the packed on-wire data
+ parent->options_.clear();
+
+ // check that the removed options are indeed gone
+ EXPECT_EQ( 4, parent->len() );
+
+ // now recreate options list
+ EXPECT_TRUE( parent->unpack() );
+
+ // transid, message-type should be the same as before
+ EXPECT_EQ(transid, parent->getTransid());
+ EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
+
+ EXPECT_TRUE( parent->getOption(1));
+ EXPECT_TRUE( parent->getOption(2));
+ EXPECT_TRUE( parent->getOption(100));
+ EXPECT_FALSE( parent->getOption(4));
+
+ delete parent;
+}
+
+TEST_F(Pkt6Test, addGetDelOptions) {
+ Pkt6 * parent = new Pkt6(100);
+
+ boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+ boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+ boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+
+ parent->addOption(opt1);
+ parent->addOption(opt2);
+
+ // getOption() test
+ EXPECT_EQ(opt1, parent->getOption(1));
+ EXPECT_EQ(opt2, parent->getOption(2));
+
+ // expect NULL
+ EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+
+ // now there are 2 options of type 2
+ parent->addOption(opt3);
+
+ // let's delete one of them
+ EXPECT_EQ(true, parent->delOption(2));
+
+ // there still should be the other option 2
+ EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+
+ // let's delete the other option 2
+ EXPECT_EQ(true, parent->delOption(2));
+
+ // no more options with type=2
+ EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+
+ // let's try to delete - should fail
+ EXPECT_TRUE(false == parent->delOption(2));
+
+ delete parent;
+}
+
+
+}
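
For reference, a minimal sketch of the packet round trip these tests exercise, written against the Pkt6/Option API as used above (the include paths and the isc::dhcp namespace are assumptions; error handling is omitted):

#include <boost/shared_ptr.hpp>
#include <dhcp/dhcp6.h>   // assumed location of the DHCPV6_*/D6O_* constants
#include <dhcp/pkt6.h>    // assumed location of Pkt6
#include <dhcp/option.h>  // assumed location of Option

using namespace isc::dhcp; // assumed namespace of Pkt6 and Option

void pkt6RoundTrip() {
    // Build an outgoing SOLICIT with a client-id option, as in packUnpack.
    Pkt6* pkt = new Pkt6(100);
    pkt->setType(DHCPV6_SOLICIT);
    pkt->addOption(boost::shared_ptr<Option>(new Option(Option::V6, D6O_CLIENTID)));

    if (pkt->pack()) {
        // The wire form is now stored in the packet; a receiver holding
        // the same data would unpack() it and query individual options.
        if (pkt->unpack() && pkt->getOption(D6O_CLIENTID)) {
            // client-id option successfully recovered
        }
    }
    delete pkt;
}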
diff --git a/src/lib/dhcp/tests/run_unittests.cc b/src/lib/dhcp/tests/run_unittests.cc
new file mode 100644
index 0000000..db27f76
--- /dev/null
+++ b/src/lib/dhcp/tests/run_unittests.cc
@@ -0,0 +1,27 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[]) {
+ ::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+
+ int result = RUN_ALL_TESTS();
+
+ return (result);
+}
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 887ac09..0d2bffd 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -23,14 +23,22 @@ EXTRA_DIST += rdata/generic/cname_5.cc
EXTRA_DIST += rdata/generic/cname_5.h
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.cc
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.h
+EXTRA_DIST += rdata/generic/detail/txt_like.h
+EXTRA_DIST += rdata/generic/detail/ds_like.h
+EXTRA_DIST += rdata/generic/dlv_32769.cc
+EXTRA_DIST += rdata/generic/dlv_32769.h
EXTRA_DIST += rdata/generic/dname_39.cc
EXTRA_DIST += rdata/generic/dname_39.h
EXTRA_DIST += rdata/generic/dnskey_48.cc
EXTRA_DIST += rdata/generic/dnskey_48.h
EXTRA_DIST += rdata/generic/ds_43.cc
EXTRA_DIST += rdata/generic/ds_43.h
+EXTRA_DIST += rdata/generic/hinfo_13.cc
+EXTRA_DIST += rdata/generic/hinfo_13.h
EXTRA_DIST += rdata/generic/mx_15.cc
EXTRA_DIST += rdata/generic/mx_15.h
+EXTRA_DIST += rdata/generic/naptr_35.cc
+EXTRA_DIST += rdata/generic/naptr_35.h
EXTRA_DIST += rdata/generic/ns_2.cc
EXTRA_DIST += rdata/generic/ns_2.h
EXTRA_DIST += rdata/generic/nsec3_50.cc
@@ -49,14 +57,24 @@ EXTRA_DIST += rdata/generic/rrsig_46.cc
EXTRA_DIST += rdata/generic/rrsig_46.h
EXTRA_DIST += rdata/generic/soa_6.cc
EXTRA_DIST += rdata/generic/soa_6.h
+EXTRA_DIST += rdata/generic/spf_99.cc
+EXTRA_DIST += rdata/generic/spf_99.h
EXTRA_DIST += rdata/generic/txt_16.cc
EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/minfo_14.cc
+EXTRA_DIST += rdata/generic/minfo_14.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
EXTRA_DIST += rdata/hs_4/a_1.cc
EXTRA_DIST += rdata/hs_4/a_1.h
EXTRA_DIST += rdata/in_1/a_1.cc
EXTRA_DIST += rdata/in_1/a_1.h
EXTRA_DIST += rdata/in_1/aaaa_28.cc
EXTRA_DIST += rdata/in_1/aaaa_28.h
+EXTRA_DIST += rdata/in_1/dhcid_49.cc
+EXTRA_DIST += rdata/in_1/dhcid_49.h
+EXTRA_DIST += rdata/in_1/srv_33.cc
+EXTRA_DIST += rdata/in_1/srv_33.h
#EXTRA_DIST += rdata/template.cc
#EXTRA_DIST += rdata/template.h
@@ -88,8 +106,11 @@ libdns___la_SOURCES += tsig.h tsig.cc
libdns___la_SOURCES += tsigerror.h tsigerror.cc
libdns___la_SOURCES += tsigkey.h tsigkey.cc
libdns___la_SOURCES += tsigrecord.h tsigrecord.cc
+libdns___la_SOURCES += character_string.h character_string.cc
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.h
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.cc
+libdns___la_SOURCES += rdata/generic/detail/txt_like.h
+libdns___la_SOURCES += rdata/generic/detail/ds_like.h
libdns___la_CPPFLAGS = $(AM_CPPFLAGS)
# Most applications of libdns++ will only implicitly rely on libcryptolink,
diff --git a/src/lib/dns/benchmarks/Makefile.am b/src/lib/dns/benchmarks/Makefile.am
index 8645385..0d7856f 100644
--- a/src/lib/dns/benchmarks/Makefile.am
+++ b/src/lib/dns/benchmarks/Makefile.am
@@ -13,5 +13,6 @@ noinst_PROGRAMS = rdatarender_bench
rdatarender_bench_SOURCES = rdatarender_bench.cc
rdatarender_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+rdatarender_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
rdatarender_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
rdatarender_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/lib/dns/character_string.cc b/src/lib/dns/character_string.cc
new file mode 100644
index 0000000..3a289ac
--- /dev/null
+++ b/src/lib/dns/character_string.cc
@@ -0,0 +1,140 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "character_string.h"
+#include "rdata.h"
+
+using namespace std;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace dns {
+
+namespace {
+bool isDigit(char c) {
+ return (('0' <= c) && (c <= '9'));
+}
+}
+
+std::string
+characterstr::getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ string result;
+
+ // If the input string only contains white-spaces, it is an invalid
+ // <character-string>
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText, "Invalid text format, \
+ <character-string> field is missing.");
+ }
+
+ // Whether the <character-string> is separated with double quotes (")
+ bool quotes_separated = (*input_iterator == '"');
+ // Whether the quotes are paired if the string is quote-separated
+ bool quotes_paired = false;
+
+ if (quotes_separated) {
+ ++input_iterator;
+ }
+
+ while(input_iterator < input_str.end()){
+ // Escaped characters processing
+ if (*input_iterator == '\\') {
+ if (input_iterator + 1 == input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ if (isDigit(*(input_iterator + 1))) {
+ // \DDD where each D is a digit. It is the octet
+ // corresponding to the decimal number described by DDD
+ if (input_iterator + 3 >= input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ int n = 0;
+ ++input_iterator;
+ for (int i = 0; i < 3; ++i) {
+ if (isDigit(*input_iterator)) {
+ n = n*10 + (*input_iterator - '0');
+ ++input_iterator;
+ } else {
+ isc_throw(InvalidRdataText, "Illegal decimal \
+ escaping series");
+ }
+ }
+ if (n > 255) {
+ isc_throw(InvalidRdataText, "Illegal octet \
+ number");
+ }
+ result.push_back(n);
+ continue;
+ }
+ } else {
+ ++input_iterator;
+ result.push_back(*input_iterator);
+ ++input_iterator;
+ continue;
+ }
+ }
+ }
+
+ if (quotes_separated) {
+ // If the <character-string> is separated with the quote symbol and
+ // another quote symbol is encountered, it is the end of the
+ // <character-string>
+ if (*input_iterator == '"') {
+ quotes_paired = true;
+ ++input_iterator;
+ // Reach the end of character string
+ break;
+ }
+ } else if (*input_iterator == ' ') {
+ // If the <character-string> is not separated with the quote symbol,
+ // it is separated with the <space> character
+ break;
+ }
+
+ result.push_back(*input_iterator);
+
+ ++input_iterator;
+ }
+
+ if (result.size() > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, "<character-string> is too long");
+ }
+
+ if (quotes_separated && !quotes_paired) {
+ isc_throw(InvalidRdataText, "The quotes are not paired");
+ }
+
+ return (result);
+}
+
+std::string
+characterstr::getNextCharacterString(util::InputBuffer& buffer, size_t len) {
+ uint8_t str_len = buffer.readUint8();
+
+ size_t pos = buffer.getPosition();
+ if (len - pos < str_len) {
+ isc_throw(InvalidRdataLength, "Invalid string length");
+ }
+
+ uint8_t buf[MAX_CHARSTRING_LEN];
+ buffer.readData(buf, str_len);
+ return (string(buf, buf + str_len));
+}
+
+} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/dns/character_string.h b/src/lib/dns/character_string.h
new file mode 100644
index 0000000..7961274
--- /dev/null
+++ b/src/lib/dns/character_string.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CHARACTER_STRING_H
+#define __CHARACTER_STRING_H
+
+#include <string>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dns {
+
+// \brief Some utility functions to extract <character-string> from string
+// or InputBuffer
+//
+// <character-string> is expressed in one or two ways: as a contiguous set
+// of characters without interior spaces, or as a string beginning with a "
+// and ending with a ". Inside a " delimited string any character can
+// occur, except for a " itself, which must be quoted using \ (back slash).
+// Ref. RFC1035
+
+
+namespace characterstr {
+ /// Get a <character-string> from a string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator The iterator from which to start extracting,
+ /// the iterator will be updated to the new position after the function
+ /// returns
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Get a <character-string> from an input buffer
+ ///
+ /// \param buffer The input buffer
+ /// \param len The input buffer total length
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(util::InputBuffer& buffer, size_t len);
+
+} // namespace characterstr
+} // namespace dns
+} // namespace isc
+
+#endif // __CHARACTER_STRING_H
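
A short usage sketch of the helpers declared above: splitting a TXT-style RDATA text field into its <character-string> components. The wrapper function here is hypothetical; only the two declared characterstr functions are taken from this change.

#include <string>
#include <vector>
#include <dns/character_string.h>

// Hypothetical helper: repeatedly pulls <character-string>s out of a
// master-file style text field using characterstr::getNextCharacterString().
std::vector<std::string>
splitCharacterStrings(const std::string& text) {
    std::vector<std::string> result;
    std::string::const_iterator it = text.begin();
    while (it != text.end()) {
        // Consumes one quoted or space-delimited <character-string> and
        // advances the iterator past it (throws InvalidRdataText or
        // CharStringTooLong on malformed input).
        result.push_back(isc::dns::characterstr::getNextCharacterString(text, it));
        // Skip the separating space(s), if any.
        while (it != text.end() && *it == ' ') {
            ++it;
        }
    }
    return (result);
}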
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index b3c8da2..f3cd5df 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -133,7 +133,15 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
if classdir_mtime < getmtime('@srcdir@/rdata'):
classdir_mtime = getmtime('@srcdir@/rdata')
- for dir in list(os.listdir('@srcdir@/rdata')):
+ # Sort directories before iterating through them so that the directory
+ # list is processed in the same order on all systems. The resulting
+ # files should compile regardless of the order in which the components
+ # are included but... Having a fixed order for the directories should
+ # eliminate system-dependent problems. (Note that the directory names
+ # in BIND 10 are ASCII, so the order should be locale-independent.)
+ dirlist = os.listdir('@srcdir@/rdata')
+ dirlist.sort()
+ for dir in dirlist:
classdir = '@srcdir@/rdata' + os.sep + dir
m = re_typecode.match(dir)
if os.path.isdir(classdir) and (m != None or dir == 'generic'):
@@ -145,7 +153,12 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
class_code = m.group(2)
if not class_code in classcode2txt:
classcode2txt[class_code] = class_txt
- for file in list(os.listdir(classdir)):
+
+ # Same considerations as directories regarding sorted order
+ # also apply to files.
+ filelist = os.listdir(classdir)
+ filelist.sort()
+ for file in filelist:
file = classdir + os.sep + file
m = re_typecode.match(os.path.split(file)[1])
if m != None:
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index bf7ccd5..b3e9229 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -124,10 +124,12 @@ public:
void setOpcode(const Opcode& opcode);
void setRcode(const Rcode& rcode);
int parseQuestion(InputBuffer& buffer);
- int parseSection(const Message::Section section, InputBuffer& buffer);
+ int parseSection(const Message::Section section, InputBuffer& buffer,
+ Message::ParseOptions options);
void addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata);
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options);
void addEDNS(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
const RRTTL& ttl, const Rdata& rdata);
@@ -239,7 +241,28 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
"Message rendering attempted without Opcode set");
}
+ // Reserve the space for TSIG (if needed) so that we can handle truncation
+ // case correctly later when that happens. orig_xxx variables remember
+ // some configured parameters of renderer in case they are needed in
+ // truncation processing below.
+ const size_t tsig_len = (tsig_ctx != NULL) ? tsig_ctx->getTSIGLength() : 0;
+ const size_t orig_msg_len_limit = renderer.getLengthLimit();
+ const AbstractMessageRenderer::CompressMode orig_compress_mode =
+ renderer.getCompressMode();
+ if (tsig_len > 0) {
+ if (tsig_len > orig_msg_len_limit) {
+ isc_throw(InvalidParameter, "Failed to render DNS message: "
+ "too small limit for a TSIG (" <<
+ orig_msg_len_limit << ")");
+ }
+ renderer.setLengthLimit(orig_msg_len_limit - tsig_len);
+ }
+
// reserve room for the header
+ if (renderer.getLengthLimit() < HEADERLEN) {
+ isc_throw(InvalidParameter, "Failed to render DNS message: "
+ "too small limit for a Header");
+ }
renderer.skip(HEADERLEN);
uint16_t qdcount =
@@ -284,6 +307,22 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
}
}
+ // If we're adding a TSIG to a truncated message, clear all RRsets
+ // from the message except for the question before adding the TSIG.
+ // If even (some of) the question doesn't fit, don't include it.
+ if (tsig_ctx != NULL && renderer.isTruncated()) {
+ renderer.clear();
+ renderer.setLengthLimit(orig_msg_len_limit - tsig_len);
+ renderer.setCompressMode(orig_compress_mode);
+ renderer.skip(HEADERLEN);
+ qdcount = for_each(questions_.begin(), questions_.end(),
+ RenderSection<QuestionPtr>(renderer,
+ false)).getTotalCount();
+ ancount = 0;
+ nscount = 0;
+ arcount = 0;
+ }
+
// Adjust the counter buffer.
// XXX: these may not be equal to the number of corresponding entries
// in rrsets_[] or questions_ if truncation occurred or an EDNS OPT RR
@@ -315,10 +354,16 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
renderer.writeUint16At(arcount, header_pos);
// Add TSIG, if necessary, at the end of the message.
- // TODO: truncate case consideration
if (tsig_ctx != NULL) {
- tsig_ctx->sign(qid_, renderer.getData(),
- renderer.getLength())->toWire(renderer);
+ // Release the reserved space in the renderer.
+ renderer.setLengthLimit(orig_msg_len_limit);
+
+ const int tsig_count =
+ tsig_ctx->sign(qid_, renderer.getData(),
+ renderer.getLength())->toWire(renderer);
+ if (tsig_count != 1) {
+ isc_throw(Unexpected, "Failed to render a TSIG RR");
+ }
// update the ARCOUNT for the TSIG RR. Note that for a sane DNS
// message arcount should never overflow to 0.
@@ -571,7 +616,7 @@ Message::parseHeader(InputBuffer& buffer) {
}
void
-Message::fromWire(InputBuffer& buffer) {
+Message::fromWire(InputBuffer& buffer, ParseOptions options) {
if (impl_->mode_ != Message::PARSE) {
isc_throw(InvalidMessageOperation,
"Message parse attempted in non parse mode");
@@ -583,11 +628,11 @@ Message::fromWire(InputBuffer& buffer) {
impl_->counts_[SECTION_QUESTION] = impl_->parseQuestion(buffer);
impl_->counts_[SECTION_ANSWER] =
- impl_->parseSection(SECTION_ANSWER, buffer);
+ impl_->parseSection(SECTION_ANSWER, buffer, options);
impl_->counts_[SECTION_AUTHORITY] =
- impl_->parseSection(SECTION_AUTHORITY, buffer);
+ impl_->parseSection(SECTION_AUTHORITY, buffer, options);
impl_->counts_[SECTION_ADDITIONAL] =
- impl_->parseSection(SECTION_ADDITIONAL, buffer);
+ impl_->parseSection(SECTION_ADDITIONAL, buffer, options);
}
int
@@ -663,7 +708,7 @@ struct MatchRR : public unary_function<RRsetPtr, bool> {
// is hardcoded here.
int
MessageImpl::parseSection(const Message::Section section,
- InputBuffer& buffer)
+ InputBuffer& buffer, Message::ParseOptions options)
{
assert(section < MessageImpl::NUM_SECTIONS);
@@ -695,7 +740,7 @@ MessageImpl::parseSection(const Message::Section section,
addTSIG(section, count, buffer, start_position, name, rrclass, ttl,
*rdata);
} else {
- addRR(section, name, rrclass, rrtype, ttl, rdata);
+ addRR(section, name, rrclass, rrtype, ttl, rdata, options);
++added;
}
}
@@ -706,19 +751,22 @@ MessageImpl::parseSection(const Message::Section section,
void
MessageImpl::addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata)
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options)
{
- vector<RRsetPtr>::iterator it =
- find_if(rrsets_[section].begin(), rrsets_[section].end(),
- MatchRR(name, rrtype, rrclass));
- if (it != rrsets_[section].end()) {
- (*it)->setTTL(min((*it)->getTTL(), ttl));
- (*it)->addRdata(rdata);
- } else {
- RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
- rrset->addRdata(rdata);
- rrsets_[section].push_back(rrset);
+ if ((options & Message::PRESERVE_ORDER) == 0) {
+ vector<RRsetPtr>::iterator it =
+ find_if(rrsets_[section].begin(), rrsets_[section].end(),
+ MatchRR(name, rrtype, rrclass));
+ if (it != rrsets_[section].end()) {
+ (*it)->setTTL(min((*it)->getTTL(), ttl));
+ (*it)->addRdata(rdata);
+ return;
+ }
}
+ RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
+ rrset->addRdata(rdata);
+ rrsets_[section].push_back(rrset);
}
void
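
A usage sketch of the reworked toWire() above: rendering a signed response under a UDP size limit. The renderer and TSIG context are assumed to be set up by the caller; the point is that the TSIG space is now reserved up front and, on truncation, everything except the question section is dropped before signing.

#include <dns/message.h>
#include <dns/messagerenderer.h>
#include <dns/tsig.h>

// Sketch only: 'response' and 'tsig_ctx' come from the caller.
void renderSignedResponse(isc::dns::Message& response,
                          isc::dns::AbstractMessageRenderer& renderer,
                          isc::dns::TSIGContext& tsig_ctx) {
    renderer.setLengthLimit(512);  // typical UDP payload limit
    // toWire() reserves tsig_ctx.getTSIGLength() bytes internally; if the
    // answer does not fit, only the question section survives and a valid
    // TSIG is still appended.
    response.toWire(renderer, tsig_ctx);
}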
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index fcc53e9..47632cb 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -526,7 +526,7 @@ public:
/// source message to the same section of this message
///
/// \param section the section to append
- /// \param target The source Message
+ /// \param source The source Message
void appendSection(const Section section, const Message& source);
/// \brief Prepare for making a response from a request.
@@ -565,16 +565,74 @@ public:
/// \c tsig_ctx will be updated based on the fact it was used for signing
/// and with the latest MAC.
///
+ /// \exception InvalidMessageOperation The message is not in the Render
+ /// mode, or either Rcode or Opcode is not set.
+ /// \exception InvalidParameter The allowable limit of \c renderer is too
+ /// small for a TSIG or the Header section. Note that this shouldn't
+ /// happen with parameters as defined in the standard protocols,
+ /// so it's more likely a program bug.
+ /// \exception Unexpected Rendering the TSIG RR fails. The implementation
+ /// internally makes sure this doesn't happen, so if that ever occurs
+ /// it should mean a bug either in the TSIG context or in the renderer
+ /// implementation.
+ ///
/// \param renderer See the other version
/// \param tsig_ctx A TSIG context that is to be used for signing the
/// message
void toWire(AbstractMessageRenderer& renderer, TSIGContext& tsig_ctx);
+ /// Parse options.
+ ///
+ /// Note that the PRESERVE_ORDER option does not affect how EDNS or TSIG
+ /// records are handled.
+ ///
+ /// The option values are used as a parameter for \c fromWire().
+ /// These are values of a bitmask type. Bitwise operations can be
+ /// performed on these values to express compound options.
+ enum ParseOptions {
+ PARSE_DEFAULT = 0, ///< The default options
+ PRESERVE_ORDER = 1 ///< Preserve RR order and don't combine them
+ };
+
/// \brief Parse the header section of the \c Message.
void parseHeader(isc::util::InputBuffer& buffer);
- /// \brief Parse the \c Message.
- void fromWire(isc::util::InputBuffer& buffer);
+ /// \brief (Re)build a \c Message object from wire-format data.
+ ///
+ /// This method parses the given wire format data to build a
+ /// complete Message object. On success, the values of the header section
+ /// fields can be accessible via corresponding get methods, and the
+ /// question and following sections can be accessible via the
+ /// corresponding iterators. If the message contains an EDNS or TSIG,
+ /// they can be accessible via \c getEDNS() and \c getTSIGRecord(),
+ /// respectively.
+ ///
+ /// This \c Message must be in the \c PARSE mode.
+ ///
+ /// This method performs strict validation on the given message based
+ /// on the DNS protocol specifications. If the given message data is
+ /// invalid, this method throws an exception (see the exception list).
+ ///
+ /// By default, this method combines RRs of the same name, RR type and
+ /// RR class in a section into a single RRset, even if they are interleaved
+ /// with a different type of RR (though it would be a rare case in
+ /// practice). If the \c PRESERVE_ORDER option is specified, it handles
+ /// each RR separately, in the appearing order, and converts it to a
+ /// separate RRset (so this RRset should contain exactly one Rdata).
+ /// This mode will be necessary when the higher level protocol is
+ /// ordering conscious. For example, in AXFR and IXFR, the position of
+ /// the SOA RRs is crucial.
+ ///
+ /// \exception InvalidMessageOperation \c Message is in the RENDER mode
+ /// \exception DNSMessageFORMERR The given message data is syntactically
+ /// invalid
+ /// \exception MessageTooShort The given data is shorter than a valid
+ /// header section
+ /// \exception std::bad_alloc Memory allocation failure
+ /// \exception Others \c Name, \c Rdata, and \c EDNS classes can also throw
+ ///
+ /// \param buffer An input buffer object that stores the wire data
+ /// \param options Parse options
+ void fromWire(isc::util::InputBuffer& buffer, ParseOptions options
+ = PARSE_DEFAULT);
///
/// \name Protocol constants
@@ -610,7 +668,7 @@ typedef boost::shared_ptr<const Message> ConstMessagePtr;
///
/// \param os A \c std::ostream object on which the insertion operation is
/// performed.
-/// \param record A \c Message object output by the operation.
+/// \param message A \c Message object output by the operation.
/// \return A reference to the same \c std::ostream object referenced by
/// parameter \c os after the insertion operation.
std::ostream& operator<<(std::ostream& os, const Message& message);
@@ -618,6 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Message& message);
}
#endif // __MESSAGE_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
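
A usage sketch of the new ParseOptions argument documented above, for a caller (e.g. an AXFR/IXFR client) that must see RRs in their original wire order; the buffer contents are a placeholder.

#include <dns/message.h>
#include <util/buffer.h>

void parsePreservingOrder(const void* wire_data, size_t wire_len) {
    isc::util::InputBuffer buffer(wire_data, wire_len);
    isc::dns::Message message(isc::dns::Message::PARSE);

    // With PRESERVE_ORDER each RR becomes its own single-Rdata RRset,
    // kept in the order it appeared on the wire.
    message.fromWire(buffer, isc::dns::Message::PRESERVE_ORDER);
}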
diff --git a/src/lib/dns/messagerenderer.cc b/src/lib/dns/messagerenderer.cc
index 767aca9..02f5519 100644
--- a/src/lib/dns/messagerenderer.cc
+++ b/src/lib/dns/messagerenderer.cc
@@ -150,8 +150,6 @@ private:
struct MessageRenderer::MessageRendererImpl {
/// \brief Constructor from an output buffer.
///
- /// \param buffer An \c OutputBuffer object to which wire format data is
- /// written.
MessageRendererImpl() :
nbuffer_(Name::MAX_WIRE), msglength_limit_(512),
truncated_(false), compress_mode_(MessageRenderer::CASE_INSENSITIVE)
diff --git a/src/lib/dns/name.cc b/src/lib/dns/name.cc
index 4cd0b2b..772417f 100644
--- a/src/lib/dns/name.cc
+++ b/src/lib/dns/name.cc
@@ -700,7 +700,7 @@ Name::split(const unsigned int first, const unsigned int n) const {
}
Name
-Name::split(const unsigned level) const {
+Name::split(const unsigned int level) const {
if (level >= getLabelCount()) {
isc_throw(OutOfRange, "invalid level for name split (" << level
<< ") for name " << *this);
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 9162f4e..3b89358 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -4,32 +4,47 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+lib_LTLIBRARIES = libpydnspp.la
+libpydnspp_la_SOURCES = pydnspp_common.cc pydnspp_common.h pydnspp_towire.h
+libpydnspp_la_SOURCES += name_python.cc name_python.h
+libpydnspp_la_SOURCES += rrset_python.cc rrset_python.h
+libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
+libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
+libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
+libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
+libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
+libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
+libpydnspp_la_SOURCES += question_python.cc question_python.h
+libpydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
+libpydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
+libpydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
+libpydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
+libpydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+libpydnspp_la_SOURCES += edns_python.cc edns_python.h
+libpydnspp_la_SOURCES += message_python.cc message_python.h
+
+libpydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+libpydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+libpydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
+
+
+
pyexec_LTLIBRARIES = pydnspp.la
-pydnspp_la_SOURCES = pydnspp.cc pydnspp_common.cc
+pydnspp_la_SOURCES = pydnspp.cc
pydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
-# directly included from source files, so these don't have their own
-# rules
-EXTRA_DIST = pydnspp_common.h
-EXTRA_DIST += edns_python.cc
-EXTRA_DIST += messagerenderer_python.cc
-EXTRA_DIST += message_python.cc
-EXTRA_DIST += rrclass_python.cc
-EXTRA_DIST += name_python.cc
-EXTRA_DIST += opcode_python.cc
-EXTRA_DIST += rcode_python.cc
-EXTRA_DIST += rrset_python.cc
-EXTRA_DIST += question_python.cc
-EXTRA_DIST += rrttl_python.cc
-EXTRA_DIST += rdata_python.cc
-EXTRA_DIST += rrtype_python.cc
-EXTRA_DIST += tsigkey_python.cc
-EXTRA_DIST += tsig_python.cc
+EXTRA_DIST = tsigerror_python_inc.cc
+EXTRA_DIST += message_python_inc.cc
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
pydnspp_la_LDFLAGS += -module
pydnspp_la_LIBADD = $(top_builddir)/src/lib/dns/libdns++.la
pydnspp_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+pydnspp_la_LIBADD += libpydnspp.la
pydnspp_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/dns/python/edns_python.cc b/src/lib/dns/python/edns_python.cc
index d781e89..8f0f1a4 100644
--- a/src/lib/dns/python/edns_python.cc
+++ b/src/lib/dns/python/edns_python.cc
@@ -12,38 +12,38 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
+
#include <cassert>
#include <dns/edns.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "edns_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace isc::dns;
-using namespace isc::util;
using namespace isc::dns::rdata;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// EDNS
-//
-
-// The s_* Class simply covers one instantiation of the object
class s_EDNS : public PyObject {
public:
- EDNS* edns;
+ EDNS* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
+typedef CPPPyObjectContainer<s_EDNS, EDNS> EDNSContainer;
// General creation and destruction
int EDNS_init(s_EDNS* self, PyObject* args);
@@ -103,60 +103,6 @@ PyMethodDef EDNS_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_EDNS
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject edns_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "libdns_python.EDNS",
- sizeof(s_EDNS), // tp_basicsize
- 0, // tp_itemsize
- (destructor)EDNS_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- EDNS_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The EDNS class encapsulates DNS extensions "
- "provided by the EDNSx protocol.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- EDNS_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)EDNS_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
EDNS*
createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
const RRTTL& rrttl, const Rdata& rdata, uint8_t& extended_rcode)
@@ -179,15 +125,15 @@ createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
int
EDNS_init(s_EDNS* self, PyObject* args) {
uint8_t version = EDNS::SUPPORTED_VERSION;
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
if (PyArg_ParseTuple(args, "|b", &version)) {
try {
- self->edns = new EDNS(version);
+ self->cppobj = new EDNS(version);
} catch (const isc::InvalidParameter& ex) {
PyErr_SetString(po_InvalidParameter, ex.what());
return (-1);
@@ -203,10 +149,12 @@ EDNS_init(s_EDNS* self, PyObject* args) {
// in this context so that we can share the try-catch logic with
// EDNS_createFromRR() (see below).
uint8_t extended_rcode;
- self->edns = createFromRR(*name->name, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- return (self->edns != NULL ? 0 : -1);
+ self->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata), extended_rcode);
+ return (self->cppobj != NULL ? 0 : -1);
}
PyErr_Clear();
@@ -217,19 +165,19 @@ EDNS_init(s_EDNS* self, PyObject* args) {
void
EDNS_destroy(s_EDNS* const self) {
- delete self->edns;
- self->edns = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
EDNS_toText(const s_EDNS* const self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->edns->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
-EDNS_str(PyObject* const self) {
+EDNS_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
const_cast<char*>("to_text"),
@@ -240,14 +188,14 @@ PyObject*
EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject* bytes;
uint8_t extended_rcode;
- s_MessageRenderer* renderer;
+ PyObject* renderer;
if (PyArg_ParseTuple(args, "Ob", &bytes, &extended_rcode) &&
PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(0);
- self->edns->toWire(buffer, extended_rcode);
+ self->cppobj->toWire(buffer, extended_rcode);
PyObject* rd_bytes = PyBytes_FromStringAndSize(
static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
@@ -257,8 +205,8 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
return (result);
} else if (PyArg_ParseTuple(args, "O!b", &messagerenderer_type,
&renderer, &extended_rcode)) {
- const unsigned int n = self->edns->toWire(*renderer->messagerenderer,
- extended_rcode);
+ const unsigned int n = self->cppobj->toWire(
+ PyMessageRenderer_ToMessageRenderer(renderer), extended_rcode);
return (Py_BuildValue("I", n));
}
@@ -269,12 +217,12 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject*
EDNS_getVersion(const s_EDNS* const self) {
- return (Py_BuildValue("B", self->edns->getVersion()));
+ return (Py_BuildValue("B", self->cppobj->getVersion()));
}
PyObject*
EDNS_getDNSSECAwareness(const s_EDNS* const self) {
- if (self->edns->getDNSSECAwareness()) {
+ if (self->cppobj->getDNSSECAwareness()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -287,13 +235,13 @@ EDNS_setDNSSECAwareness(s_EDNS* self, PyObject* args) {
if (!PyArg_ParseTuple(args, "O!", &PyBool_Type, &b)) {
return (NULL);
}
- self->edns->setDNSSECAwareness(b == Py_True);
+ self->cppobj->setDNSSECAwareness(b == Py_True);
Py_RETURN_NONE;
}
PyObject*
EDNS_getUDPSize(const s_EDNS* const self) {
- return (Py_BuildValue("I", self->edns->getUDPSize()));
+ return (Py_BuildValue("I", self->cppobj->getUDPSize()));
}
PyObject*
@@ -310,17 +258,17 @@ EDNS_setUDPSize(s_EDNS* self, PyObject* args) {
"UDP size is not an unsigned 16-bit integer");
return (NULL);
}
- self->edns->setUDPSize(size);
+ self->cppobj->setUDPSize(size);
Py_RETURN_NONE;
}
PyObject*
EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
s_EDNS* edns_obj = NULL;
assert(null_self == NULL);
@@ -334,14 +282,17 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
return (NULL);
}
- edns_obj->edns = createFromRR(*name->name, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- if (edns_obj->edns != NULL) {
+ edns_obj->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata),
+ extended_rcode);
+ if (edns_obj->cppobj != NULL) {
PyObject* extrcode_obj = Py_BuildValue("B", extended_rcode);
return (Py_BuildValue("OO", edns_obj, extrcode_obj));
}
-
+
Py_DECREF(edns_obj);
return (NULL);
}
@@ -353,23 +304,90 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
}
} // end of anonymous namespace
-// end of EDNS
-// Module Initialization, all statics are initialized here
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_EDNS
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject edns_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.EDNS",
+ sizeof(s_EDNS), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)EDNS_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ EDNS_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The EDNS class encapsulates DNS extensions "
+ "provided by the EDNSx protocol.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ EDNS_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)EDNS_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createEDNSObject(const EDNS& source) {
+ EDNSContainer container(PyObject_New(s_EDNS, &edns_type));
+ container.set(new EDNS(source));
+ return (container.release());
+}
+
bool
-initModulePart_EDNS(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&edns_type) < 0) {
- return (false);
+PyEDNS_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&edns_type);
- void* p = &edns_type;
- PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
-
- addClassVariable(edns_type, "SUPPORTED_VERSION",
- Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
+ return (PyObject_TypeCheck(obj, &edns_type));
+}
- return (true);
+const EDNS&
+PyEDNS_ToEDNS(const PyObject* edns_obj) {
+ if (edns_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in EDNS PyObject conversion");
+ }
+ const s_EDNS* edns = static_cast<const s_EDNS*>(edns_obj);
+ return (*edns->cppobj);
}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/edns_python.h b/src/lib/dns/python/edns_python.h
new file mode 100644
index 0000000..30d92ab
--- /dev/null
+++ b/src/lib/dns/python/edns_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_EDNS_H
+#define __PYTHON_EDNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class EDNS;
+
+namespace python {
+
+extern PyTypeObject edns_type;
+
+/// This is a simple shortcut to create a python EDNS object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createEDNSObject(const EDNS& source);
+
+/// \brief Checks if the given python object is a EDNS object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type EDNS, false otherwise
+bool PyEDNS_Check(PyObject* obj);
+
+/// \brief Returns a reference to the EDNS object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type EDNS; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyEDNS_Check()
+///
+/// \note This is not a copy; if the EDNS is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param edns_obj The edns object to convert
+const EDNS& PyEDNS_ToEDNS(const PyObject* edns_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_EDNS_H
+
+// Local Variables:
+// mode: c++
+// End:
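
A sketch of the calling convention these helpers document, as it would look from another wrapper in this branch; the function itself is hypothetical, and only edns_type, PyEDNS_ToEDNS() and createEDNSObject() are taken from this change.

#include <Python.h>
#include <exception>
#include <dns/edns.h>
#include "edns_python.h"

using namespace isc::dns;
using namespace isc::dns::python;

// Hypothetical module-level function: takes an EDNS argument and returns
// a new EDNS wrapper built from a copy of it.
PyObject*
example_copy_edns(PyObject*, PyObject* args) {
    PyObject* edns_obj;
    // "O!" against edns_type guarantees PyEDNS_ToEDNS() gets the right type.
    if (!PyArg_ParseTuple(args, "O!", &edns_type, &edns_obj)) {
        return (NULL);
    }
    try {
        // PyEDNS_ToEDNS() only borrows the wrapped object; createEDNSObject()
        // copies it into a fresh Python wrapper (and throws on failure).
        return (createEDNSObject(PyEDNS_ToEDNS(edns_obj)));
    } catch (const std::exception& ex) {
        PyErr_SetString(PyExc_SystemError, ex.what());
        return (NULL);
    }
}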
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index e3cc53f..48fff94 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -12,46 +12,44 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
#include <exceptions/exceptions.h>
+#include <util/python/pycppwrapper_util.h>
#include <dns/message.h>
+#include <dns/rcode.h>
+#include <dns/tsig.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "question_python.h"
+#include "edns_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "rrset_python.h"
+#include "message_python.h"
+#include "messagerenderer_python.h"
+#include "tsig_python.h"
+#include "tsigrecord_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-namespace {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-PyObject* po_MessageTooShort;
-PyObject* po_InvalidMessageSection;
-PyObject* po_InvalidMessageOperation;
-PyObject* po_InvalidMessageUDPSize;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Message
-//
+// Import pydoc text
+#include "message_python_inc.cc"
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Message : public PyObject {
public:
- Message* message;
+ isc::dns::Message* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
int Message_init(s_Message* self, PyObject* args);
void Message_destroy(s_Message* self);
@@ -65,10 +63,11 @@ PyObject* Message_getOpcode(s_Message* self);
PyObject* Message_setOpcode(s_Message* self, PyObject* args);
PyObject* Message_getEDNS(s_Message* self);
PyObject* Message_setEDNS(s_Message* self, PyObject* args);
+PyObject* Message_getTSIGRecord(s_Message* self);
PyObject* Message_getRRCount(s_Message* self, PyObject* args);
// use direct iterators for these? (or simply lists for now?)
-PyObject* Message_getQuestion(s_Message* self);
-PyObject* Message_getSection(s_Message* self, PyObject* args);
+PyObject* Message_getQuestion(PyObject* self, PyObject*);
+PyObject* Message_getSection(PyObject* self, PyObject* args);
//static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
//static PyObject* Message_endQuestion(s_Message* self, PyObject* args);
//static PyObject* Message_beginSection(s_Message* self, PyObject* args);
@@ -81,7 +80,7 @@ PyObject* Message_makeResponse(s_Message* self);
PyObject* Message_toText(s_Message* self);
PyObject* Message_str(PyObject* self);
PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(s_Message* self, PyObject* args);
+PyObject* Message_fromWire(PyObject* pyself, PyObject* args);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -123,12 +122,17 @@ PyMethodDef Message_methods[] = {
{ "set_edns", reinterpret_cast<PyCFunction>(Message_setEDNS), METH_VARARGS,
"Set EDNS for the message."
},
+ { "get_tsig_record",
+ reinterpret_cast<PyCFunction>(Message_getTSIGRecord), METH_NOARGS,
+ "Return, if any, the TSIG record contained in the received message. "
+ "If no TSIG RR is set in the message, None will be returned."
+ },
{ "get_rr_count", reinterpret_cast<PyCFunction>(Message_getRRCount), METH_VARARGS,
"Returns the number of RRs contained in the given section." },
- { "get_question", reinterpret_cast<PyCFunction>(Message_getQuestion), METH_NOARGS,
+ { "get_question", Message_getQuestion, METH_NOARGS,
"Returns a list of all Question objects in the message "
"(should be either 0 or 1)" },
- { "get_section", reinterpret_cast<PyCFunction>(Message_getSection), METH_VARARGS,
+ { "get_section", Message_getSection, METH_VARARGS,
"Returns a list of all RRset objects in the given section of the message\n"
"The argument must be of type Section" },
{ "add_question", reinterpret_cast<PyCFunction>(Message_addQuestion), METH_VARARGS,
@@ -158,70 +162,10 @@ PyMethodDef Message_methods[] = {
"If the given message is not in RENDER mode, an "
"InvalidMessageOperation is raised.\n"
},
- { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS,
- "Parses the given wire format to a Message object.\n"
- "The first argument is a Message to parse the data into.\n"
- "The second argument must implement the buffer interface.\n"
- "If the given message is not in PARSE mode, an "
- "InvalidMessageOperation is raised.\n"
- "Raises MessageTooShort, DNSMessageFORMERR or DNSMessageBADVERS "
- " if there is a problem parsing the message." },
+ { "from_wire", Message_fromWire, METH_VARARGS, Message_fromWire_doc },
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Message
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject message_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Message",
- sizeof(s_Message), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Message_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Message_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Message class encapsulates a standard DNS message.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Message_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Message_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Message_init(s_Message* self, PyObject* args) {
int i;
@@ -229,10 +173,10 @@ Message_init(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message = new Message(Message::PARSE);
+ self->cppobj = new Message(Message::PARSE);
return (0);
} else if (i == Message::RENDER) {
- self->message = new Message(Message::RENDER);
+ self->cppobj = new Message(Message::RENDER);
return (0);
} else {
PyErr_SetString(PyExc_TypeError, "Message mode must be Message.PARSE or Message.RENDER");
@@ -247,8 +191,8 @@ Message_init(s_Message* self, PyObject* args) {
void
Message_destroy(s_Message* self) {
- delete self->message;
- self->message = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
@@ -262,7 +206,7 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) {
return (NULL);
}
- if (self->message->getHeaderFlag(
+ if (self->cppobj->getHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag))) {
Py_RETURN_TRUE;
} else {
@@ -287,7 +231,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
}
try {
- self->message->setHeaderFlag(
+ self->cppobj->setHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag), on == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
@@ -303,7 +247,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
PyObject*
Message_getQid(s_Message* self) {
- return (Py_BuildValue("I", self->message->getQid()));
+ return (Py_BuildValue("I", self->cppobj->getQid()));
}
PyObject*
@@ -322,7 +266,7 @@ Message_setQid(s_Message* self, PyObject* args) {
}
try {
- self->message->setQid(id);
+ self->cppobj->setQid(id);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -332,35 +276,25 @@ Message_setQid(s_Message* self, PyObject* args) {
PyObject*
Message_getRcode(s_Message* self) {
- s_Rcode* rcode;
-
- rcode = static_cast<s_Rcode*>(rcode_type.tp_alloc(&rcode_type, 0));
- if (rcode != NULL) {
- rcode->rcode = NULL;
- try {
- rcode->rcode = new Rcode(self->message->getRcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (rcode->rcode == NULL) {
- Py_DECREF(rcode);
- return (NULL);
- }
+ try {
+ return (createRcodeObject(self->cppobj->getRcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException, "Unexpected exception");
+ return (NULL);
}
-
- return (rcode);
}
PyObject*
Message_setRcode(s_Message* self, PyObject* args) {
- s_Rcode* rcode;
+ PyObject* rcode;
if (!PyArg_ParseTuple(args, "O!", &rcode_type, &rcode)) {
return (NULL);
}
try {
- self->message->setRcode(*rcode->rcode);
+ self->cppobj->setRcode(PyRcode_ToRcode(rcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -370,35 +304,31 @@ Message_setRcode(s_Message* self, PyObject* args) {
PyObject*
Message_getOpcode(s_Message* self) {
- s_Opcode* opcode;
-
- opcode = static_cast<s_Opcode*>(opcode_type.tp_alloc(&opcode_type, 0));
- if (opcode != NULL) {
- opcode->opcode = NULL;
- try {
- opcode->opcode = new Opcode(self->message->getOpcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (opcode->opcode == NULL) {
- Py_DECREF(opcode);
- return (NULL);
- }
+ try {
+ return (createOpcodeObject(self->cppobj->getOpcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get message opcode: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception getting opcode from message");
+ return (NULL);
}
-
- return (opcode);
}
PyObject*
Message_setOpcode(s_Message* self, PyObject* args) {
- s_Opcode* opcode;
+ PyObject* opcode;
if (!PyArg_ParseTuple(args, "O!", &opcode_type, &opcode)) {
return (NULL);
}
try {
- self->message->setOpcode(*opcode->opcode);
+ self->cppobj->setOpcode(PyOpcode_ToOpcode(opcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -408,32 +338,31 @@ Message_setOpcode(s_Message* self, PyObject* args) {
PyObject*
Message_getEDNS(s_Message* self) {
- s_EDNS* edns;
- EDNS* edns_body;
- ConstEDNSPtr src = self->message->getEDNS();
-
+ ConstEDNSPtr src = self->cppobj->getEDNS();
if (!src) {
Py_RETURN_NONE;
}
- if ((edns_body = new(nothrow) EDNS(*src)) == NULL) {
- return (PyErr_NoMemory());
- }
- edns = static_cast<s_EDNS*>(opcode_type.tp_alloc(&edns_type, 0));
- if (edns != NULL) {
- edns->edns = edns_body;
+ try {
+ return (createEDNSObject(*src));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get EDNS from message: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting EDNS from message");
}
-
- return (edns);
+ return (NULL);
}
PyObject*
Message_setEDNS(s_Message* self, PyObject* args) {
- s_EDNS* edns;
+ PyObject* edns;
if (!PyArg_ParseTuple(args, "O!", &edns_type, &edns)) {
return (NULL);
}
try {
- self->message->setEDNS(EDNSPtr(new EDNS(*edns->edns)));
+ self->cppobj->setEDNS(EDNSPtr(new EDNS(PyEDNS_ToEDNS(edns))));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -442,6 +371,29 @@ Message_setEDNS(s_Message* self, PyObject* args) {
}
PyObject*
+Message_getTSIGRecord(s_Message* self) {
+ try {
+ const TSIGRecord* tsig_record = self->cppobj->getTSIGRecord();
+
+ if (tsig_record == NULL) {
+ Py_RETURN_NONE;
+ }
+ return (createTSIGRecordObject(*tsig_record));
+ } catch (const InvalidMessageOperation& ex) {
+ PyErr_SetString(po_InvalidMessageOperation, ex.what());
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in getting TSIGRecord from message: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting TSIGRecord from message");
+ }
+ return (NULL);
+}
+
+PyObject*
Message_getRRCount(s_Message* self, PyObject* args) {
unsigned int section;
if (!PyArg_ParseTuple(args, "I", &section)) {
@@ -451,7 +403,7 @@ Message_getRRCount(s_Message* self, PyObject* args) {
return (NULL);
}
try {
- return (Py_BuildValue("I", self->message->getRRCount(
+ return (Py_BuildValue("I", self->cppobj->getRRCount(
static_cast<Message::Section>(section))));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -459,48 +411,59 @@ Message_getRRCount(s_Message* self, PyObject* args) {
}
}
+// This is a helper templated class commonly used for getQuestion and
+// getSection in order to build a list of Message section items.
+template <typename ItemType, typename CreatorParamType>
+class SectionInserter {
+ typedef PyObject* (*creator_t)(const CreatorParamType&);
+public:
+ SectionInserter(PyObject* pylist, creator_t creator) :
+ pylist_(pylist), creator_(creator)
+ {}
+ void operator()(ItemType item) {
+ if (PyList_Append(pylist_, PyObjectContainer(creator_(*item)).get())
+ == -1) {
+ isc_throw(PyCPPWrapperException, "PyList_Append failed, "
+ "probably due to short memory");
+ }
+ }
+private:
+ PyObject* pylist_;
+ creator_t creator_;
+};
+
+typedef SectionInserter<ConstQuestionPtr, Question> QuestionInserter;
+typedef SectionInserter<ConstRRsetPtr, RRset> RRsetInserter;
+
// TODO use direct iterators for these? (or simply lists for now?)
PyObject*
-Message_getQuestion(s_Message* self) {
- QuestionIterator qi, qi_end;
+Message_getQuestion(PyObject* po_self, PyObject*) {
+ const s_Message* const self = static_cast<s_Message*>(po_self);
+
try {
- qi = self->message->beginQuestion();
- qi_end = self->message->endQuestion();
+ PyObjectContainer list_container(PyList_New(0));
+ for_each(self->cppobj->beginQuestion(),
+ self->cppobj->endQuestion(),
+ QuestionInserter(list_container.get(), createQuestionObject));
+ return (list_container.release());
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in Message.get_question: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(po_IscException,
- "Unexpected exception in getting section iterators");
- return (NULL);
- }
-
- PyObject* list = PyList_New(0);
- if (list == NULL) {
- return (NULL);
- }
-
- for (; qi != qi_end; ++qi) {
- s_Question *question = static_cast<s_Question*>(
- question_type.tp_alloc(&question_type, 0));
- if (question == NULL) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
- }
- question->question = *qi;
- if (PyList_Append(list, question) == -1) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
- }
- Py_DECREF(question);
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message.get_question");
}
- return (list);
+ return (NULL);
}
PyObject*
-Message_getSection(s_Message* self, PyObject* args) {
+Message_getSection(PyObject* po_self, PyObject* args) {
+ const s_Message* const self = static_cast<s_Message*>(po_self);
+
unsigned int section;
if (!PyArg_ParseTuple(args, "I", &section)) {
PyErr_Clear();
@@ -508,47 +471,29 @@ Message_getSection(s_Message* self, PyObject* args) {
"no valid type in get_section argument");
return (NULL);
}
- RRsetIterator rrsi, rrsi_end;
+
try {
- rrsi = self->message->beginSection(
- static_cast<Message::Section>(section));
- rrsi_end = self->message->endSection(
- static_cast<Message::Section>(section));
+ PyObjectContainer list_container(PyList_New(0));
+ const Message::Section msgsection =
+ static_cast<Message::Section>(section);
+ for_each(self->cppobj->beginSection(msgsection),
+ self->cppobj->endSection(msgsection),
+ RRsetInserter(list_container.get(), createRRsetObject));
+ return (list_container.release());
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
- return (NULL);
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
- return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in Message.get_section: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
} catch (...) {
- PyErr_SetString(po_IscException,
- "Unexpected exception in getting section iterators");
- return (NULL);
- }
-
- PyObject* list = PyList_New(0);
- if (list == NULL) {
- return (NULL);
- }
- for (; rrsi != rrsi_end; ++rrsi) {
- s_RRset *rrset = static_cast<s_RRset*>(
- rrset_type.tp_alloc(&rrset_type, 0));
- if (rrset == NULL) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
- }
- rrset->rrset = *rrsi;
- if (PyList_Append(list, rrset) == -1) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
- }
- // PyList_Append increases refcount, so we remove ours since
- // we don't need it anymore
- Py_DECREF(rrset);
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message.get_section");
}
- return (list);
+ return (NULL);
}
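
As a usage sketch of the list-building rework above (assuming the SECTION_* constants stay exported on Message and that Question.to_text() keeps its existing binding), get_question() and get_section() now hand back plain Python lists:

    from pydnspp import Message

    # Hand-built query: header with QDCOUNT=1, one question for
    # example.com. IN A, no other records.
    wire = (b'\x12\x34'                          # ID
            b'\x01\x00'                          # flags: RD
            b'\x00\x01\x00\x00\x00\x00\x00\x00'  # QD=1, AN=NS=AR=0
            b'\x07example\x03com\x00'            # QNAME example.com.
            b'\x00\x01\x00\x01')                 # QTYPE A, QCLASS IN

    msg = Message(Message.PARSE)
    msg.from_wire(wire)

    for question in msg.get_question():
        print(question.to_text())                        # example.com. IN A
    print(len(msg.get_section(Message.SECTION_ANSWER)))  # 0
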
//static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
@@ -558,14 +503,14 @@ Message_getSection(s_Message* self, PyObject* args) {
//static PyObject* Message_addQuestion(s_Message* self, PyObject* args);
PyObject*
Message_addQuestion(s_Message* self, PyObject* args) {
- s_Question *question;
+ PyObject* question;
if (!PyArg_ParseTuple(args, "O!", &question_type, &question)) {
return (NULL);
}
- self->message->addQuestion(question->question);
-
+ self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
+
Py_RETURN_NONE;
}
@@ -573,15 +518,15 @@ PyObject*
Message_addRRset(s_Message* self, PyObject* args) {
PyObject *sign = Py_False;
int section;
- s_RRset* rrset;
+ PyObject* rrset;
if (!PyArg_ParseTuple(args, "iO!|O!", §ion, &rrset_type, &rrset,
&PyBool_Type, &sign)) {
return (NULL);
}
try {
- self->message->addRRset(static_cast<Message::Section>(section),
- rrset->rrset, sign == Py_True);
+ self->cppobj->addRRset(static_cast<Message::Section>(section),
+ PyRRset_ToRRsetPtr(rrset), sign == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -602,10 +547,10 @@ Message_clear(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message->clear(Message::PARSE);
+ self->cppobj->clear(Message::PARSE);
Py_RETURN_NONE;
} else if (i == Message::RENDER) {
- self->message->clear(Message::RENDER);
+ self->cppobj->clear(Message::RENDER);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -619,7 +564,7 @@ Message_clear(s_Message* self, PyObject* args) {
PyObject*
Message_makeResponse(s_Message* self) {
- self->message->makeResponse();
+ self->cppobj->makeResponse();
Py_RETURN_NONE;
}
@@ -627,7 +572,7 @@ PyObject*
Message_toText(s_Message* self) {
// Py_BuildValue makes python objects from native data
try {
- return (Py_BuildValue("s", self->message->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -648,17 +593,17 @@ Message_str(PyObject* self) {
PyObject*
Message_toWire(s_Message* self, PyObject* args) {
- s_MessageRenderer* mr;
- s_TSIGContext* tsig_ctx = NULL;
-
+ PyObject* mr;
+ PyObject* tsig_ctx = NULL;
+
if (PyArg_ParseTuple(args, "O!|O!", &messagerenderer_type, &mr,
- &tsig_context_type, &tsig_ctx)) {
+ &tsigcontext_type, &tsig_ctx)) {
try {
if (tsig_ctx == NULL) {
- self->message->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
} else {
- self->message->toWire(*mr->messagerenderer,
- *tsig_ctx->tsig_ctx);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr),
+ PyTSIGContext_ToTSIGContext(tsig_ctx));
}
// If we return NULL it is seen as an error, so use this for
// None returns
@@ -667,6 +612,20 @@ Message_toWire(s_Message* self, PyObject* args) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
return (NULL);
+ } catch (const TSIGContextError& ex) {
+ // toWire() with a TSIG context can fail with this error if the
+ // Python program has a bug.
+ PyErr_SetString(po_TSIGContextError, ex.what());
+ return (NULL);
+ } catch (const std::exception& ex) {
+ // Other exceptions should be rare (most likely an implementation
+ // bug)
+ PyErr_SetString(po_TSIGContextError, ex.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected C++ exception in Message.to_wire");
+ return (NULL);
}
}
PyErr_Clear();
@@ -676,97 +635,125 @@ Message_toWire(s_Message* self, PyObject* args) {
}
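
The rendering side, sketched from Python under the same assumptions (make_response() is taken to keep its existing binding name; a TSIGContext would go in as the optional second argument of to_wire(), but is left out here to keep the sketch self-contained):

    from pydnspp import Message, MessageRenderer

    wire = (b'\x12\x34\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00'
            b'\x07example\x03com\x00\x00\x01\x00\x01')

    query = Message(Message.PARSE)
    query.from_wire(wire)

    query.make_response()        # switch to RENDER mode, keep the question
    renderer = MessageRenderer()
    query.to_wire(renderer)      # to_wire(renderer, tsig_ctx) would also sign
    print(renderer.get_length(), len(renderer.get_data()))
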
PyObject*
-Message_fromWire(s_Message* self, PyObject* args) {
+Message_fromWire(PyObject* pyself, PyObject* args) {
+ s_Message* const self = static_cast<s_Message*>(pyself);
const char* b;
Py_ssize_t len;
- if (!PyArg_ParseTuple(args, "y#", &b, &len)) {
- return (NULL);
- }
-
- InputBuffer inbuf(b, len);
- try {
- self->message->fromWire(inbuf);
- Py_RETURN_NONE;
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
- } catch (const DNSMessageFORMERR& dmfe) {
- PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
- return (NULL);
- } catch (const DNSMessageBADVERS& dmfe) {
- PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
- return (NULL);
- } catch (const MessageTooShort& mts) {
- PyErr_SetString(po_MessageTooShort, mts.what());
- return (NULL);
+ unsigned int options = Message::PARSE_DEFAULT;
+
+ if (PyArg_ParseTuple(args, "y#", &b, &len) ||
+ PyArg_ParseTuple(args, "y#I", &b, &len, &options)) {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
+
+ InputBuffer inbuf(b, len);
+ try {
+ self->cppobj->fromWire(
+ inbuf, static_cast<Message::ParseOptions>(options));
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const DNSMessageFORMERR& dmfe) {
+ PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+ return (NULL);
+ } catch (const DNSMessageBADVERS& dmfe) {
+ PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
+ return (NULL);
+ } catch (const MessageTooShort& mts) {
+ PyErr_SetString(po_MessageTooShort, mts.what());
+ return (NULL);
+ } catch (const InvalidBufferPosition& ex) {
+ PyErr_SetString(po_DNSMessageFORMERR, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Error in Message.from_wire: " + string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in Message.from_wire");
+ return (NULL);
+ }
}
-}
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Message(PyObject* mod) {
- if (PyType_Ready(&message_type) < 0) {
- return (false);
- }
- Py_INCREF(&message_type);
-
- // Class variables
- // These are added to the tp_dict of the type object
- //
- addClassVariable(message_type, "PARSE",
- Py_BuildValue("I", Message::PARSE));
- addClassVariable(message_type, "RENDER",
- Py_BuildValue("I", Message::RENDER));
-
- addClassVariable(message_type, "HEADERFLAG_QR",
- Py_BuildValue("I", Message::HEADERFLAG_QR));
- addClassVariable(message_type, "HEADERFLAG_AA",
- Py_BuildValue("I", Message::HEADERFLAG_AA));
- addClassVariable(message_type, "HEADERFLAG_TC",
- Py_BuildValue("I", Message::HEADERFLAG_TC));
- addClassVariable(message_type, "HEADERFLAG_RD",
- Py_BuildValue("I", Message::HEADERFLAG_RD));
- addClassVariable(message_type, "HEADERFLAG_RA",
- Py_BuildValue("I", Message::HEADERFLAG_RA));
- addClassVariable(message_type, "HEADERFLAG_AD",
- Py_BuildValue("I", Message::HEADERFLAG_AD));
- addClassVariable(message_type, "HEADERFLAG_CD",
- Py_BuildValue("I", Message::HEADERFLAG_CD));
-
- addClassVariable(message_type, "SECTION_QUESTION",
- Py_BuildValue("I", Message::SECTION_QUESTION));
- addClassVariable(message_type, "SECTION_ANSWER",
- Py_BuildValue("I", Message::SECTION_ANSWER));
- addClassVariable(message_type, "SECTION_AUTHORITY",
- Py_BuildValue("I", Message::SECTION_AUTHORITY));
- addClassVariable(message_type, "SECTION_ADDITIONAL",
- Py_BuildValue("I", Message::SECTION_ADDITIONAL));
-
- addClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
- Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
-
- /* Class-specific exceptions */
- po_MessageTooShort = PyErr_NewException("pydnspp.MessageTooShort", NULL,
- NULL);
- PyModule_AddObject(mod, "MessageTooShort", po_MessageTooShort);
- po_InvalidMessageSection =
- PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageSection", po_InvalidMessageSection);
- po_InvalidMessageOperation =
- PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageOperation",
- po_InvalidMessageOperation);
- po_InvalidMessageUDPSize =
- PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageUDPSize", po_InvalidMessageUDPSize);
- po_DNSMessageBADVERS = PyErr_NewException("pydnspp.DNSMessageBADVERS",
- NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageBADVERS", po_DNSMessageBADVERS);
-
- PyModule_AddObject(mod, "Message",
- reinterpret_cast<PyObject*>(&message_type));
-
-
- return (true);
+ PyErr_SetString(PyExc_TypeError,
+ "from_wire() arguments must be a byte object and "
+ "(optional) parse options");
+ return (NULL);
}
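
The Python-visible effect of the reworked argument handling, as a hedged sketch (MessageTooShort is the module-level exception re-exported by pydnspp; the first wire blob is deliberately shorter than a DNS header):

    from pydnspp import Message, MessageTooShort

    msg = Message(Message.PARSE)

    try:
        msg.from_wire(b'\x12\x34\x01\x00')   # only 4 of the 12 header bytes
    except MessageTooShort as ex:
        print('rejected:', ex)

    # A non-bytes argument falls through both ParseTuple() calls and
    # raises the TypeError set at the end of the function.
    try:
        msg.from_wire('not bytes')
    except TypeError as ex:
        print('rejected:', ex)
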
+
} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_MessageTooShort;
+PyObject* po_InvalidMessageSection;
+PyObject* po_InvalidMessageOperation;
+PyObject* po_InvalidMessageUDPSize;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Message
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject message_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Message",
+ sizeof(s_Message), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Message_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Message_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Message class encapsulates a standard DNS message.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Message_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Message_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/message_python.h b/src/lib/dns/python/message_python.h
new file mode 100644
index 0000000..be23890
--- /dev/null
+++ b/src/lib/dns/python/message_python.h
@@ -0,0 +1,40 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_MESSAGE_H
+#define __PYTHON_MESSAGE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Message;
+
+namespace python {
+
+extern PyObject* po_MessageTooShort;
+extern PyObject* po_InvalidMessageSection;
+extern PyObject* po_InvalidMessageOperation;
+extern PyObject* po_InvalidMessageUDPSize;
+
+extern PyTypeObject message_type;
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_MESSAGE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc
new file mode 100644
index 0000000..561c494
--- /dev/null
+++ b/src/lib/dns/python/message_python_inc.cc
@@ -0,0 +1,41 @@
+namespace {
+const char* const Message_fromWire_doc = "\
+from_wire(data, options=PARSE_DEFAULT)\n\
+\n\
+(Re)build a Message object from wire-format data.\n\
+\n\
+This method parses the given wire format data to build a complete\n\
+Message object. On success, the values of the header section fields\n\
+are accessible via the corresponding get methods, and the question and\n\
+following sections are accessible via the corresponding iterators.\n\
+If the message contains an EDNS or TSIG record, they are accessible\n\
+via get_edns() and get_tsig_record(), respectively.\n\
+\n\
+This Message must be in the PARSE mode.\n\
+\n\
+This method performs strict validation on the given message based on\n\
+the DNS protocol specifications. If the given message data is invalid,\n\
+this method throws an exception (see the exception list).\n\
+\n\
+By default, this method combines RRs of the same name, RR type and RR\n\
+class in a section into a single RRset, even if they are interleaved\n\
+with a different type of RR (though it would be a rare case in\n\
+practice). If the PRESERVE_ORDER option is specified, it handles each\n\
+RR separately, in the order of appearance, and converts it to a separate\n\
+RRset (so this RRset should contain exactly one Rdata). This mode is\n\
+necessary when the higher level protocol is ordering-conscious; for\n\
+example, in AXFR and IXFR, the position of the SOA RRs is crucial.\n\
+\n\
+Exceptions:\n\
+ InvalidMessageOperation Message is in the RENDER mode\n\
+ DNSMessageFORMERR The given message data is syntactically invalid\n\
+ MessageTooShort The given data is shorter than a valid header\n\
+ section\n\
+ Others Name, Rdata, and EDNS classes can also throw\n\
+\n\
+Parameters:\n\
+ data A byte object of the wire data\n\
+ options Parse options\n\
+\n\
+";
+} // unnamed namespace
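
To make the PRESERVE_ORDER description concrete, a short sketch; it assumes the PARSE_DEFAULT and PRESERVE_ORDER parse options end up exported as Message class attributes, which is not part of this hunk:

    from pydnspp import Message

    # Header-only message, just enough to be parseable.
    wire_data = b'\x12\x34\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    msg = Message(Message.PARSE)

    # Default: RRs of the same name/type/class are merged into one RRset.
    msg.from_wire(wire_data)

    # Ordering-sensitive callers (e.g. AXFR/IXFR handling) keep each RR
    # as its own single-Rdata RRset, in wire order.
    msg.clear(Message.PARSE)
    msg.from_wire(wire_data, Message.PRESERVE_ORDER)
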
diff --git a/src/lib/dns/python/messagerenderer_python.cc b/src/lib/dns/python/messagerenderer_python.cc
index 85a4f17..bb89622 100644
--- a/src/lib/dns/python/messagerenderer_python.cc
+++ b/src/lib/dns/python/messagerenderer_python.cc
@@ -12,39 +12,48 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
+
+#include <util/buffer.h>
+
#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-// MessageRenderer
-
+namespace {
+// The s_* Class simply covers one instantiation of the object.
+//
// since we don't use *Buffer in the python version (but work with
// the already existing bytearray type where we use these custom buffers
-// in c++, we need to keep track of one here.
+// in C++), we need to keep track of one here.
class s_MessageRenderer : public PyObject {
public:
- OutputBuffer* outputbuffer;
- MessageRenderer* messagerenderer;
+ s_MessageRenderer();
+ isc::util::OutputBuffer* outputbuffer;
+ MessageRenderer* cppobj;
};
-static int MessageRenderer_init(s_MessageRenderer* self);
-static void MessageRenderer_destroy(s_MessageRenderer* self);
+int MessageRenderer_init(s_MessageRenderer* self);
+void MessageRenderer_destroy(s_MessageRenderer* self);
-static PyObject* MessageRenderer_getData(s_MessageRenderer* self);
-static PyObject* MessageRenderer_getLength(s_MessageRenderer* self);
-static PyObject* MessageRenderer_isTruncated(s_MessageRenderer* self);
-static PyObject* MessageRenderer_getLengthLimit(s_MessageRenderer* self);
-static PyObject* MessageRenderer_getCompressMode(s_MessageRenderer* self);
-static PyObject* MessageRenderer_setTruncated(s_MessageRenderer* self);
-static PyObject* MessageRenderer_setLengthLimit(s_MessageRenderer* self, PyObject* args);
-static PyObject* MessageRenderer_setCompressMode(s_MessageRenderer* self, PyObject* args);
-static PyObject* MessageRenderer_clear(s_MessageRenderer* self);
+PyObject* MessageRenderer_getData(s_MessageRenderer* self);
+PyObject* MessageRenderer_getLength(s_MessageRenderer* self);
+PyObject* MessageRenderer_isTruncated(s_MessageRenderer* self);
+PyObject* MessageRenderer_getLengthLimit(s_MessageRenderer* self);
+PyObject* MessageRenderer_getCompressMode(s_MessageRenderer* self);
+PyObject* MessageRenderer_setTruncated(s_MessageRenderer* self);
+PyObject* MessageRenderer_setLengthLimit(s_MessageRenderer* self, PyObject* args);
+PyObject* MessageRenderer_setCompressMode(s_MessageRenderer* self, PyObject* args);
+PyObject* MessageRenderer_clear(s_MessageRenderer* self);
-static PyMethodDef MessageRenderer_methods[] = {
+PyMethodDef MessageRenderer_methods[] = {
{ "get_data", reinterpret_cast<PyCFunction>(MessageRenderer_getData), METH_NOARGS,
"Returns the data as a bytes() object" },
{ "get_length", reinterpret_cast<PyCFunction>(MessageRenderer_getLength), METH_NOARGS,
@@ -67,115 +76,60 @@ static PyMethodDef MessageRenderer_methods[] = {
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject messagerenderer_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.MessageRenderer",
- sizeof(s_MessageRenderer), // tp_basicsize
- 0, // tp_itemsize
- (destructor)MessageRenderer_destroy,// tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- NULL, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The MessageRenderer class encapsulates implementation details "
- "of rendering a DNS message into a buffer in wire format. "
- "In effect, it's simply responsible for name compression at least in the "
- "current implementation. A MessageRenderer class object manages the "
- "positions of names rendered in a buffer and uses that information to render "
- "subsequent names with compression.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- MessageRenderer_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)MessageRenderer_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
MessageRenderer_init(s_MessageRenderer* self) {
self->outputbuffer = new OutputBuffer(4096);
- self->messagerenderer = new MessageRenderer(*self->outputbuffer);
+ self->cppobj = new MessageRenderer(*self->outputbuffer);
return (0);
}
-static void
+void
MessageRenderer_destroy(s_MessageRenderer* self) {
- delete self->messagerenderer;
+ delete self->cppobj;
delete self->outputbuffer;
- self->messagerenderer = NULL;
+ self->cppobj = NULL;
self->outputbuffer = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
MessageRenderer_getData(s_MessageRenderer* self) {
return (Py_BuildValue("y#",
- self->messagerenderer->getData(),
- self->messagerenderer->getLength()));
+ self->cppobj->getData(),
+ self->cppobj->getLength()));
}
-static PyObject*
+PyObject*
MessageRenderer_getLength(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLength()));
+ return (Py_BuildValue("I", self->cppobj->getLength()));
}
-static PyObject*
+PyObject*
MessageRenderer_isTruncated(s_MessageRenderer* self) {
- if (self->messagerenderer->isTruncated()) {
+ if (self->cppobj->isTruncated()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
-static PyObject*
+PyObject*
MessageRenderer_getLengthLimit(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLengthLimit()));
+ return (Py_BuildValue("I", self->cppobj->getLengthLimit()));
}
-static PyObject*
+PyObject*
MessageRenderer_getCompressMode(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getCompressMode()));
+ return (Py_BuildValue("I", self->cppobj->getCompressMode()));
}
-static PyObject*
+PyObject*
MessageRenderer_setTruncated(s_MessageRenderer* self) {
- self->messagerenderer->setTruncated();
+ self->cppobj->setTruncated();
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
MessageRenderer_setLengthLimit(s_MessageRenderer* self,
PyObject* args)
{
@@ -191,11 +145,11 @@ MessageRenderer_setLengthLimit(s_MessageRenderer* self,
"MessageRenderer length limit out of range");
return (NULL);
}
- self->messagerenderer->setLengthLimit(lengthlimit);
+ self->cppobj->setLengthLimit(lengthlimit);
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
MessageRenderer_setCompressMode(s_MessageRenderer* self,
PyObject* args)
{
@@ -205,12 +159,12 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
}
if (mode == MessageRenderer::CASE_INSENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
// If we return NULL it is seen as an error, so use this for
// None returns, it also applies to CASE_SENSITIVE.
Py_RETURN_NONE;
} else if (mode == MessageRenderer::CASE_SENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_SENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_SENSITIVE);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -220,45 +174,94 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
}
}
-static PyObject*
+PyObject*
MessageRenderer_clear(s_MessageRenderer* self) {
- self->messagerenderer->clear();
+ self->cppobj->clear();
Py_RETURN_NONE;
}
+} // end of unnamed namespace
-// end of MessageRenderer
+namespace isc {
+namespace dns {
+namespace python {
+PyTypeObject messagerenderer_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.MessageRenderer",
+ sizeof(s_MessageRenderer), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)MessageRenderer_destroy,// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The MessageRenderer class encapsulates implementation details "
+ "of rendering a DNS message into a buffer in wire format. "
+ "In effect, it's simply responsible for name compression at least in the "
+ "current implementation. A MessageRenderer class object manages the "
+ "positions of names rendered in a buffer and uses that information to render "
+ "subsequent names with compression.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ MessageRenderer_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)MessageRenderer_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+// If we need a createMessageRendererObject(), should we copy? can we?
+// copy the existing buffer into a new one, then create a new renderer with
+// that buffer?
-// Module Initialization, all statics are initialized here
bool
-initModulePart_MessageRenderer(PyObject* mod) {
- // Add the exceptions to the module
-
- // Add the enums to the module
-
- // Add the constants to the module
-
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
-
- // NameComparisonResult
- if (PyType_Ready(&messagerenderer_type) < 0) {
- return (false);
+PyMessageRenderer_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&messagerenderer_type);
-
- // Class variables
- // These are added to the tp_dict of the type object
- addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
- addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
+ return (PyObject_TypeCheck(obj, &messagerenderer_type));
+}
- PyModule_AddObject(mod, "MessageRenderer",
- reinterpret_cast<PyObject*>(&messagerenderer_type));
-
- return (true);
+MessageRenderer&
+PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj) {
+ if (messagerenderer_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in MessageRenderer PyObject conversion");
+ }
+ s_MessageRenderer* messagerenderer = static_cast<s_MessageRenderer*>(messagerenderer_obj);
+ return (*messagerenderer->cppobj);
}
+} // namespace python
+} // namespace dns
+} // namespace isc
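
A quick Python-level sketch of the renderer these helpers hand around; the method names mirror the wrapper functions above, and the CASE_* constants are assumed to remain exported as in the initialization code this replaces:

    from pydnspp import MessageRenderer

    renderer = MessageRenderer()
    renderer.set_length_limit(512)         # classic UDP payload limit
    renderer.set_compress_mode(MessageRenderer.CASE_SENSITIVE)

    print(renderer.get_length_limit())     # 512
    print(renderer.get_length())           # 0, nothing rendered yet
    print(renderer.is_truncated())         # False

    renderer.clear()                       # back to the default state
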
diff --git a/src/lib/dns/python/messagerenderer_python.h b/src/lib/dns/python/messagerenderer_python.h
new file mode 100644
index 0000000..ea9a940
--- /dev/null
+++ b/src/lib/dns/python/messagerenderer_python.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_MESSAGERENDERER_H
+#define __PYTHON_MESSAGERENDERER_H 1
+
+#include <Python.h>
+
+#include <util/buffer.h>
+
+namespace isc {
+namespace dns {
+class MessageRenderer;
+
+namespace python {
+
+extern PyTypeObject messagerenderer_type;
+
+/// \brief Checks if the given python object is a MessageRenderer object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type MessageRenderer, false otherwise
+bool PyMessageRenderer_Check(PyObject* obj);
+
+/// \brief Returns a reference to the MessageRenderer object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type MessageRenderer; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyMessageRenderer_Check()
+///
+/// \note This is not a copy; if the MessageRenderer is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param messagerenderer_obj The messagerenderer object to convert
+MessageRenderer& PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_MESSAGERENDERER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index b030ba1..ce556df 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -12,50 +12,48 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_EmptyLabel;
-static PyObject* po_TooLongName;
-static PyObject* po_TooLongLabel;
-static PyObject* po_BadLabelType;
-static PyObject* po_BadEscape;
-static PyObject* po_IncompleteName;
-static PyObject* po_InvalidBufferPosition;
-static PyObject* po_DNSMessageFORMERR;
+#include <Python.h>
-//
-// Declaration of enums
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_NameRelation;
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+
+#include <iostream>
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-// NameComparisonResult
+namespace {
+// The s_* Class simply covers one instantiation of the object.
class s_NameComparisonResult : public PyObject {
public:
- isc::dns::NameComparisonResult* ncr;
+ s_NameComparisonResult() : cppobj(NULL) {}
+ NameComparisonResult* cppobj;
+};
+
+class s_Name : public PyObject {
+public:
+ s_Name() : cppobj(NULL), position(0) {}
+ Name* cppobj;
+ size_t position;
};
-static int NameComparisonResult_init(s_NameComparisonResult*, PyObject*);
-static void NameComparisonResult_destroy(s_NameComparisonResult* self);
-static PyObject* NameComparisonResult_getOrder(s_NameComparisonResult* self);
-static PyObject* NameComparisonResult_getCommonLabels(s_NameComparisonResult* self);
-static PyObject* NameComparisonResult_getRelation(s_NameComparisonResult* self);
+int NameComparisonResult_init(s_NameComparisonResult*, PyObject*);
+void NameComparisonResult_destroy(s_NameComparisonResult* self);
+PyObject* NameComparisonResult_getOrder(s_NameComparisonResult* self);
+PyObject* NameComparisonResult_getCommonLabels(s_NameComparisonResult* self);
+PyObject* NameComparisonResult_getRelation(s_NameComparisonResult* self);
-static PyMethodDef NameComparisonResult_methods[] = {
+PyMethodDef NameComparisonResult_methods[] = {
{ "get_order", reinterpret_cast<PyCFunction>(NameComparisonResult_getOrder), METH_NOARGS,
"Returns the order" },
{ "get_common_labels", reinterpret_cast<PyCFunction>(NameComparisonResult_getCommonLabels), METH_NOARGS,
@@ -65,130 +63,68 @@ static PyMethodDef NameComparisonResult_methods[] = {
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject name_comparison_result_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.NameComparisonResult",
- sizeof(s_NameComparisonResult), // tp_basicsize
- 0, // tp_itemsize
- (destructor)NameComparisonResult_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- NULL, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "This is a supplemental class used only as a return value of Name.compare(). "
- "It encapsulate a tuple of the comparison: ordering, number of common labels, "
- "and relationship as follows:\n"
- "- ordering: relative ordering under the DNSSEC order relation\n"
- "- labels: the number of common significant labels of the two names being"
- " compared\n"
- "- relationship: see NameComparisonResult.NameRelation\n",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- NameComparisonResult_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)NameComparisonResult_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
NameComparisonResult_init(s_NameComparisonResult*, PyObject*) {
PyErr_SetString(PyExc_NotImplementedError,
"NameComparisonResult can't be built directly");
return (-1);
}
-static void
+void
NameComparisonResult_destroy(s_NameComparisonResult* self) {
- delete self->ncr;
- self->ncr = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
NameComparisonResult_getOrder(s_NameComparisonResult* self) {
- return (Py_BuildValue("i", self->ncr->getOrder()));
+ return (Py_BuildValue("i", self->cppobj->getOrder()));
}
-static PyObject*
+PyObject*
NameComparisonResult_getCommonLabels(s_NameComparisonResult* self) {
- return (Py_BuildValue("I", self->ncr->getCommonLabels()));
+ return (Py_BuildValue("I", self->cppobj->getCommonLabels()));
}
-static PyObject*
+PyObject*
NameComparisonResult_getRelation(s_NameComparisonResult* self) {
- return (Py_BuildValue("I", self->ncr->getRelation()));
+ return (Py_BuildValue("I", self->cppobj->getRelation()));
}
-// end of NameComparisonResult
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_Name, Name> NameContainer;
-// Name
+int Name_init(s_Name* self, PyObject* args);
+void Name_destroy(s_Name* self);
-class s_Name : public PyObject {
-public:
- isc::dns::Name* name;
- size_t position;
-};
+PyObject* Name_toWire(s_Name* self, PyObject* args);
+PyObject* Name_toText(s_Name* self, PyObject* args);
+PyObject* Name_str(PyObject* self);
+PyObject* Name_getLabelCount(s_Name* self);
+PyObject* Name_at(s_Name* self, PyObject* args);
+PyObject* Name_getLength(s_Name* self);
-static int Name_init(s_Name* self, PyObject* args);
-static void Name_destroy(s_Name* self);
+PyObject* Name_compare(s_Name* self, PyObject* args);
+PyObject* Name_equals(s_Name* self, PyObject* args);
-static PyObject* Name_toWire(s_Name* self, PyObject* args);
-static PyObject* Name_toText(s_Name* self);
-static PyObject* Name_str(PyObject* self);
-static PyObject* Name_getLabelCount(s_Name* self);
-static PyObject* Name_at(s_Name* self, PyObject* args);
-static PyObject* Name_getLength(s_Name* self);
+PyObject* Name_richcmp(s_Name* self, s_Name* other, int op);
+PyObject* Name_split(s_Name* self, PyObject* args);
+PyObject* Name_reverse(s_Name* self);
+PyObject* Name_concatenate(s_Name* self, PyObject* args);
+PyObject* Name_downcase(s_Name* self);
+PyObject* Name_isWildCard(s_Name* self);
-static PyObject* Name_compare(s_Name* self, PyObject* args);
-static PyObject* Name_equals(s_Name* self, PyObject* args);
-
-static PyObject* Name_richcmp(s_Name* self, s_Name* other, int op);
-static PyObject* Name_split(s_Name* self, PyObject* args);
-static PyObject* Name_reverse(s_Name* self);
-static PyObject* Name_concatenate(s_Name* self, PyObject* args);
-static PyObject* Name_downcase(s_Name* self);
-static PyObject* Name_isWildCard(s_Name* self);
-
-static PyMethodDef Name_methods[] = {
+PyMethodDef Name_methods[] = {
{ "at", reinterpret_cast<PyCFunction>(Name_at), METH_VARARGS,
"Returns the integer value of the name data at the specified position" },
{ "get_length", reinterpret_cast<PyCFunction>(Name_getLength), METH_NOARGS,
"Returns the length" },
{ "get_labelcount", reinterpret_cast<PyCFunction>(Name_getLabelCount), METH_NOARGS,
"Returns the number of labels" },
- { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_NOARGS,
- "Returns the string representation" },
+ { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_VARARGS,
+ "Returns the string representation. The optional argument must be either"
+ "True of False. If True, the final dot will be omitted." },
{ "to_wire", reinterpret_cast<PyCFunction>(Name_toWire), METH_VARARGS,
"Converts the Name object to wire format.\n"
"The argument can be either a MessageRenderer or an object that "
@@ -217,63 +153,7 @@ static PyMethodDef Name_methods[] = {
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject name_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Name",
- sizeof(s_Name), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Name_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Name_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Name class encapsulates DNS names.\n"
- "It provides interfaces to construct a name from string or wire-format data, "
- "transform a name into a string or wire-format data, compare two names, get "
- "access to various properties of a name, etc.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)Name_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Name_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Name_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- // Note: not sure if the following are correct. Added them just to
- // make the compiler happy.
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-
-static int
+int
Name_init(s_Name* self, PyObject* args) {
const char* s;
PyObject* downcase = Py_False;
@@ -286,7 +166,7 @@ Name_init(s_Name* self, PyObject* args) {
try {
const std::string n(s);
- self->name = new Name(n, downcase == Py_True);
+ self->cppobj = new Name(n, downcase == Py_True);
self->position = 0;
} catch (const EmptyLabel&) {
PyErr_SetString(po_EmptyLabel, "EmptyLabel");
@@ -339,7 +219,7 @@ Name_init(s_Name* self, PyObject* args) {
InputBuffer buffer(bytes, len);
buffer.setPosition(position);
- self->name = new Name(buffer, downcase == Py_True);
+ self->cppobj = new Name(buffer, downcase == Py_True);
self->position = buffer.getPosition();
} catch (const InvalidBufferPosition&) {
PyErr_SetString(po_InvalidBufferPosition,
@@ -361,14 +241,14 @@ Name_init(s_Name* self, PyObject* args) {
return (-1);
}
-static void
+void
Name_destroy(s_Name* self) {
- delete self->name;
- self->name = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
Name_at(s_Name* self, PyObject* args) {
int pos;
if (!PyArg_ParseTuple(args, "i", &pos)) {
@@ -382,7 +262,7 @@ Name_at(s_Name* self, PyObject* args) {
}
try {
- return (Py_BuildValue("I", self->name->at(pos)));
+ return (Py_BuildValue("I", self->cppobj->at(pos)));
} catch (const isc::OutOfRange&) {
PyErr_SetString(PyExc_IndexError,
"name index out of range");
@@ -390,22 +270,38 @@ Name_at(s_Name* self, PyObject* args) {
}
}
-static PyObject*
+PyObject*
Name_getLength(s_Name* self) {
- return (Py_BuildValue("i", self->name->getLength()));
+ return (Py_BuildValue("i", self->cppobj->getLength()));
}
-static PyObject*
+PyObject*
Name_getLabelCount(s_Name* self) {
- return (Py_BuildValue("i", self->name->getLabelCount()));
+ return (Py_BuildValue("i", self->cppobj->getLabelCount()));
}
-static PyObject*
-Name_toText(s_Name* self) {
- return (Py_BuildValue("s", self->name->toText().c_str()));
+PyObject*
+Name_toText(s_Name* self, PyObject* args) {
+ PyObject* omit_final_dot_obj = NULL;
+ if (PyArg_ParseTuple(args, "|O", &omit_final_dot_obj)) {
+ bool omit_final_dot = false;
+ if (omit_final_dot_obj != NULL) {
+ if (PyBool_Check(omit_final_dot_obj) != 0) {
+ omit_final_dot = (omit_final_dot_obj == Py_True);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Optional argument 1 of to_text() should be True of False");
+ return (NULL);
+ }
+ }
+ return (Py_BuildValue("s",
+ self->cppobj->toText(omit_final_dot).c_str()));
+ } else {
+ return (NULL);
+ }
}
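
A small sketch of the extended to_text() from the Python side; anything other than True or False as the optional argument hits the TypeError branch above:

    from pydnspp import Name

    name = Name("www.example.com")
    print(name.to_text())       # www.example.com.  (absolute form)
    print(name.to_text(True))   # www.example.com   (final dot omitted)
    print(str(name))            # __str__ still goes through to_text()
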
-static PyObject*
+PyObject*
Name_str(PyObject* self) {
// Simply call the to_text method we already defined
// str() is not defined in the c++ version, only to_text
@@ -415,16 +311,16 @@ Name_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
Name_toWire(s_Name* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
OutputBuffer buffer(Name::MAX_WIRE);
- self->name->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* name_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, name_bytes);
// We need to release the object we temporarily created here
@@ -432,7 +328,7 @@ Name_toWire(s_Name* self, PyObject* args) {
Py_DECREF(name_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->name->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -443,7 +339,7 @@ Name_toWire(s_Name* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
Name_compare(s_Name* self, PyObject* args) {
s_Name* other;
@@ -452,26 +348,26 @@ Name_compare(s_Name* self, PyObject* args) {
s_NameComparisonResult* ret = PyObject_New(s_NameComparisonResult, &name_comparison_result_type);
if (ret != NULL) {
- ret->ncr = new NameComparisonResult(
- self->name->compare(*other->name));
+ ret->cppobj = new NameComparisonResult(
+ self->cppobj->compare(*other->cppobj));
}
return (ret);
}
-static PyObject*
+PyObject*
Name_equals(s_Name* self, PyObject* args) {
s_Name* other;
if (!PyArg_ParseTuple(args, "O!", &name_type, &other))
return (NULL);
- if (self->name->equals(*other->name))
+ if (self->cppobj->equals(*other->cppobj))
Py_RETURN_TRUE;
else
Py_RETURN_FALSE;
}
-static PyObject*
+PyObject*
Name_split(s_Name* self, PyObject* args) {
int first, n;
s_Name* ret = NULL;
@@ -485,14 +381,14 @@ Name_split(s_Name* self, PyObject* args) {
}
ret = PyObject_New(s_Name, &name_type);
if (ret != NULL) {
- ret->name = NULL;
+ ret->cppobj = NULL;
try {
- ret->name = new Name(self->name->split(first, n));
+ ret->cppobj = new Name(self->cppobj->split(first, n));
} catch(const isc::OutOfRange& oor) {
PyErr_SetString(PyExc_IndexError, oor.what());
- ret->name = NULL;
+ ret->cppobj = NULL;
}
- if (ret->name == NULL) {
+ if (ret->cppobj == NULL) {
Py_DECREF(ret);
return (NULL);
}
@@ -507,14 +403,14 @@ Name_split(s_Name* self, PyObject* args) {
}
ret = PyObject_New(s_Name, &name_type);
if (ret != NULL) {
- ret->name = NULL;
+ ret->cppobj = NULL;
try {
- ret->name = new Name(self->name->split(n));
+ ret->cppobj = new Name(self->cppobj->split(n));
} catch(const isc::OutOfRange& oor) {
PyErr_SetString(PyExc_IndexError, oor.what());
- ret->name = NULL;
+ ret->cppobj = NULL;
}
- if (ret->name == NULL) {
+ if (ret->cppobj == NULL) {
Py_DECREF(ret);
return (NULL);
}
@@ -526,14 +422,13 @@ Name_split(s_Name* self, PyObject* args) {
"No valid type in split argument");
return (ret);
}
-#include <iostream>
//
// richcmp defines the ==, !=, >, <, >= and <= operators in python
// It is translated to a function that gets 3 arguments, an object,
// an object to compare to, and an operator.
//
-static PyObject*
+PyObject*
Name_richcmp(s_Name* self, s_Name* other, int op) {
bool c;
@@ -545,22 +440,22 @@ Name_richcmp(s_Name* self, s_Name* other, int op) {
switch (op) {
case Py_LT:
- c = *self->name < *other->name;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->name <= *other->name;
+ c = *self->cppobj <= *other->cppobj;
break;
case Py_EQ:
- c = *self->name == *other->name;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->name != *other->name;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *self->name > *other->name;
+ c = *self->cppobj > *other->cppobj;
break;
case Py_GE:
- c = *self->name >= *other->name;
+ c = *self->cppobj >= *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -574,13 +469,13 @@ Name_richcmp(s_Name* self, s_Name* other, int op) {
}
}
-static PyObject*
+PyObject*
Name_reverse(s_Name* self) {
s_Name* ret = PyObject_New(s_Name, &name_type);
if (ret != NULL) {
- ret->name = new Name(self->name->reverse());
- if (ret->name == NULL) {
+ ret->cppobj = new Name(self->cppobj->reverse());
+ if (ret->cppobj == NULL) {
Py_DECREF(ret);
return (NULL);
}
@@ -588,7 +483,7 @@ Name_reverse(s_Name* self) {
return (ret);
}
-static PyObject*
+PyObject*
Name_concatenate(s_Name* self, PyObject* args) {
s_Name* other;
@@ -598,7 +493,7 @@ Name_concatenate(s_Name* self, PyObject* args) {
s_Name* ret = PyObject_New(s_Name, &name_type);
if (ret != NULL) {
try {
- ret->name = new Name(self->name->concatenate(*other->name));
+ ret->cppobj = new Name(self->cppobj->concatenate(*other->cppobj));
} catch (const TooLongName& tln) {
PyErr_SetString(po_TooLongName, tln.what());
return (NULL);
@@ -607,102 +502,186 @@ Name_concatenate(s_Name* self, PyObject* args) {
return (ret);
}
-static PyObject*
+PyObject*
Name_downcase(s_Name* self) {
- self->name->downcase();
+ self->cppobj->downcase();
Py_INCREF(self);
return (self);
}
-static PyObject*
+PyObject*
Name_isWildCard(s_Name* self) {
- if (self->name->isWildcard()) {
+ if (self->cppobj->isWildcard()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
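
For reference, a hedged sketch of these wrappers in use from Python; the method names follow the Name_methods table, and NameComparisonResult.NameRelation is assumed to stay registered as the int-to-name mapping it was before this change:

    from pydnspp import Name, NameComparisonResult

    child = Name("www.Example.ORG")
    parent = Name("example.org")

    result = child.compare(parent)
    print(result.get_order(), result.get_common_labels())
    print(NameComparisonResult.NameRelation[result.get_relation()])

    print(child.split(1).to_text())              # Example.ORG.
    print(child.downcase().to_text())            # www.example.org. (in place)
    print(child.concatenate(Name("local")).to_text())
    print(Name("*.example.org").is_wildcard())   # True
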
-// end of Name
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Name(PyObject* mod) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
-
- //
- // NameComparisonResult
- //
- if (PyType_Ready(&name_comparison_result_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_comparison_result_type);
-
- // Add the enums to the module
- po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
- NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
- NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
- NameComparisonResult::EQUAL, "EQUAL",
- NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
- addClassVariable(name_comparison_result_type, "NameRelation", po_NameRelation);
-
- PyModule_AddObject(mod, "NameComparisonResult",
- reinterpret_cast<PyObject*>(&name_comparison_result_type));
-
- //
- // Name
- //
-
- if (PyType_Ready(&name_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_type);
+namespace isc {
+namespace dns {
+namespace python {
- // Add the constants to the module
- addClassVariable(name_type, "MAX_WIRE", Py_BuildValue("I", Name::MAX_WIRE));
- addClassVariable(name_type, "MAX_LABELS", Py_BuildValue("I", Name::MAX_LABELS));
- addClassVariable(name_type, "MAX_LABELLEN", Py_BuildValue("I", Name::MAX_LABELLEN));
- addClassVariable(name_type, "MAX_COMPRESS_POINTER", Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK8", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK16", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
-
- s_Name* root_name = PyObject_New(s_Name, &name_type);
- root_name->name = new Name(Name::ROOT_NAME());
- PyObject* po_ROOT_NAME = root_name;
- addClassVariable(name_type, "ROOT_NAME", po_ROOT_NAME);
-
- PyModule_AddObject(mod, "Name",
- reinterpret_cast<PyObject*>(&name_type));
-
-
- // Add the exceptions to the module
- po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
- PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
+//
+// Definition of the custom exceptions
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_EmptyLabel;
+PyObject* po_TooLongName;
+PyObject* po_TooLongLabel;
+PyObject* po_BadLabelType;
+PyObject* po_BadEscape;
+PyObject* po_IncompleteName;
+PyObject* po_InvalidBufferPosition;
+PyObject* po_DNSMessageFORMERR;
- po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
- PyModule_AddObject(mod, "TooLongName", po_TooLongName);
+//
+// Definition of enums
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_NameRelation;
- po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
- PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
+PyTypeObject name_comparison_result_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.NameComparisonResult",
+ sizeof(s_NameComparisonResult), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)NameComparisonResult_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "This is a supplemental class used only as a return value of Name.compare(). "
+ "It encapsulate a tuple of the comparison: ordering, number of common labels, "
+ "and relationship as follows:\n"
+ "- ordering: relative ordering under the DNSSEC order relation\n"
+ "- labels: the number of common significant labels of the two names being"
+ " compared\n"
+ "- relationship: see NameComparisonResult.NameRelation\n",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ NameComparisonResult_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)NameComparisonResult_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
- po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
- PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
+PyTypeObject name_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Name",
+ sizeof(s_Name), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Name_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Name_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Name class encapsulates DNS names.\n"
+ "It provides interfaces to construct a name from string or wire-format data, "
+ "transform a name into a string or wire-format data, compare two names, get "
+ "access to various properties of a name, etc.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)Name_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Name_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Name_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ // Note: not sure if the following are correct. Added them just to
+ // make the compiler happy.
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
- po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
- PyModule_AddObject(mod, "BadEscape", po_BadEscape);
+PyObject*
+createNameObject(const Name& source) {
+ NameContainer container(PyObject_New(s_Name, &name_type));
+ container.set(new Name(source));
+ return (container.release());
+}
- po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
+bool
+PyName_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &name_type));
+}
- po_InvalidBufferPosition = PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
- PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
+const Name&
+PyName_ToName(const PyObject* name_obj) {
+ if (name_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Name PyObject conversion");
+ }
+ const s_Name* name = static_cast<const s_Name*>(name_obj);
+ return (*name->cppobj);
+}
- // This one could have gone into the message_python.cc file, but is
- // already needed here.
- po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR", NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
- return (true);
-}
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/name_python.h b/src/lib/dns/python/name_python.h
new file mode 100644
index 0000000..86d7fd0
--- /dev/null
+++ b/src/lib/dns/python/name_python.h
@@ -0,0 +1,81 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_NAME_H
+#define __PYTHON_NAME_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Name;
+
+namespace python {
+
+extern PyObject* po_EmptyLabel;
+extern PyObject* po_TooLongName;
+extern PyObject* po_TooLongLabel;
+extern PyObject* po_BadLabelType;
+extern PyObject* po_BadEscape;
+extern PyObject* po_IncompleteName;
+extern PyObject* po_InvalidBufferPosition;
+extern PyObject* po_DNSMessageFORMERR;
+
+//
+// Declaration of enums
+// Initialization and addition of these go in the module init at the
+// end
+//
+extern PyObject* po_NameRelation;
+
+extern PyTypeObject name_comparison_result_type;
+extern PyTypeObject name_type;
+
+/// This is a simple shortcut to create a python Name object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createNameObject(const Name& source);
+
+/// \brief Checks if the given python object is a Name object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Name, false otherwise
+bool PyName_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Name object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Name; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyName_Check()
+///
+/// \note This is not a copy; if the Name is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param name_obj The name object to convert
+const Name& PyName_ToName(const PyObject* name_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_NAME_H
+
+// Local Variables:
+// mode: c++
+// End:
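
As a quick illustration (not part of this commit) of how the three helpers above are meant to be combined, here is a minimal sketch. Widget_copyName is a hypothetical wrapper method; only name_type, PyName_ToName() and createNameObject() come from name_python.h.

    #include <Python.h>

    #include <stdexcept>

    #include "name_python.h"

    using namespace isc::dns::python;

    // Hypothetical wrapper method: takes a Name argument and returns an
    // independent copy of it as a new python Name object.
    PyObject*
    Widget_copyName(PyObject*, PyObject* args) {
        PyObject* name_obj;
        // "O!" makes the interpreter enforce the argument type via name_type,
        // so PyName_ToName() below is safe without a separate PyName_Check().
        if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
            try {
                // createNameObject() throws on failure rather than returning
                // NULL, hence the surrounding try block.
                return (createNameObject(PyName_ToName(name_obj)));
            } catch (const std::exception& ex) {
                PyErr_SetString(PyExc_SystemError, ex.what());
            }
        }
        return (NULL);
    }
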
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 0e2a30b..50436a9 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -12,32 +12,31 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/opcode.h>
-
-using namespace isc::dns;
+#include <Python.h>
-//
-// Declaration of the custom exceptions (None for this class)
+#include <dns/opcode.h>
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
+#include "pydnspp_common.h"
+#include "opcode_python.h"
+#include "edns_python.h"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// Opcode
-//
+
class s_Opcode : public PyObject {
public:
- s_Opcode() : opcode(NULL), static_code(false) {}
- const Opcode* opcode;
+ s_Opcode() : cppobj(NULL), static_code(false) {}
+ const isc::dns::Opcode* cppobj;
bool static_code;
};
+typedef CPPPyObjectContainer<s_Opcode, Opcode> OpcodeContainer;
+
int Opcode_init(s_Opcode* const self, PyObject* args);
void Opcode_destroy(s_Opcode* const self);
@@ -103,64 +102,13 @@ PyMethodDef Opcode_methods[] = {
{ NULL, NULL, 0, NULL }
};
-PyTypeObject opcode_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Opcode",
- sizeof(s_Opcode), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Opcode_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Opcode_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Opcode class objects represent standard OPCODEs "
- "of the header section of DNS messages.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)Opcode_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Opcode_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Opcode_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Opcode_init(s_Opcode* const self, PyObject* args) {
uint8_t code = 0;
if (PyArg_ParseTuple(args, "b", &code)) {
try {
- self->opcode = new Opcode(code);
+ self->cppobj = new Opcode(code);
self->static_code = false;
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -181,22 +129,22 @@ Opcode_init(s_Opcode* const self, PyObject* args) {
void
Opcode_destroy(s_Opcode* const self) {
// Depending on whether we created the rcode or are referring
- // to a global static one, we do or do not delete self->opcode here
+ // to a global static one, we do or do not delete self->cppobj here
if (!self->static_code) {
- delete self->opcode;
+ delete self->cppobj;
}
- self->opcode = NULL;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
Opcode_getCode(const s_Opcode* const self) {
- return (Py_BuildValue("I", self->opcode->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
PyObject*
Opcode_toText(const s_Opcode* const self) {
- return (Py_BuildValue("s", self->opcode->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
@@ -211,7 +159,7 @@ PyObject*
Opcode_createStatic(const Opcode& opcode) {
s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
if (ret != NULL) {
- ret->opcode = &opcode;
+ ret->cppobj = &opcode;
ret->static_code = true;
}
return (ret);
@@ -297,7 +245,7 @@ Opcode_RESERVED15(const s_Opcode*) {
return (Opcode_createStatic(Opcode::RESERVED15()));
}
-PyObject*
+PyObject*
Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
const int op)
{
@@ -318,10 +266,10 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
return (NULL);
case Py_EQ:
- c = (*self->opcode == *other->opcode);
+ c = (*self->cppobj == *other->cppobj);
break;
case Py_NE:
- c = (*self->opcode != *other->opcode);
+ c = (*self->cppobj != *other->cppobj);
break;
case Py_GT:
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
@@ -336,55 +284,88 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
Py_RETURN_FALSE;
}
-// Module Initialization, all statics are initialized here
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+PyTypeObject opcode_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Opcode",
+ sizeof(s_Opcode), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Opcode_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Opcode_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Opcode class objects represent standard OPCODEs "
+ "of the header section of DNS messages.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)Opcode_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Opcode_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Opcode_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createOpcodeObject(const Opcode& source) {
+ OpcodeContainer container(PyObject_New(s_Opcode, &opcode_type));
+ container.set(new Opcode(source));
+ return (container.release());
+}
+
bool
-initModulePart_Opcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&opcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&opcode_type);
- void* p = &opcode_type;
- if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&opcode_type);
- return (false);
+PyOpcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &opcode_type));
+}
- addClassVariable(opcode_type, "QUERY_CODE",
- Py_BuildValue("h", Opcode::QUERY_CODE));
- addClassVariable(opcode_type, "IQUERY_CODE",
- Py_BuildValue("h", Opcode::IQUERY_CODE));
- addClassVariable(opcode_type, "STATUS_CODE",
- Py_BuildValue("h", Opcode::STATUS_CODE));
- addClassVariable(opcode_type, "RESERVED3_CODE",
- Py_BuildValue("h", Opcode::RESERVED3_CODE));
- addClassVariable(opcode_type, "NOTIFY_CODE",
- Py_BuildValue("h", Opcode::NOTIFY_CODE));
- addClassVariable(opcode_type, "UPDATE_CODE",
- Py_BuildValue("h", Opcode::UPDATE_CODE));
- addClassVariable(opcode_type, "RESERVED6_CODE",
- Py_BuildValue("h", Opcode::RESERVED6_CODE));
- addClassVariable(opcode_type, "RESERVED7_CODE",
- Py_BuildValue("h", Opcode::RESERVED7_CODE));
- addClassVariable(opcode_type, "RESERVED8_CODE",
- Py_BuildValue("h", Opcode::RESERVED8_CODE));
- addClassVariable(opcode_type, "RESERVED9_CODE",
- Py_BuildValue("h", Opcode::RESERVED9_CODE));
- addClassVariable(opcode_type, "RESERVED10_CODE",
- Py_BuildValue("h", Opcode::RESERVED10_CODE));
- addClassVariable(opcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Opcode::RESERVED11_CODE));
- addClassVariable(opcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Opcode::RESERVED12_CODE));
- addClassVariable(opcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Opcode::RESERVED13_CODE));
- addClassVariable(opcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Opcode::RESERVED14_CODE));
- addClassVariable(opcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Opcode::RESERVED15_CODE));
-
- return (true);
+const Opcode&
+PyOpcode_ToOpcode(const PyObject* opcode_obj) {
+ if (opcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Opcode PyObject conversion");
+ }
+ const s_Opcode* opcode = static_cast<const s_Opcode*>(opcode_obj);
+ return (*opcode->cppobj);
}
-} // end of unnamed namespace
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/opcode_python.h b/src/lib/dns/python/opcode_python.h
new file mode 100644
index 0000000..d0aec15
--- /dev/null
+++ b/src/lib/dns/python/opcode_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_OPCODE_H
+#define __PYTHON_OPCODE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Opcode;
+
+namespace python {
+
+extern PyTypeObject opcode_type;
+
+/// This is a simple shortcut to create a python Opcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createOpcodeObject(const Opcode& source);
+
+/// \brief Checks if the given python object is an Opcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Opcode, false otherwise
+bool PyOpcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Opcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Opcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyOpcode_Check()
+///
+/// \note This is not a copy; if the Opcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param opcode_obj The opcode object to convert
+const Opcode& PyOpcode_ToOpcode(const PyObject* opcode_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_OPCODE_H
+
+// Local Variables:
+// mode: c++
+// End:
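
Purely for illustration (not part of the patch), here is a sketch of the check-then-convert pattern the notes above refer to, used when the argument is parsed with a plain "O" format instead of "O!". The function is_query_opcode is hypothetical; only the helpers declared in opcode_python.h and Opcode itself are real.

    #include <Python.h>

    #include <dns/opcode.h>

    #include "opcode_python.h"

    using namespace isc::dns;
    using namespace isc::dns::python;

    // Hypothetical module-level function: returns True if the passed object
    // is an Opcode wrapping the standard QUERY opcode, False otherwise.
    PyObject*
    is_query_opcode(PyObject*, PyObject* args) {
        PyObject* obj;
        if (PyArg_ParseTuple(args, "O", &obj) && PyOpcode_Check(obj)) {
            // The explicit PyOpcode_Check() above makes the conversion safe.
            if (PyOpcode_ToOpcode(obj) == Opcode::QUERY()) {
                Py_RETURN_TRUE;
            }
            Py_RETURN_FALSE;
        }
        PyErr_SetString(PyExc_TypeError, "expected an Opcode object");
        return (NULL);
    }
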
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 2138198..0a7d8e5 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -21,51 +21,708 @@
// name initModulePart_<name>, and return true/false instead of
// NULL/*mod
//
-// And of course care has to be taken that all identifiers be unique
+// The big init function is split up into a separate initModulePart function
+// for each class we add.
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <structmember.h>
-#include <config.h>
-
-#include <exceptions/exceptions.h>
-
-#include <util/buffer.h>
-#include <dns/exceptions.h>
-#include <dns/name.h>
-#include <dns/messagerenderer.h>
-
-#include <dns/python/pydnspp_common.h>
-
-// For our 'general' isc::Exceptions
-static PyObject* po_IscException;
-static PyObject* po_InvalidParameter;
-
-// For our own isc::dns::Exception
-static PyObject* po_DNSMessageBADVERS;
-
-// order is important here!
-#include <dns/python/messagerenderer_python.cc>
-#include <dns/python/name_python.cc> // needs Messagerenderer
-#include <dns/python/rrclass_python.cc> // needs Messagerenderer
-#include <dns/python/rrtype_python.cc> // needs Messagerenderer
-#include <dns/python/rrttl_python.cc> // needs Messagerenderer
-#include <dns/python/rdata_python.cc> // needs Type, Class
-#include <dns/python/rrset_python.cc> // needs Rdata, RRTTL
-#include <dns/python/question_python.cc> // needs RRClass, RRType, RRTTL,
- // Name
-#include <dns/python/tsigkey_python.cc> // needs Name
-#include <dns/python/tsig_python.cc> // needs tsigkey
-#include <dns/python/opcode_python.cc>
-#include <dns/python/rcode_python.cc>
-#include <dns/python/edns_python.cc> // needs Messagerenderer, Rcode
-#include <dns/python/message_python.cc> // needs RRset, Question
+#include <dns/message.h>
+#include <dns/opcode.h>
+#include <dns/tsig.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+
+#include "edns_python.h"
+#include "message_python.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+#include "opcode_python.h"
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "question_python.h"
+#include "rcode_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrset_python.h"
+#include "rrttl_python.h"
+#include "rrtype_python.h"
+#include "tsigerror_python.h"
+#include "tsigkey_python.h"
+#include "tsig_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigrecord_python.h"
+
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util::python;
+
+namespace {
+
+bool
+initModulePart_EDNS(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ //
+ // After the type has been initialized, we initialize any exceptions
+ // that are defined in the wrapper for this class, and add constants
+ // to the type, if any
+
+ if (PyType_Ready(&edns_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&edns_type);
+ void* p = &edns_type;
+ PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
-//
-// Definition of the module
-//
-static PyModuleDef pydnspp = {
+ addClassVariable(edns_type, "SUPPORTED_VERSION",
+ Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
+
+ return (true);
+}
+
+bool
+initModulePart_Message(PyObject* mod) {
+ if (PyType_Ready(&message_type) < 0) {
+ return (false);
+ }
+ void* p = &message_type;
+ if (PyModule_AddObject(mod, "Message", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&message_type);
+
+ try {
+ //
+ // Constant class variables
+ //
+
+ // Parse mode
+ installClassVariable(message_type, "PARSE",
+ Py_BuildValue("I", Message::PARSE));
+ installClassVariable(message_type, "RENDER",
+ Py_BuildValue("I", Message::RENDER));
+
+ // Parse options
+ installClassVariable(message_type, "PARSE_DEFAULT",
+ Py_BuildValue("I", Message::PARSE_DEFAULT));
+ installClassVariable(message_type, "PRESERVE_ORDER",
+ Py_BuildValue("I", Message::PRESERVE_ORDER));
+
+ // Header flags
+ installClassVariable(message_type, "HEADERFLAG_QR",
+ Py_BuildValue("I", Message::HEADERFLAG_QR));
+ installClassVariable(message_type, "HEADERFLAG_AA",
+ Py_BuildValue("I", Message::HEADERFLAG_AA));
+ installClassVariable(message_type, "HEADERFLAG_TC",
+ Py_BuildValue("I", Message::HEADERFLAG_TC));
+ installClassVariable(message_type, "HEADERFLAG_RD",
+ Py_BuildValue("I", Message::HEADERFLAG_RD));
+ installClassVariable(message_type, "HEADERFLAG_RA",
+ Py_BuildValue("I", Message::HEADERFLAG_RA));
+ installClassVariable(message_type, "HEADERFLAG_AD",
+ Py_BuildValue("I", Message::HEADERFLAG_AD));
+ installClassVariable(message_type, "HEADERFLAG_CD",
+ Py_BuildValue("I", Message::HEADERFLAG_CD));
+
+ // Sections
+ installClassVariable(message_type, "SECTION_QUESTION",
+ Py_BuildValue("I", Message::SECTION_QUESTION));
+ installClassVariable(message_type, "SECTION_ANSWER",
+ Py_BuildValue("I", Message::SECTION_ANSWER));
+ installClassVariable(message_type, "SECTION_AUTHORITY",
+ Py_BuildValue("I", Message::SECTION_AUTHORITY));
+ installClassVariable(message_type, "SECTION_ADDITIONAL",
+ Py_BuildValue("I", Message::SECTION_ADDITIONAL));
+
+ // Protocol constant
+ installClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
+ Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
+
+ /* Class-specific exceptions */
+ po_MessageTooShort =
+ PyErr_NewException("pydnspp.MessageTooShort", NULL, NULL);
+ PyObjectContainer(po_MessageTooShort).installToModule(
+ mod, "MessageTooShort");
+ po_InvalidMessageSection =
+ PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageSection).installToModule(
+ mod, "InvalidMessageSection");
+ po_InvalidMessageOperation =
+ PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageOperation).installToModule(
+ mod, "InvalidMessageOperation");
+ po_InvalidMessageUDPSize =
+ PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageUDPSize).installToModule(
+ mod, "InvalidMessageUDPSize");
+ po_DNSMessageBADVERS =
+ PyErr_NewException("pydnspp.DNSMessageBADVERS", NULL, NULL);
+ PyObjectContainer(po_DNSMessageBADVERS).installToModule(
+ mod, "DNSMessageBADVERS");
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Message initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_MessageRenderer(PyObject* mod) {
+ if (PyType_Ready(&messagerenderer_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&messagerenderer_type);
+
+ addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
+ addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
+
+ PyModule_AddObject(mod, "MessageRenderer",
+ reinterpret_cast<PyObject*>(&messagerenderer_type));
+
+ return (true);
+}
+
+bool
+initModulePart_Name(PyObject* mod) {
+
+ //
+ // NameComparisonResult
+ //
+ if (PyType_Ready(&name_comparison_result_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_comparison_result_type);
+
+ // Add the enums to the module
+ po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
+ NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
+ NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
+ NameComparisonResult::EQUAL, "EQUAL",
+ NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
+ addClassVariable(name_comparison_result_type, "NameRelation",
+ po_NameRelation);
+
+ PyModule_AddObject(mod, "NameComparisonResult",
+ reinterpret_cast<PyObject*>(&name_comparison_result_type));
+
+ //
+ // Name
+ //
+
+ if (PyType_Ready(&name_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_type);
+
+ // Add the constants to the module
+ addClassVariable(name_type, "MAX_WIRE",
+ Py_BuildValue("I", Name::MAX_WIRE));
+ addClassVariable(name_type, "MAX_LABELS",
+ Py_BuildValue("I", Name::MAX_LABELS));
+ addClassVariable(name_type, "MAX_LABELLEN",
+ Py_BuildValue("I", Name::MAX_LABELLEN));
+ addClassVariable(name_type, "MAX_COMPRESS_POINTER",
+ Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK8",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK16",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
+
+ addClassVariable(name_type, "ROOT_NAME",
+ createNameObject(Name::ROOT_NAME()));
+
+ PyModule_AddObject(mod, "Name",
+ reinterpret_cast<PyObject*>(&name_type));
+
+
+ // Add the exceptions to the module
+ po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
+
+ po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongName", po_TooLongName);
+
+ po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
+
+ po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
+ PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
+
+ po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
+ PyModule_AddObject(mod, "BadEscape", po_BadEscape);
+
+ po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
+
+ po_InvalidBufferPosition =
+ PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
+
+ // This one could have gone into the message_python.cc file, but is
+ // already needed here.
+ po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR",
+ NULL, NULL);
+ PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
+
+ return (true);
+}
+
+bool
+initModulePart_Opcode(PyObject* mod) {
+ if (PyType_Ready(&opcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&opcode_type);
+ void* p = &opcode_type;
+ if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&opcode_type);
+ return (false);
+ }
+
+ addClassVariable(opcode_type, "QUERY_CODE",
+ Py_BuildValue("h", Opcode::QUERY_CODE));
+ addClassVariable(opcode_type, "IQUERY_CODE",
+ Py_BuildValue("h", Opcode::IQUERY_CODE));
+ addClassVariable(opcode_type, "STATUS_CODE",
+ Py_BuildValue("h", Opcode::STATUS_CODE));
+ addClassVariable(opcode_type, "RESERVED3_CODE",
+ Py_BuildValue("h", Opcode::RESERVED3_CODE));
+ addClassVariable(opcode_type, "NOTIFY_CODE",
+ Py_BuildValue("h", Opcode::NOTIFY_CODE));
+ addClassVariable(opcode_type, "UPDATE_CODE",
+ Py_BuildValue("h", Opcode::UPDATE_CODE));
+ addClassVariable(opcode_type, "RESERVED6_CODE",
+ Py_BuildValue("h", Opcode::RESERVED6_CODE));
+ addClassVariable(opcode_type, "RESERVED7_CODE",
+ Py_BuildValue("h", Opcode::RESERVED7_CODE));
+ addClassVariable(opcode_type, "RESERVED8_CODE",
+ Py_BuildValue("h", Opcode::RESERVED8_CODE));
+ addClassVariable(opcode_type, "RESERVED9_CODE",
+ Py_BuildValue("h", Opcode::RESERVED9_CODE));
+ addClassVariable(opcode_type, "RESERVED10_CODE",
+ Py_BuildValue("h", Opcode::RESERVED10_CODE));
+ addClassVariable(opcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Opcode::RESERVED11_CODE));
+ addClassVariable(opcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Opcode::RESERVED12_CODE));
+ addClassVariable(opcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Opcode::RESERVED13_CODE));
+ addClassVariable(opcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Opcode::RESERVED14_CODE));
+ addClassVariable(opcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Question(PyObject* mod) {
+ if (PyType_Ready(&question_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&question_type);
+ PyModule_AddObject(mod, "Question",
+ reinterpret_cast<PyObject*>(&question_type));
+
+ return (true);
+}
+
+bool
+initModulePart_Rcode(PyObject* mod) {
+ if (PyType_Ready(&rcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rcode_type);
+ void* p = &rcode_type;
+ if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&rcode_type);
+ return (false);
+ }
+
+ addClassVariable(rcode_type, "NOERROR_CODE",
+ Py_BuildValue("h", Rcode::NOERROR_CODE));
+ addClassVariable(rcode_type, "FORMERR_CODE",
+ Py_BuildValue("h", Rcode::FORMERR_CODE));
+ addClassVariable(rcode_type, "SERVFAIL_CODE",
+ Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+ addClassVariable(rcode_type, "NXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+ addClassVariable(rcode_type, "NOTIMP_CODE",
+ Py_BuildValue("h", Rcode::NOTIMP_CODE));
+ addClassVariable(rcode_type, "REFUSED_CODE",
+ Py_BuildValue("h", Rcode::REFUSED_CODE));
+ addClassVariable(rcode_type, "YXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+ addClassVariable(rcode_type, "YXRRSET_CODE",
+ Py_BuildValue("h", Rcode::YXRRSET_CODE));
+ addClassVariable(rcode_type, "NXRRSET_CODE",
+ Py_BuildValue("h", Rcode::NXRRSET_CODE));
+ addClassVariable(rcode_type, "NOTAUTH_CODE",
+ Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+ addClassVariable(rcode_type, "NOTZONE_CODE",
+ Py_BuildValue("h", Rcode::NOTZONE_CODE));
+ addClassVariable(rcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Rcode::RESERVED11_CODE));
+ addClassVariable(rcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Rcode::RESERVED12_CODE));
+ addClassVariable(rcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Rcode::RESERVED13_CODE));
+ addClassVariable(rcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Rcode::RESERVED14_CODE));
+ addClassVariable(rcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Rcode::RESERVED15_CODE));
+ addClassVariable(rcode_type, "BADVERS_CODE",
+ Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Rdata(PyObject* mod) {
+ if (PyType_Ready(&rdata_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rdata_type);
+ PyModule_AddObject(mod, "Rdata",
+ reinterpret_cast<PyObject*>(&rdata_type));
+
+ // Add the exceptions to the class
+ po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
+
+ po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+
+ po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong",
+ NULL, NULL);
+ PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+
+
+ return (true);
+}
+
+bool
+initModulePart_RRClass(PyObject* mod) {
+ po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass",
+ NULL, NULL);
+ Py_INCREF(po_InvalidRRClass);
+ PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
+ po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass",
+ NULL, NULL);
+ Py_INCREF(po_IncompleteRRClass);
+ PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
+
+ if (PyType_Ready(&rrclass_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrclass_type);
+ PyModule_AddObject(mod, "RRClass",
+ reinterpret_cast<PyObject*>(&rrclass_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRset(PyObject* mod) {
+ po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+
+ // RRset
+ if (PyType_Ready(&rrset_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrset_type);
+ PyModule_AddObject(mod, "RRset",
+ reinterpret_cast<PyObject*>(&rrset_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRTTL(PyObject* mod) {
+ po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
+ po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+
+ if (PyType_Ready(&rrttl_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrttl_type);
+ PyModule_AddObject(mod, "RRTTL",
+ reinterpret_cast<PyObject*>(&rrttl_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRType(PyObject* mod) {
+ // Add the exceptions to the module
+ po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
+ po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
+
+ if (PyType_Ready(&rrtype_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrtype_type);
+ PyModule_AddObject(mod, "RRType",
+ reinterpret_cast<PyObject*>(&rrtype_type));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGError(PyObject* mod) {
+ if (PyType_Ready(&tsigerror_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigerror_type;
+ if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigerror_type);
+
+ try {
+ // Constant class variables
+ // Error codes (bare values)
+ installClassVariable(tsigerror_type, "BAD_SIG_CODE",
+ Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
+ installClassVariable(tsigerror_type, "BAD_KEY_CODE",
+ Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
+ installClassVariable(tsigerror_type, "BAD_TIME_CODE",
+ Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
+
+ // Error codes (constant objects)
+ installClassVariable(tsigerror_type, "NOERROR",
+ createTSIGErrorObject(TSIGError::NOERROR()));
+ installClassVariable(tsigerror_type, "FORMERR",
+ createTSIGErrorObject(TSIGError::FORMERR()));
+ installClassVariable(tsigerror_type, "SERVFAIL",
+ createTSIGErrorObject(TSIGError::SERVFAIL()));
+ installClassVariable(tsigerror_type, "NXDOMAIN",
+ createTSIGErrorObject(TSIGError::NXDOMAIN()));
+ installClassVariable(tsigerror_type, "NOTIMP",
+ createTSIGErrorObject(TSIGError::NOTIMP()));
+ installClassVariable(tsigerror_type, "REFUSED",
+ createTSIGErrorObject(TSIGError::REFUSED()));
+ installClassVariable(tsigerror_type, "YXDOMAIN",
+ createTSIGErrorObject(TSIGError::YXDOMAIN()));
+ installClassVariable(tsigerror_type, "YXRRSET",
+ createTSIGErrorObject(TSIGError::YXRRSET()));
+ installClassVariable(tsigerror_type, "NXRRSET",
+ createTSIGErrorObject(TSIGError::NXRRSET()));
+ installClassVariable(tsigerror_type, "NOTAUTH",
+ createTSIGErrorObject(TSIGError::NOTAUTH()));
+ installClassVariable(tsigerror_type, "NOTZONE",
+ createTSIGErrorObject(TSIGError::NOTZONE()));
+ installClassVariable(tsigerror_type, "RESERVED11",
+ createTSIGErrorObject(TSIGError::RESERVED11()));
+ installClassVariable(tsigerror_type, "RESERVED12",
+ createTSIGErrorObject(TSIGError::RESERVED12()));
+ installClassVariable(tsigerror_type, "RESERVED13",
+ createTSIGErrorObject(TSIGError::RESERVED13()));
+ installClassVariable(tsigerror_type, "RESERVED14",
+ createTSIGErrorObject(TSIGError::RESERVED14()));
+ installClassVariable(tsigerror_type, "RESERVED15",
+ createTSIGErrorObject(TSIGError::RESERVED15()));
+ installClassVariable(tsigerror_type, "BAD_SIG",
+ createTSIGErrorObject(TSIGError::BAD_SIG()));
+ installClassVariable(tsigerror_type, "BAD_KEY",
+ createTSIGErrorObject(TSIGError::BAD_KEY()));
+ installClassVariable(tsigerror_type, "BAD_TIME",
+ createTSIGErrorObject(TSIGError::BAD_TIME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGError initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGError initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKey(PyObject* mod) {
+ if (PyType_Ready(&tsigkey_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigkey_type;
+ if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkey_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigkey_type, "HMACMD5_NAME",
+ createNameObject(TSIGKey::HMACMD5_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA1_NAME",
+ createNameObject(TSIGKey::HMACSHA1_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA256_NAME",
+ createNameObject(TSIGKey::HMACSHA256_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA224_NAME",
+ createNameObject(TSIGKey::HMACSHA224_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA384_NAME",
+ createNameObject(TSIGKey::HMACSHA384_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA512_NAME",
+ createNameObject(TSIGKey::HMACSHA512_NAME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGKey initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGKey initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKeyRing(PyObject* mod) {
+ if (PyType_Ready(&tsigkeyring_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkeyring_type);
+ void* p = &tsigkeyring_type;
+ if (PyModule_AddObject(mod, "TSIGKeyRing",
+ static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&tsigkeyring_type);
+ return (false);
+ }
+
+ addClassVariable(tsigkeyring_type, "SUCCESS",
+ Py_BuildValue("I", TSIGKeyRing::SUCCESS));
+ addClassVariable(tsigkeyring_type, "EXIST",
+ Py_BuildValue("I", TSIGKeyRing::EXIST));
+ addClassVariable(tsigkeyring_type, "NOTFOUND",
+ Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGContext(PyObject* mod) {
+ if (PyType_Ready(&tsigcontext_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigcontext_type;
+ if (PyModule_AddObject(mod, "TSIGContext",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigcontext_type);
+
+ try {
+ // Class specific exceptions
+ po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
+ po_IscException, NULL);
+ PyObjectContainer(po_TSIGContextError).installToModule(
+ mod, "TSIGContextError");
+
+ // Constant class variables
+ installClassVariable(tsigcontext_type, "STATE_INIT",
+ Py_BuildValue("I", TSIGContext::INIT));
+ installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
+ Py_BuildValue("I", TSIGContext::SENT_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
+ Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
+ Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
+ installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
+ Py_BuildValue("I",
+ TSIGContext::VERIFIED_RESPONSE));
+
+ installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
+ Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGContext initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGContext initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIG(PyObject* mod) {
+ if (PyType_Ready(&tsig_type) < 0) {
+ return (false);
+ }
+ void* p = &tsig_type;
+ if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsig_type);
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGRecord(PyObject* mod) {
+ if (PyType_Ready(&tsigrecord_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigrecord_type;
+ if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigrecord_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigrecord_type, "TSIG_TTL",
+ Py_BuildValue("I", 0));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGRecord initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGRecord initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+PyModuleDef pydnspp = {
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
"pydnspp",
"Python bindings for the classes in the isc::dns namespace.\n\n"
@@ -80,10 +737,11 @@ static PyModuleDef pydnspp = {
NULL,
NULL
};
+}
PyMODINIT_FUNC
PyInit_pydnspp(void) {
- PyObject *mod = PyModule_Create(&pydnspp);
+ PyObject* mod = PyModule_Create(&pydnspp);
if (mod == NULL) {
return (NULL);
}
@@ -154,10 +812,21 @@ PyInit_pydnspp(void) {
return (NULL);
}
+ if (!initModulePart_TSIG(mod)) {
+ return (NULL);
+ }
+
+ if (!initModulePart_TSIGError(mod)) {
+ return (NULL);
+ }
+
+ if (!initModulePart_TSIGRecord(mod)) {
+ return (NULL);
+ }
+
if (!initModulePart_TSIGContext(mod)) {
return (NULL);
}
return (mod);
}
-
diff --git a/src/lib/dns/python/pydnspp_common.cc b/src/lib/dns/python/pydnspp_common.cc
index 6c26367..0f0f873 100644
--- a/src/lib/dns/python/pydnspp_common.cc
+++ b/src/lib/dns/python/pydnspp_common.cc
@@ -15,6 +15,45 @@
#include <Python.h>
#include <pydnspp_common.h>
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+
+#include <dns/exceptions.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rrset_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "tsigkey_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigerror_python.h"
+#include "tsigrecord_python.h"
+#include "tsig_python.h"
+#include "question_python.h"
+#include "message_python.h"
+
+using namespace isc::dns::python;
+
+namespace isc {
+namespace dns {
+namespace python {
+// For our 'general' isc::Exceptions
+PyObject* po_IscException;
+PyObject* po_InvalidParameter;
+
+// For our own isc::dns::Exception
+PyObject* po_DNSMessageBADVERS;
+
+
int
readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence) {
PyObject* el = NULL;
@@ -44,8 +83,15 @@ readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence) {
}
-void addClassVariable(PyTypeObject& c, const char* name,
- PyObject* obj)
-{
- PyDict_SetItemString(c.tp_dict, name, obj);
+int
+addClassVariable(PyTypeObject& c, const char* name, PyObject* obj) {
+ if (obj == NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "NULL object is specified for a class variable");
+ return (-1);
+ }
+ return (PyDict_SetItemString(c.tp_dict, name, obj));
+}
+}
+}
}
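
Since addClassVariable() now reports failure (returning -1 and setting ValueError on a NULL object) instead of silently ignoring it, callers can propagate the error. A minimal sketch of that pattern follows; initModulePart_Example and example_type are hypothetical stand-ins for the real init functions and wrapped types.

    #include <Python.h>

    #include "pydnspp_common.h"

    using namespace isc::dns::python;

    extern PyTypeObject example_type;   // hypothetical wrapped type

    // Sketch only: a caller checking the new int return value.
    bool
    initModulePart_Example(PyObject* mod) {
        if (PyType_Ready(&example_type) < 0) {
            return (false);
        }
        Py_INCREF(&example_type);
        PyModule_AddObject(mod, "Example",
                           reinterpret_cast<PyObject*>(&example_type));

        // Py_BuildValue() may return NULL on failure; addClassVariable()
        // now turns that into a ValueError and returns -1.
        if (addClassVariable(example_type, "SOME_CONSTANT",
                             Py_BuildValue("I", 42)) < 0) {
            return (false);
        }
        return (true);
    }
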
diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h
index 32e2b78..8092b08 100644
--- a/src/lib/dns/python/pydnspp_common.h
+++ b/src/lib/dns/python/pydnspp_common.h
@@ -15,9 +15,20 @@
#ifndef __LIBDNS_PYTHON_COMMON_H
#define __LIBDNS_PYTHON_COMMON_H 1
-//
-// Shared functions for python/c API
-//
+#include <Python.h>
+
+#include <stdexcept>
+#include <string>
+
+namespace isc {
+namespace dns {
+namespace python {
+// For our 'general' isc::Exceptions
+extern PyObject* po_IscException;
+extern PyObject* po_InvalidParameter;
+
+// For our own isc::dns::Exception
+extern PyObject* po_DNSMessageBADVERS;
// This function reads 'bytes' from a sequence
// This sequence can be anything that implements the Sequence interface,
@@ -31,6 +42,12 @@
// case nothing is removed
int readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence);
-void addClassVariable(PyTypeObject& c, const char* name, PyObject* obj);
-
+int addClassVariable(PyTypeObject& c, const char* name, PyObject* obj);
+} // namespace python
+} // namespace dns
+} // namespace isc
#endif // __LIBDNS_PYTHON_COMMON_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/pydnspp_towire.h b/src/lib/dns/python/pydnspp_towire.h
new file mode 100644
index 0000000..e987a29
--- /dev/null
+++ b/src/lib/dns/python/pydnspp_towire.h
@@ -0,0 +1,127 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LIBDNS_PYTHON_TOWIRE_H
+#define __LIBDNS_PYTHON_TOWIRE_H 1
+
+#include <Python.h>
+
+#include <stdexcept>
+#include <string>
+
+#include <dns/messagerenderer.h>
+
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "messagerenderer_python.h"
+
+namespace isc {
+namespace dns {
+namespace python {
+
+// The following two templated structures are helpers that let the same
+// toWire() template implementation cover two kinds of toWire() methods:
+// those that return an integer and those that have no return value.
+template <typename CPPCLASS>
+struct ToWireCallVoid {
+ ToWireCallVoid(CPPCLASS& cppobj) : cppobj_(cppobj) {}
+ int operator()(AbstractMessageRenderer& renderer) const {
+ cppobj_.toWire(renderer);
+ return (0);
+ }
+ const CPPCLASS& cppobj_;
+};
+
+template <typename CPPCLASS>
+struct ToWireCallInt {
+ ToWireCallInt(CPPCLASS& cppobj) : cppobj_(cppobj) {}
+ int operator()(AbstractMessageRenderer& renderer) const {
+ return (cppobj_.toWire(renderer));
+ }
+ const CPPCLASS& cppobj_;
+};
+
+// This templated function gives a common implementation of the toWire()
+// wrapper for various libdns++ classes. PYSTRUCT and CPPCLASS are
+// (C++ binding of) python and (pure) C++ classes (e.g., s_Name and Name),
+// and TOWIRECALLER is either ToWireCallVoid<CPPCLASS> or
+// ToWireCallInt<CPPCLASS>, depending on the toWire() method of the class
+// returns a value or not.
+//
+// See, e.g., tsigrecord_python.cc for how to use it.
+//
+// This should be usable without modification for most classes that
+// have toWire(). But if the underlying toWire() has an extra argument, the
+// definition will need to be adjusted accordingly.
+template <typename PYSTRUCT, typename CPPCLASS, typename TOWIRECALLER>
+PyObject*
+toWireWrapper(const PYSTRUCT* const self, PyObject* args) {
+ try {
+ // To OutputBuffer version
+ PyObject* bytes; // this won't have its own reference, so no risk of a leak.
+ if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
+ // render the object into a buffer (this can throw)
+ isc::util::OutputBuffer buffer(0);
+ self->cppobj->toWire(buffer);
+
+ // convert the rendered data into a PyObject. It could leak later,
+ // so we store it in a container right away.
+ PyObject* rd_bytes = PyBytes_FromStringAndSize(
+ static_cast<const char*>(buffer.getData()),
+ buffer.getLength());
+ isc::util::python::PyObjectContainer rd_bytes_container(rd_bytes);
+
+ // concatenate the rendered data to the given existing sequence. The
+ // concat operation could fail, so we use a container to clean it up
+ // safely should that happen.
+ PyObject* result = PySequence_InPlaceConcat(bytes, rd_bytes);
+ isc::util::python::PyObjectContainer result_container(result);
+
+ return (result_container.release());
+ }
+
+ // To MessageRenderer version
+ PyObject* renderer;
+ if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &renderer)) {
+ const unsigned int n = TOWIRECALLER(*self->cppobj)(
+ PyMessageRenderer_ToMessageRenderer(renderer));
+
+ return (Py_BuildValue("I", n));
+ }
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Failed to render an libdns++ object wire-format: "
+ + std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException, "Unexpectedly failed to render an "
+ "libdns++ object wire-format.");
+ return (NULL);
+ }
+
+ PyErr_Clear();
+ PyErr_SetString(PyExc_TypeError,
+ "Incorrect arguments for a to_wire() method");
+ return (NULL);
+}
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __LIBDNS_PYTHON_TOWIRE_H
+
+// Local Variables:
+// mode: c++
+// End:
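
To make the intended instantiation concrete, here is a hedged sketch (not taken from this commit) of how a binding's method table could plug its class into toWireWrapper(). s_Widget and Widget are placeholders; s_Widget is assumed to follow the cppobj-pointer convention used throughout these wrappers.

    #include <Python.h>

    #include "pydnspp_towire.h"

    using namespace isc::dns::python;

    // s_Widget/Widget are hypothetical; s_Widget is assumed to hold the
    // wrapped object in a 'cppobj' pointer, as the other bindings do.
    // Widget::toWire(AbstractMessageRenderer&) is assumed to be a const
    // method returning void, hence ToWireCallVoid; a toWire() that returns
    // the rendered size would use ToWireCallInt instead.
    PyMethodDef Widget_methods[] = {
        { "to_wire",
          reinterpret_cast<PyCFunction>(
              toWireWrapper<s_Widget, Widget, ToWireCallVoid<const Widget> >),
          METH_VARARGS,
          "Render the Widget in wire format.\n"
          "The argument is either a bytearray to append the data to, or a "
          "MessageRenderer to render into." },
        { NULL, NULL, 0, NULL }
    };
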
diff --git a/src/lib/dns/python/question_python.cc b/src/lib/dns/python/question_python.cc
index 2889350..44d68a2 100644
--- a/src/lib/dns/python/question_python.cc
+++ b/src/lib/dns/python/question_python.cc
@@ -12,25 +12,34 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/question.h>
+#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+#include "question_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+using namespace isc;
-//
-// Question
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Question : public PyObject {
public:
- QuestionPtr question;
+ isc::dns::QuestionPtr cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
static int Question_init(s_Question* self, PyObject* args);
static void Question_destroy(s_Question* self);
@@ -69,60 +78,6 @@ static PyMethodDef Question_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Question
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject question_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Question",
- sizeof(s_Question), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Question_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Question_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Question class encapsulates the common search key of DNS"
- "lookup, consisting of owner name, RR type and RR class.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Question_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Question_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
static int
Question_init(s_Question* self, PyObject* args) {
// Try out the various combinations of arguments to call the
@@ -131,9 +86,9 @@ Question_init(s_Question* self, PyObject* args) {
// that if we try several like here. Otherwise the *next* python
// call will suddenly appear to throw an exception.
// (the way to do exceptions is to set PyErr and return -1)
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
const char* b;
Py_ssize_t len;
@@ -141,17 +96,18 @@ Question_init(s_Question* self, PyObject* args) {
try {
if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &name,
- &rrclass_type, &rrclass,
- &rrtype_type, &rrtype
+ &rrclass_type, &rrclass,
+ &rrtype_type, &rrtype
)) {
- self->question = QuestionPtr(new Question(*name->name, *rrclass->rrclass,
- *rrtype->rrtype));
+ self->cppobj = QuestionPtr(new Question(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype)));
return (0);
} else if (PyArg_ParseTuple(args, "y#|I", &b, &len, &position)) {
PyErr_Clear();
InputBuffer inbuf(b, len);
inbuf.setPosition(position);
- self->question = QuestionPtr(new Question(inbuf));
+ self->cppobj = QuestionPtr(new Question(inbuf));
return (0);
}
} catch (const DNSMessageFORMERR& dmfe) {
@@ -168,7 +124,7 @@ Question_init(s_Question* self, PyObject* args) {
return (-1);
}
- self->question = QuestionPtr();
+ self->cppobj = QuestionPtr();
PyErr_Clear();
PyErr_SetString(PyExc_TypeError,
@@ -178,52 +134,62 @@ Question_init(s_Question* self, PyObject* args) {
static void
Question_destroy(s_Question* self) {
- self->question.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
static PyObject*
Question_getName(s_Question* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->name = new Name(self->question->getName());
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question Name");
}
-
- return (name);
+ return (NULL);
}
static PyObject*
Question_getType(s_Question* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->question->getType());
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
static PyObject*
Question_getClass(s_Question* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->question->getClass());
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-
static PyObject*
Question_toText(s_Question* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->question->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
static PyObject*
@@ -237,14 +203,14 @@ Question_str(PyObject* self) {
static PyObject*
Question_toWire(s_Question* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
// Max length is Name::MAX_WIRE + rrclass (2) + rrtype (2)
OutputBuffer buffer(Name::MAX_WIRE + 4);
- self->question->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -253,7 +219,7 @@ Question_toWire(s_Question* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->question->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -264,23 +230,92 @@ Question_toWire(s_Question* self, PyObject* args) {
return (NULL);
}
-// end of Question
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Question
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject question_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Question",
+ sizeof(s_Question), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Question_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Question_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Question class encapsulates the common search key of DNS"
+ "lookup, consisting of owner name, RR type and RR class.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Question_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Question_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createQuestionObject(const Question& source) {
+ s_Question* question =
+ static_cast<s_Question*>(question_type.tp_alloc(&question_type, 0));
+ question->cppobj = QuestionPtr(new Question(source));
+ return (question);
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_Question(PyObject* mod) {
- // Add the exceptions to the module
+PyQuestion_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &question_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&question_type) < 0) {
- return (false);
+const Question&
+PyQuestion_ToQuestion(const PyObject* question_obj) {
+ if (question_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Question PyObject conversion");
}
- Py_INCREF(&question_type);
- PyModule_AddObject(mod, "Question",
- reinterpret_cast<PyObject*>(&question_type));
-
- return (true);
+ const s_Question* question = static_cast<const s_Question*>(question_obj);
+ return (*question->cppobj);
}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/question_python.h b/src/lib/dns/python/question_python.h
new file mode 100644
index 0000000..f5d78b1
--- /dev/null
+++ b/src/lib/dns/python/question_python.h
@@ -0,0 +1,66 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_QUESTION_H
+#define __PYTHON_QUESTION_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Question;
+
+namespace python {
+
+extern PyObject* po_EmptyQuestion;
+
+extern PyTypeObject question_type;
+
+/// This is a simple shortcut to create a python Question object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createQuestionObject(const Question& source);
+
+/// \brief Checks if the given python object is a Question object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Question, false otherwise
+bool PyQuestion_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Question object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Question; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyQuestion_Check()
+///
+/// \note This is not a copy; if the Question is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param question_obj The question object to convert
+const Question& PyQuestion_ToQuestion(const PyObject* question_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_QUESTION_H
+
+// Local Variables:
+// mode: c++
+// End:
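
The new question_python.h above documents the calling convention for createQuestionObject(): it never returns NULL, so the caller wraps it in a try block and translates any C++ exception into a Python error. A minimal sketch of a wrapper that follows that convention (makeExampleQuestion() and the use of PyExc_SystemError instead of the module's own exception objects are illustrative assumptions, not part of this commit):

    #include <Python.h>
    #include <exception>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <dns/question.h>
    #include "question_python.h"

    // Illustrative only: build a fixed Question and hand it to Python,
    // converting C++ exceptions into Python exceptions as required.
    PyObject*
    makeExampleQuestion(PyObject*, PyObject*) {
        try {
            const isc::dns::Question q(isc::dns::Name("example.org"),
                                       isc::dns::RRClass::IN(),
                                       isc::dns::RRType::AAAA());
            // Returns a new reference (refcount 1) or throws; never NULL.
            return (isc::dns::python::createQuestionObject(q));
        } catch (const std::exception& ex) {
            PyErr_SetString(PyExc_SystemError, ex.what());
        } catch (...) {
            PyErr_SetString(PyExc_SystemError,
                            "unexpected failure creating Question");
        }
        return (NULL);
    }

In the real bindings the first catch clause would normally set po_IscException, as Question_getClass() and RRset_getName() do elsewhere in this diff.
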
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index b80a93c..42b48e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -12,26 +12,22 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/rcode.h>
-
-using namespace isc::dns;
+#include <Python.h>
-//
-// Declaration of the custom exceptions (None for this class)
+#include <exceptions/exceptions.h>
+#include <dns/rcode.h>
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
+#include "pydnspp_common.h"
+#include "rcode_python.h"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util::python;
namespace {
+// The s_* Class simply covers one instantiation of the object.
//
-// Rcode
-//
-
// We added a helper variable static_code here
// Since we can create Rcodes dynamically with Rcode(int), but also
// use the static globals (Rcode::NOERROR() etc), we use this
@@ -39,13 +35,19 @@ namespace {
// case Rcode_destroy should not free it (the other option is to
// allocate new Rcodes for every use of the static ones, but this
// seems more efficient).
+//
+// Follow-up note: we don't have to use the proxy function in the python lib;
+// we can just define class-specific constants directly (see TSIGError).
+// We should do this cleanup later.
class s_Rcode : public PyObject {
public:
- s_Rcode() : rcode(NULL), static_code(false) {}
- const Rcode* rcode;
+ s_Rcode() : cppobj(NULL), static_code(false) {};
+ const Rcode* cppobj;
bool static_code;
};
+typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodeContainer;
+
int Rcode_init(s_Rcode* const self, PyObject* args);
void Rcode_destroy(s_Rcode* const self);
@@ -118,57 +120,6 @@ PyMethodDef Rcode_methods[] = {
{ NULL, NULL, 0, NULL }
};
-PyTypeObject rcode_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Rcode",
- sizeof(s_Rcode), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Rcode_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Rcode_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Rcode class objects represent standard RCODEs"
- "of the header section of DNS messages.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)Rcode_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Rcode_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Rcode_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Rcode_init(s_Rcode* const self, PyObject* args) {
long code = 0;
@@ -193,9 +144,9 @@ Rcode_init(s_Rcode* const self, PyObject* args) {
}
try {
if (ext_code == -1) {
- self->rcode = new Rcode(code);
+ self->cppobj = new Rcode(code);
} else {
- self->rcode = new Rcode(code, ext_code);
+ self->cppobj = new Rcode(code, ext_code);
}
self->static_code = false;
} catch (const isc::OutOfRange& ex) {
@@ -211,27 +162,27 @@ Rcode_init(s_Rcode* const self, PyObject* args) {
void
Rcode_destroy(s_Rcode* const self) {
// Depending on whether we created the rcode or are referring
- // to a global one, we do or do not delete self->rcode here
+ // to a global one, we do or do not delete self->cppobj here
if (!self->static_code) {
- delete self->rcode;
+ delete self->cppobj;
}
- self->rcode = NULL;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
Rcode_getCode(const s_Rcode* const self) {
- return (Py_BuildValue("I", self->rcode->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
PyObject*
Rcode_getExtendedCode(const s_Rcode* const self) {
- return (Py_BuildValue("I", self->rcode->getExtendedCode()));
+ return (Py_BuildValue("I", self->cppobj->getExtendedCode()));
}
PyObject*
Rcode_toText(const s_Rcode* const self) {
- return (Py_BuildValue("s", self->rcode->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
@@ -245,7 +196,7 @@ PyObject*
Rcode_createStatic(const Rcode& rcode) {
s_Rcode* ret = PyObject_New(s_Rcode, &rcode_type);
if (ret != NULL) {
- ret->rcode = &rcode;
+ ret->cppobj = &rcode;
ret->static_code = true;
}
return (ret);
@@ -336,7 +287,7 @@ Rcode_BADVERS(const s_Rcode*) {
return (Rcode_createStatic(Rcode::BADVERS()));
}
-PyObject*
+PyObject*
Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
const int op)
{
@@ -357,10 +308,10 @@ Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
PyErr_SetString(PyExc_TypeError, "Unorderable type; Rcode");
return (NULL);
case Py_EQ:
- c = (*self->rcode == *other->rcode);
+ c = (*self->cppobj == *other->cppobj);
break;
case Py_NE:
- c = (*self->rcode != *other->rcode);
+ c = (*self->cppobj != *other->cppobj);
break;
case Py_GT:
PyErr_SetString(PyExc_TypeError, "Unorderable type; Rcode");
@@ -374,58 +325,87 @@ Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
else
Py_RETURN_FALSE;
}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+PyTypeObject rcode_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Rcode",
+ sizeof(s_Rcode), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Rcode_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Rcode_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Rcode class objects represent standard RCODEs"
+ "of the header section of DNS messages.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ reinterpret_cast<richcmpfunc>(Rcode_richcmp), // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Rcode_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Rcode_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRcodeObject(const Rcode& source) {
+ RcodeContainer container(PyObject_New(s_Rcode, &rcode_type));
+ container.set(new Rcode(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_Rcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&rcode_type);
- void* p = &rcode_type;
- if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&rcode_type);
- return (false);
+PyRcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &rcode_type));
+}
- addClassVariable(rcode_type, "NOERROR_CODE",
- Py_BuildValue("h", Rcode::NOERROR_CODE));
- addClassVariable(rcode_type, "FORMERR_CODE",
- Py_BuildValue("h", Rcode::FORMERR_CODE));
- addClassVariable(rcode_type, "SERVFAIL_CODE",
- Py_BuildValue("h", Rcode::SERVFAIL_CODE));
- addClassVariable(rcode_type, "NXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
- addClassVariable(rcode_type, "NOTIMP_CODE",
- Py_BuildValue("h", Rcode::NOTIMP_CODE));
- addClassVariable(rcode_type, "REFUSED_CODE",
- Py_BuildValue("h", Rcode::REFUSED_CODE));
- addClassVariable(rcode_type, "YXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
- addClassVariable(rcode_type, "YXRRSET_CODE",
- Py_BuildValue("h", Rcode::YXRRSET_CODE));
- addClassVariable(rcode_type, "NXRRSET_CODE",
- Py_BuildValue("h", Rcode::NXRRSET_CODE));
- addClassVariable(rcode_type, "NOTAUTH_CODE",
- Py_BuildValue("h", Rcode::NOTAUTH_CODE));
- addClassVariable(rcode_type, "NOTZONE_CODE",
- Py_BuildValue("h", Rcode::NOTZONE_CODE));
- addClassVariable(rcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Rcode::RESERVED11_CODE));
- addClassVariable(rcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Rcode::RESERVED12_CODE));
- addClassVariable(rcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Rcode::RESERVED13_CODE));
- addClassVariable(rcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Rcode::RESERVED14_CODE));
- addClassVariable(rcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Rcode::RESERVED15_CODE));
- addClassVariable(rcode_type, "BADVERS_CODE",
- Py_BuildValue("h", Rcode::BADVERS_CODE));
-
- return (true);
+const Rcode&
+PyRcode_ToRcode(const PyObject* rcode_obj) {
+ if (rcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rcode PyObject conversion");
+ }
+ const s_Rcode* rcode = static_cast<const s_Rcode*>(rcode_obj);
+ return (*rcode->cppobj);
}
-} // end of unnamed namespace
+
+} // namespace python
+} // namespace dns
+} // namespace isc
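
createRcodeObject() above relies on CPPPyObjectContainer from pycppwrapper_util for exception safety: the container adopts the freshly allocated python object, set() attaches the new C++ Rcode, and release() hands the reference back only after both steps have succeeded, so nothing leaks if allocation or construction throws. The following is a rough sketch of that RAII idea only, not the actual pycppwrapper_util implementation, and it assumes the usual cppobj member convention used throughout these wrappers:

    #include <Python.h>
    #include <new>

    // Sketch of the ownership pattern; the real helper lives in
    // util/python/pycppwrapper_util.h and does more (custom exception, etc).
    template <typename PYSTRUCT, typename CPPCLASS>
    class ExampleContainer {
    public:
        explicit ExampleContainer(PYSTRUCT* obj) : obj_(obj) {
            if (obj_ == NULL) {
                throw std::bad_alloc(); // real code throws PyCPPWrapperException
            }
        }
        ~ExampleContainer() {
            Py_XDECREF(obj_);   // no-op if release() already transferred ownership
        }
        void set(CPPCLASS* cppobj) {
            obj_->cppobj = cppobj;
        }
        PYSTRUCT* release() {
            PYSTRUCT* const ret = obj_;
            obj_ = NULL;
            return (ret);
        }
    private:
        PYSTRUCT* obj_;
    };
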
diff --git a/src/lib/dns/python/rcode_python.h b/src/lib/dns/python/rcode_python.h
new file mode 100644
index 0000000..a149406
--- /dev/null
+++ b/src/lib/dns/python/rcode_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RCODE_H
+#define __PYTHON_RCODE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Rcode;
+
+namespace python {
+
+extern PyTypeObject rcode_type;
+
+/// This is a simple shortcut to create a python Rcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRcodeObject(const Rcode& source);
+
+/// \brief Checks if the given python object is a Rcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rcode, false otherwise
+bool PyRcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRcode_Check()
+///
+/// \note This is not a copy; if the Rcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rcode_obj The rcode object to convert
+const Rcode& PyRcode_ToRcode(const PyObject* rcode_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RCODE_H
+
+// Local Variables:
+// mode: c++
+// End:
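
The check/convert pair declared above is meant for code that receives a plain PyObject* and needs the underlying Rcode. A short sketch of that calling pattern (tryGetRcode() is a hypothetical helper, not part of the library):

    #include <Python.h>
    #include <stdint.h>
    #include <dns/rcode.h>
    #include "rcode_python.h"

    using namespace isc::dns::python;

    // Illustrative helper: fills 'code' and returns true if obj wraps an Rcode.
    bool
    tryGetRcode(PyObject* obj, uint16_t& code) {
        // PyRcode_Check() throws PyCPPWrapperException on a NULL argument,
        // so reject that case up front.
        if (obj == NULL || !PyRcode_Check(obj)) {
            return (false);
        }
        // The reference returned by PyRcode_ToRcode() is only valid while
        // obj is alive; here we just read a value out of it.
        code = PyRcode_ToRcode(obj).getCode();
        return (true);
    }
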
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index faa4f4c..06c0263 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -12,60 +12,48 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/rdata.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rdata_python.h"
+#include "rrtype_python.h"
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
using namespace isc::dns::rdata;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRdataLength;
-static PyObject* po_InvalidRdataText;
-static PyObject* po_CharStringTooLong;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rdata
-//
-
-// The s_* Class simply coverst one instantiation of the object
-
-// Using a shared_ptr here should not really be necessary (PyObject
-// is already reference-counted), however internally on the cpp side,
-// not doing so might result in problems, since we can't copy construct
-// rdata field, adding them to rrsets results in a problem when the
-// rrset is destroyed later
+namespace {
class s_Rdata : public PyObject {
public:
- RdataPtr rdata;
+ isc::dns::rdata::ConstRdataPtr cppobj;
};
+typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
+
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
//
// General creation and destruction
-static int Rdata_init(s_Rdata* self, PyObject* args);
-static void Rdata_destroy(s_Rdata* self);
+int Rdata_init(s_Rdata* self, PyObject* args);
+void Rdata_destroy(s_Rdata* self);
// These are the functions we export
-static PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(s_Rdata* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* Rdata_str(PyObject* self);
-static PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_str(PyObject* self);
+PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
+PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -73,7 +61,7 @@ static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef Rdata_methods[] = {
+PyMethodDef Rdata_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
@@ -86,64 +74,10 @@ static PyMethodDef Rdata_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Rdata
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rdata_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Rdata",
- sizeof(s_Rdata), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Rdata_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Rdata_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Rdata class is an abstract base class that provides "
- "a set of common interfaces to manipulate concrete RDATA objects.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RData_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Rdata_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Rdata_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
Rdata_init(s_Rdata* self, PyObject* args) {
- s_RRType* rrtype;
- s_RRClass* rrclass;
+ PyObject* rrtype;
+ PyObject* rrclass;
const char* s;
const char* data;
Py_ssize_t len;
@@ -152,34 +86,36 @@ Rdata_init(s_Rdata* self, PyObject* args) {
if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
&rrclass_type, &rrclass,
&s)) {
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass, s);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass), s);
return (0);
} else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
&rrclass_type, &rrclass, &data, &len)) {
InputBuffer input_buffer(data, len);
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass,
- input_buffer, len);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass),
+ input_buffer, len);
return (0);
}
return (-1);
}
-static void
+void
Rdata_destroy(s_Rdata* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rdata.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
Rdata_toText(s_Rdata* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rdata->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
Rdata_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -187,16 +123,16 @@ Rdata_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
Rdata_toWire(s_Rdata* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rdata->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
// We need to release the object we temporarily created here
@@ -204,7 +140,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
Py_DECREF(rd_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rdata->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -215,9 +151,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
return (NULL);
}
-
-
-static PyObject*
+PyObject*
RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
bool c;
@@ -229,24 +163,24 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
switch (op) {
case Py_LT:
- c = self->rdata->compare(*other->rdata) < 0;
+ c = self->cppobj->compare(*other->cppobj) < 0;
break;
case Py_LE:
- c = self->rdata->compare(*other->rdata) < 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) < 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_EQ:
- c = self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_NE:
- c = self->rdata->compare(*other->rdata) != 0;
+ c = self->cppobj->compare(*other->cppobj) != 0;
break;
case Py_GT:
- c = self->rdata->compare(*other->rdata) > 0;
+ c = self->cppobj->compare(*other->cppobj) > 0;
break;
case Py_GE:
- c = self->rdata->compare(*other->rdata) > 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) > 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -258,32 +192,107 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of Rdata
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Rdata(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rdata_type) < 0) {
- return (false);
- }
- Py_INCREF(&rdata_type);
- PyModule_AddObject(mod, "Rdata",
- reinterpret_cast<PyObject*>(&rdata_type));
+namespace isc {
+namespace dns {
+namespace python {
- // Add the exceptions to the class
- po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
- po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp
+//
+PyObject* po_InvalidRdataLength;
+PyObject* po_InvalidRdataText;
+PyObject* po_CharStringTooLong;
- po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong", NULL, NULL);
- PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Rdata
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rdata_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Rdata",
+ sizeof(s_Rdata), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Rdata_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Rdata_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Rdata class is an abstract base class that provides "
+ "a set of common interfaces to manipulate concrete RDATA objects.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RData_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Rdata_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Rdata_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
-
- return (true);
+PyObject*
+createRdataObject(ConstRdataPtr source) {
+ s_Rdata* py_rdata =
+ static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
+ if (py_rdata == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
+ }
+ py_rdata->cppobj = source;
+ return (py_rdata);
}
+
+bool
+PyRdata_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rdata_type));
+}
+
+const Rdata&
+PyRdata_ToRdata(const PyObject* rdata_obj) {
+ if (rdata_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rdata PyObject conversion");
+ }
+ const s_Rdata* rdata = static_cast<const s_Rdata*>(rdata_obj);
+ return (*rdata->cppobj);
+}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rdata_python.h b/src/lib/dns/python/rdata_python.h
new file mode 100644
index 0000000..c7ddd57
--- /dev/null
+++ b/src/lib/dns/python/rdata_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RDATA_H
+#define __PYTHON_RDATA_H 1
+
+#include <Python.h>
+
+#include <dns/rdata.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_InvalidRdataLength;
+extern PyObject* po_InvalidRdataText;
+extern PyObject* po_CharStringTooLong;
+
+extern PyTypeObject rdata_type;
+
+/// This is a simple shortcut to create a python Rdata object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRdataObject(isc::dns::rdata::ConstRdataPtr source);
+
+/// \brief Checks if the given python object is a Rdata object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rdata, false otherwise
+bool PyRdata_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rdata object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rdata; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRdata_Check()
+///
+/// \note This is not a copy; if the Rdata is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rdata_obj The rdata object to convert
+const isc::dns::rdata::Rdata& PyRdata_ToRdata(const PyObject* rdata_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RDATA_H
+
+// Local Variables:
+// mode: c++
+// End:
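
Inside a method wrapper, the usual way to accept an Rdata argument is PyArg_ParseTuple() with the "O!" format and rdata_type, after which PyRdata_ToRdata() exposes the wrapped object; RRset_addRdata() later in this diff follows exactly this pattern. A condensed sketch (Example_takesRdata() is made up for illustration):

    #include <Python.h>
    #include <dns/rdata.h>
    #include "rdata_python.h"

    using namespace isc::dns::python;

    // Illustrative wrapper that only accepts pydnspp.Rdata arguments and
    // returns their textual form.
    PyObject*
    Example_takesRdata(PyObject*, PyObject* args) {
        PyObject* rdata;
        // "O!" makes the interpreter reject anything that is not an Rdata,
        // so the conversion below is safe.
        if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
            return (NULL);
        }
        return (Py_BuildValue("s", PyRdata_ToRdata(rdata).toText().c_str()));
    }
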
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index 6d150c2..0014187 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -11,35 +11,28 @@
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <dns/rrclass.h>
-using namespace isc::dns;
-using namespace isc::util;
-
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRClass;
-static PyObject* po_IncompleteRRClass;
-
-//
-// Definition of the classes
-//
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
-//
-// RRClass
-//
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRClass : public PyObject {
public:
- RRClass* rrclass;
+ s_RRClass() : cppobj(NULL) {};
+ RRClass* cppobj;
};
//
@@ -48,25 +41,26 @@ public:
//
// General creation and destruction
-static int RRClass_init(s_RRClass* self, PyObject* args);
-static void RRClass_destroy(s_RRClass* self);
+int RRClass_init(s_RRClass* self, PyObject* args);
+void RRClass_destroy(s_RRClass* self);
// These are the functions we export
-static PyObject* RRClass_toText(s_RRClass* self);
+PyObject* RRClass_toText(s_RRClass* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRClass_str(PyObject* self);
-static PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
-static PyObject* RRClass_getCode(s_RRClass* self);
-static PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
+PyObject* RRClass_str(PyObject* self);
+PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
+PyObject* RRClass_getCode(s_RRClass* self);
+PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
// Static function for direct class creation
-static PyObject* RRClass_IN(s_RRClass *self);
-static PyObject* RRClass_CH(s_RRClass *self);
-static PyObject* RRClass_HS(s_RRClass *self);
-static PyObject* RRClass_NONE(s_RRClass *self);
-static PyObject* RRClass_ANY(s_RRClass *self);
+PyObject* RRClass_IN(s_RRClass *self);
+PyObject* RRClass_CH(s_RRClass *self);
+PyObject* RRClass_HS(s_RRClass *self);
+PyObject* RRClass_NONE(s_RRClass *self);
+PyObject* RRClass_ANY(s_RRClass *self);
+typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -74,7 +68,7 @@ static PyObject* RRClass_ANY(s_RRClass *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRClass_methods[] = {
+PyMethodDef RRClass_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRClass_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRClass_toWire), METH_VARARGS,
@@ -94,63 +88,7 @@ static PyMethodDef RRClass_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRClass
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrclass_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRClass",
- sizeof(s_RRClass), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRClass_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRClass_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRClass class encapsulates DNS resource record classes.\n"
- "This class manages the 16-bit integer class codes in quite a straightforward"
- "way. The only non trivial task is to handle textual representations of"
- "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRClass_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRClass_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRClass_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRClass_init(s_RRClass* self, PyObject* args) {
const char* s;
long i;
@@ -164,7 +102,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrclass = new RRClass(s);
+ self->cppobj = new RRClass(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
if (i < 0 || i > 0xffff) {
@@ -173,7 +111,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
"RR class number out of range");
return (-1);
}
- self->rrclass = new RRClass(i);
+ self->cppobj = new RRClass(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
uint8_t data[2];
@@ -182,7 +120,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (result);
}
InputBuffer ib(data, 2);
- self->rrclass = new RRClass(ib);
+ self->cppobj = new RRClass(ib);
PyErr_Clear();
return (0);
}
@@ -199,20 +137,20 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (-1);
}
-static void
+void
RRClass_destroy(s_RRClass* self) {
- delete self->rrclass;
- self->rrclass = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRClass_toText(s_RRClass* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrclass->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRClass_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -220,16 +158,16 @@ RRClass_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRClass_toWire(s_RRClass* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(2);
- self->rrclass->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -237,7 +175,7 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrclass->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -248,12 +186,12 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRClass_getCode(s_RRClass* self) {
- return (Py_BuildValue("I", self->rrclass->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
bool c;
@@ -265,24 +203,24 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrclass < *other->rrclass;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrclass < *other->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrclass == *other->rrclass;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrclass != *other->rrclass;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrclass < *self->rrclass;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrclass < *self->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -298,56 +236,131 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
//
// Common function for RRClass_IN/CH/etc.
//
-static PyObject* RRClass_createStatic(RRClass stc) {
+PyObject* RRClass_createStatic(RRClass stc) {
s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
if (ret != NULL) {
- ret->rrclass = new RRClass(stc);
+ ret->cppobj = new RRClass(stc);
}
return (ret);
}
-static PyObject* RRClass_IN(s_RRClass*) {
+PyObject* RRClass_IN(s_RRClass*) {
return (RRClass_createStatic(RRClass::IN()));
}
-static PyObject* RRClass_CH(s_RRClass*) {
+PyObject* RRClass_CH(s_RRClass*) {
return (RRClass_createStatic(RRClass::CH()));
}
-static PyObject* RRClass_HS(s_RRClass*) {
+PyObject* RRClass_HS(s_RRClass*) {
return (RRClass_createStatic(RRClass::HS()));
}
-static PyObject* RRClass_NONE(s_RRClass*) {
+PyObject* RRClass_NONE(s_RRClass*) {
return (RRClass_createStatic(RRClass::NONE()));
}
-static PyObject* RRClass_ANY(s_RRClass*) {
+PyObject* RRClass_ANY(s_RRClass*) {
return (RRClass_createStatic(RRClass::ANY()));
}
-// end of RRClass
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRClass;
+PyObject* po_IncompleteRRClass;
+
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRClass
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrclass_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRClass",
+ sizeof(s_RRClass), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRClass_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRClass_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRClass class encapsulates DNS resource record classes.\n"
+ "This class manages the 16-bit integer class codes in quite a straightforward"
+ "way. The only non trivial task is to handle textual representations of"
+ "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRClass_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRClass_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRClass_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRClassObject(const RRClass& source) {
+ RRClassContainer container(PyObject_New(s_RRClass, &rrclass_type));
+ container.set(new RRClass(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRClass(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass", NULL, NULL);
- Py_INCREF(po_InvalidRRClass);
- PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
- po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass", NULL, NULL);
- Py_INCREF(po_IncompleteRRClass);
- PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrclass_type) < 0) {
- return (false);
+PyRRClass_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&rrclass_type);
- PyModule_AddObject(mod, "RRClass",
- reinterpret_cast<PyObject*>(&rrclass_type));
-
- return (true);
+ return (PyObject_TypeCheck(obj, &rrclass_type));
}
+
+const RRClass&
+PyRRClass_ToRRClass(const PyObject* rrclass_obj) {
+ if (rrclass_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRClass PyObject conversion");
+ }
+ const s_RRClass* rrclass = static_cast<const s_RRClass*>(rrclass_obj);
+ return (*rrclass->cppobj);
+}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrclass_python.h b/src/lib/dns/python/rrclass_python.h
new file mode 100644
index 0000000..f58bba6
--- /dev/null
+++ b/src/lib/dns/python/rrclass_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRCLASS_H
+#define __PYTHON_RRCLASS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+
+namespace python {
+
+extern PyObject* po_InvalidRRClass;
+extern PyObject* po_IncompleteRRClass;
+
+extern PyTypeObject rrclass_type;
+
+/// This is a simple shortcut to create a python RRClass object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRClassObject(const RRClass& source);
+
+/// \brief Checks if the given python object is a RRClass object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRClass, false otherwise
+bool PyRRClass_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRClass object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRClass; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRClass_Check()
+///
+/// \note This is not a copy; if the RRClass is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrclass_obj The rrclass object to convert
+const RRClass& PyRRClass_ToRRClass(const PyObject* rrclass_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRCLASS_H
+
+// Local Variables:
+// mode: c++
+// End:
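
As the note above says, PyRRClass_ToRRClass() returns a reference into the python object, so a caller that needs the value after the PyObject may have gone away has to copy it; RRClass is a small value class, so the copy is cheap. A minimal sketch, assuming the caller owns a reference to rrclass_obj and wants to drop it (copyClassAndRelease() is hypothetical):

    #include <Python.h>
    #include <dns/rrclass.h>
    #include "rrclass_python.h"

    using namespace isc::dns;
    using namespace isc::dns::python;

    // Illustrative only: take our own copy before releasing the python object.
    RRClass
    copyClassAndRelease(PyObject* rrclass_obj) {
        const RRClass copy(PyRRClass_ToRRClass(rrclass_obj));
        Py_DECREF(rrclass_obj);   // the reference into rrclass_obj is now gone,
                                  // but 'copy' stays valid
        return (copy);
    }
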
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index c7d05d1..73a19e7 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -12,55 +12,63 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/rrset.h>
+#include <Python.h>
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_EmptyRRset;
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include <dns/rrset.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "pydnspp_common.h"
+#include "rrset_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
-// RRset
+// The s_* Class simply covers one instantiation of the object
// Using a shared_ptr here should not really be necessary (PyObject
// is already reference-counted), however internally on the cpp side,
// not doing so might result in problems, since we can't copy construct
-// rrsets, adding them to messages results in a problem when the
-// message is destroyed or cleared later
+// rdata field, adding them to rrsets results in a problem when the
+// rrset is destroyed later
class s_RRset : public PyObject {
public:
- RRsetPtr rrset;
+ isc::dns::RRsetPtr cppobj;
};
-static int RRset_init(s_RRset* self, PyObject* args);
-static void RRset_destroy(s_RRset* self);
-
-static PyObject* RRset_getRdataCount(s_RRset* self);
-static PyObject* RRset_getName(s_RRset* self);
-static PyObject* RRset_getClass(s_RRset* self);
-static PyObject* RRset_getType(s_RRset* self);
-static PyObject* RRset_getTTL(s_RRset* self);
-static PyObject* RRset_setName(s_RRset* self, PyObject* args);
-static PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-static PyObject* RRset_toText(s_RRset* self);
-static PyObject* RRset_str(PyObject* self);
-static PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-static PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-static PyObject* RRset_getRdata(s_RRset* self);
+int RRset_init(s_RRset* self, PyObject* args);
+void RRset_destroy(s_RRset* self);
+
+PyObject* RRset_getRdataCount(s_RRset* self);
+PyObject* RRset_getName(s_RRset* self);
+PyObject* RRset_getClass(s_RRset* self);
+PyObject* RRset_getType(s_RRset* self);
+PyObject* RRset_getTTL(s_RRset* self);
+PyObject* RRset_setName(s_RRset* self, PyObject* args);
+PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
+PyObject* RRset_toText(s_RRset* self);
+PyObject* RRset_str(PyObject* self);
+PyObject* RRset_toWire(s_RRset* self, PyObject* args);
+PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
+PyObject* RRset_getRdata(PyObject* po_self, PyObject*);
+PyObject* RRset_removeRRsig(s_RRset* self);
+
// TODO: iterator?
-static PyMethodDef RRset_methods[] = {
+PyMethodDef RRset_methods[] = {
{ "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
"Returns the number of rdata fields." },
{ "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
@@ -86,210 +94,144 @@ static PyMethodDef RRset_methods[] = {
"returned" },
{ "add_rdata", reinterpret_cast<PyCFunction>(RRset_addRdata), METH_VARARGS,
"Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
- { "get_rdata", reinterpret_cast<PyCFunction>(RRset_getRdata), METH_NOARGS,
+ { "get_rdata", RRset_getRdata, METH_NOARGS,
"Returns a List containing all Rdata elements" },
+ { "remove_rrsig", reinterpret_cast<PyCFunction>(RRset_removeRRsig), METH_NOARGS,
+ "Clears the list of RRsigs for this RRset" },
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject rrset_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRset",
- sizeof(s_RRset), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRset_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRset_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The AbstractRRset class is an abstract base class that "
- "models a DNS RRset.\n\n"
- "An object of (a specific derived class of) AbstractRRset "
- "models an RRset as described in the DNS standard:\n"
- "A set of DNS resource records (RRs) of the same type and class. "
- "The standard requires the TTL of all RRs in an RRset be the same; "
- "this class follows that requirement.\n\n"
- "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
- "RRset contains two identical RRs and that name servers should suppress "
- "such duplicates.\n"
- "This class is not responsible for ensuring this requirement: For example, "
- "addRdata() method doesn't check if there's already RDATA identical "
- "to the one being added.\n"
- "This is because such checks can be expensive, and it's often easy to "
- "ensure the uniqueness requirement at the %data preparation phase "
- "(e.g. when loading a zone).",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRset_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRset_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRset_init(s_RRset* self, PyObject* args) {
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
- s_RRTTL* rrttl;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
+ PyObject* rrttl;
if (PyArg_ParseTuple(args, "O!O!O!O!", &name_type, &name,
&rrclass_type, &rrclass,
&rrtype_type, &rrtype,
&rrttl_type, &rrttl
)) {
- self->rrset = RRsetPtr(new RRset(*name->name, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl));
+ self->cppobj = RRsetPtr(new RRset(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl)));
return (0);
}
- self->rrset = RRsetPtr();
+ self->cppobj = RRsetPtr();
return (-1);
}
-static void
+void
RRset_destroy(s_RRset* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rrset.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRset_getRdataCount(s_RRset* self) {
- return (Py_BuildValue("I", self->rrset->getRdataCount()));
+ return (Py_BuildValue("I", self->cppobj->getRdataCount()));
}
-static PyObject*
+PyObject*
RRset_getName(s_RRset* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->name = new Name(self->rrset->getName());
- if (name->name == NULL)
- {
- Py_DECREF(name);
- return (NULL);
- }
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Name");
}
-
- return (name);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getClass(s_RRset* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->rrset->getClass());
- if (rrclass->rrclass == NULL)
- {
- Py_DECREF(rrclass);
- return (NULL);
- }
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getType(s_RRset* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->rrset->getType());
- if (rrtype->rrtype == NULL)
- {
- Py_DECREF(rrtype);
- return (NULL);
- }
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getTTL(s_RRset* self) {
- s_RRTTL* rrttl;
-
- rrttl = static_cast<s_RRTTL*>(rrttl_type.tp_alloc(&rrttl_type, 0));
- if (rrttl != NULL) {
- rrttl->rrttl = new RRTTL(self->rrset->getTTL());
- if (rrttl->rrttl == NULL)
- {
- Py_DECREF(rrttl);
- return (NULL);
- }
+ try {
+ return (createRRTTLObject(self->cppobj->getTTL()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question TTL: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question TTL");
}
-
- return (rrttl);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_setName(s_RRset* self, PyObject* args) {
- s_Name* name;
+ PyObject* name;
if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
return (NULL);
}
- self->rrset->setName(*name->name);
+ self->cppobj->setName(PyName_ToName(name));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_setTTL(s_RRset* self, PyObject* args) {
- s_RRTTL* rrttl;
+ PyObject* rrttl;
if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
return (NULL);
}
- self->rrset->setTTL(*rrttl->rrttl);
+ self->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_toText(s_RRset* self) {
try {
- return (Py_BuildValue("s", self->rrset->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const EmptyRRset& ers) {
PyErr_SetString(po_EmptyRRset, ers.what());
return (NULL);
}
}
-static PyObject*
+PyObject*
RRset_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -297,17 +239,17 @@ RRset_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRset_toWire(s_RRset* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
try {
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4096);
- self->rrset->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -315,7 +257,7 @@ RRset_toWire(s_RRset* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrset->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -331,14 +273,14 @@ RRset_toWire(s_RRset* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRset_addRdata(s_RRset* self, PyObject* args) {
- s_Rdata* rdata;
+ PyObject* rdata;
if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
return (NULL);
}
try {
- self->rrset->addRdata(*rdata->rdata);
+ self->cppobj->addRdata(PyRdata_ToRdata(rdata));
Py_RETURN_NONE;
} catch (const std::bad_cast&) {
PyErr_Clear();
@@ -348,55 +290,176 @@ RRset_addRdata(s_RRset* self, PyObject* args) {
}
}
-static PyObject*
-RRset_getRdata(s_RRset* self) {
- PyObject* list = PyList_New(0);
-
- RdataIteratorPtr it = self->rrset->getRdataIterator();
-
- for (; !it->isLast(); it->next()) {
- s_Rdata *rds = static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
- if (rds != NULL) {
- // hmz them iterators/shared_ptrs and private constructors
- // make this a bit weird, so we create a new one with
- // the data available
- const Rdata *rd = &it->getCurrent();
- rds->rdata = createRdata(self->rrset->getType(), self->rrset->getClass(), *rd);
- PyList_Append(list, rds);
- } else {
- return (NULL);
+PyObject*
+RRset_getRdata(PyObject* po_self, PyObject*) {
+ const s_RRset* const self = static_cast<s_RRset*>(po_self);
+
+ try {
+ PyObjectContainer list_container(PyList_New(0));
+
+ for (RdataIteratorPtr it = self->cppobj->getRdataIterator();
+ !it->isLast(); it->next()) {
+ if (PyList_Append(list_container.get(),
+ PyObjectContainer(
+ createRdataObject(
+ createRdata(self->cppobj->getType(),
+ self->cppobj->getClass(),
+ it->getCurrent()))).get())
+ == -1) {
+ isc_throw(PyCPPWrapperException, "PyList_Append failed, "
+ "probably due to short memory");
+ }
}
+ return (list_container.release());
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Rdata: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Rdata");
}
-
- return (list);
+ return (NULL);
}
-// end of RRset
+PyObject*
+RRset_removeRRsig(s_RRset* self) {
+ self->cppobj->removeRRsig();
+ Py_RETURN_NONE;
+}
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_RRset(PyObject* mod) {
- // Add the exceptions to the module
- po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
- PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+namespace isc {
+namespace dns {
+namespace python {
- // Add the enums to the module
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_EmptyRRset;
- // Add the constants to the module
+PyTypeObject rrset_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRset",
+ sizeof(s_RRset), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRset_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRset_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The AbstractRRset class is an abstract base class that "
+ "models a DNS RRset.\n\n"
+ "An object of (a specific derived class of) AbstractRRset "
+ "models an RRset as described in the DNS standard:\n"
+ "A set of DNS resource records (RRs) of the same type and class. "
+ "The standard requires the TTL of all RRs in an RRset be the same; "
+ "this class follows that requirement.\n\n"
+ "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
+ "RRset contains two identical RRs and that name servers should suppress "
+ "such duplicates.\n"
+ "This class is not responsible for ensuring this requirement: For example, "
+ "addRdata() method doesn't check if there's already RDATA identical "
+ "to the one being added.\n"
+ "This is because such checks can be expensive, and it's often easy to "
+ "ensure the uniqueness requirement at the %data preparation phase "
+ "(e.g. when loading a zone).",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRset_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRset_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRsetObject(const RRset& source) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
+ // RRsets are noncopyable, so as a workaround we recreate a new one
+ // and copy over all content
+ RRsetPtr new_rrset = isc::dns::RRsetPtr(
+ new isc::dns::RRset(source.getName(), source.getClass(),
+ source.getType(), source.getTTL()));
- // NameComparisonResult
- if (PyType_Ready(&rrset_type) < 0) {
- return (false);
+ isc::dns::RdataIteratorPtr rdata_it(source.getRdataIterator());
+ for (rdata_it->first(); !rdata_it->isLast(); rdata_it->next()) {
+ new_rrset->addRdata(rdata_it->getCurrent());
+ }
+
+ isc::dns::RRsetPtr sigs = source.getRRsig();
+ if (sigs) {
+ new_rrset->addRRsig(sigs);
+ }
+ s_RRset* py_rrset =
+ static_cast<s_RRset*>(rrset_type.tp_alloc(&rrset_type, 0));
+ if (py_rrset == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
}
- Py_INCREF(&rrset_type);
- PyModule_AddObject(mod, "RRset",
- reinterpret_cast<PyObject*>(&rrset_type));
-
- return (true);
+ py_rrset->cppobj = new_rrset;
+ return (py_rrset);
}
+bool
+PyRRset_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrset_type));
+}
+
+RRset&
+PyRRset_ToRRset(PyObject* rrset_obj) {
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (*rrset->cppobj);
+}
+
+RRsetPtr
+PyRRset_ToRRsetPtr(PyObject* rrset_obj) {
+ if (rrset_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRset PyObject conversion");
+ }
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (rrset->cppobj);
+}
+
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rrset_python.h b/src/lib/dns/python/rrset_python.h
new file mode 100644
index 0000000..4268678
--- /dev/null
+++ b/src/lib/dns/python/rrset_python.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRSET_H
+#define __PYTHON_RRSET_H 1
+
+#include <Python.h>
+
+#include <dns/rrset.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_EmptyRRset;
+
+extern PyTypeObject rrset_type;
+
+/// This is a simple shortcut to create a python RRset object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRsetObject(const RRset& source);
+
+/// \brief Checks if the given python object is an RRset object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRset, false otherwise
+bool PyRRset_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRset object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \note This is not a copy; if the RRset is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrset_obj The rrset object to convert
+RRset& PyRRset_ToRRset(PyObject* rrset_obj);
+
+/// \brief Returns the shared_ptr of the RRset object contained within the
+/// given Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \param rrset_obj The rrset object to convert
+RRsetPtr PyRRset_ToRRsetPtr(PyObject* rrset_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRSET_H
+
+// Local Variables:
+// mode: c++
+// End:
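For context, the helpers declared in rrset_python.h are meant to be used together in the pattern already followed by the wrappers in rrset_python.cc: parse the argument with "O!" against rrset_type, convert it with PyRRset_ToRRset(), and wrap any C++ result back into Python with createRRsetObject() inside a try block. The sketch below is a hypothetical extension function, not part of this commit; the function name is invented, and error handling is reduced to PyExc_SystemError (the real wrappers use po_IscException from pydnspp_common.h).

// Hypothetical sketch only -- assumes the pydnspp build tree.
#include <Python.h>
#include <exception>
#include <dns/rrset.h>
#include "rrset_python.h"

namespace {
// Returns a fresh Python RRset that is a copy of the argument, or sets a
// Python exception and returns NULL.
PyObject*
example_copyRRset(PyObject*, PyObject* args) {
    PyObject* rrset_obj;
    // "O!" guarantees the argument is a pydnspp.RRset, the documented
    // precondition of PyRRset_ToRRset().
    if (!PyArg_ParseTuple(args, "O!", &isc::dns::python::rrset_type,
                          &rrset_obj)) {
        return (NULL);
    }
    try {
        const isc::dns::RRset& rrset =
            isc::dns::python::PyRRset_ToRRset(rrset_obj);
        // createRRsetObject() either returns an object with a reference
        // count of 1 or throws; it never returns NULL.
        return (isc::dns::python::createRRsetObject(rrset));
    } catch (const std::exception& ex) {
        PyErr_SetString(PyExc_SystemError, ex.what());
    } catch (...) {
        PyErr_SetString(PyExc_SystemError, "Unexpected failure copying RRset");
    }
    return (NULL);
}
}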
diff --git a/src/lib/dns/python/rrttl_python.cc b/src/lib/dns/python/rrttl_python.cc
index c4b25bf..3a3f067 100644
--- a/src/lib/dns/python/rrttl_python.cc
+++ b/src/lib/dns/python/rrttl_python.cc
@@ -12,57 +12,41 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrttl.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrttl_python.h"
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRTTL;
-static PyObject* po_IncompleteRRTTL;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRTTL
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRTTL : public PyObject {
public:
- RRTTL* rrttl;
+ s_RRTTL() : cppobj(NULL) {};
+ isc::dns::RRTTL* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
-static int RRTTL_init(s_RRTTL* self, PyObject* args);
-static void RRTTL_destroy(s_RRTTL* self);
+typedef CPPPyObjectContainer<s_RRTTL, RRTTL> RRTTLContainer;
-// These are the functions we export
-static PyObject* RRTTL_toText(s_RRTTL* self);
+PyObject* RRTTL_toText(s_RRTTL* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRTTL_str(PyObject* self);
-static PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
-static PyObject* RRTTL_getValue(s_RRTTL* self);
-static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
+PyObject* RRTTL_str(PyObject* self);
+PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
+PyObject* RRTTL_getValue(s_RRTTL* self);
+PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -70,7 +54,7 @@ static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRTTL_methods[] = {
+PyMethodDef RRTTL_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRTTL_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRTTL_toWire), METH_VARARGS,
@@ -85,65 +69,7 @@ static PyMethodDef RRTTL_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRTTL
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrttl_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRTTL",
- sizeof(s_RRTTL), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRTTL_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRTTL_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
- "This is a straightforward class; an RRTTL object simply maintains a "
- "32-bit unsigned integer corresponding to the TTL value. The main purpose "
- "of this class is to provide convenient interfaces to convert a textual "
- "representation into the integer TTL value and vice versa, and to handle "
- "wire-format representations.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRTTL_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRTTL_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRTTL_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRTTL_init(s_RRTTL* self, PyObject* args) {
const char* s;
long long i;
@@ -157,7 +83,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrttl = new RRTTL(s);
+ self->cppobj = new RRTTL(s);
return (0);
} else if (PyArg_ParseTuple(args, "L", &i)) {
PyErr_Clear();
@@ -165,7 +91,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR TTL number out of range");
return (-1);
}
- self->rrttl = new RRTTL(i);
+ self->cppobj = new RRTTL(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) &&
PySequence_Check(bytes)) {
@@ -176,7 +102,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrttl = new RRTTL(ib);
+ self->cppobj = new RRTTL(ib);
PyErr_Clear();
return (0);
}
@@ -200,20 +126,20 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (-1);
}
-static void
+void
RRTTL_destroy(s_RRTTL* self) {
- delete self->rrttl;
- self->rrttl = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRTTL_toText(s_RRTTL* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrttl->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRTTL_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -221,16 +147,16 @@ RRTTL_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRTTL_toWire(s_RRTTL* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rrttl->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -239,7 +165,7 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrttl->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -250,12 +176,12 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRTTL_getValue(s_RRTTL* self) {
- return (Py_BuildValue("I", self->rrttl->getValue()));
+ return (Py_BuildValue("I", self->cppobj->getValue()));
}
-static PyObject*
+PyObject*
RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
bool c = false;
@@ -267,24 +193,24 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrttl < *other->rrttl;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrttl < *other->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrttl == *other->rrttl;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrttl != *other->rrttl;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrttl < *self->rrttl;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrttl < *self->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
}
if (c)
@@ -292,27 +218,104 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of RRTTL
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRTTL;
+PyObject* po_IncompleteRRTTL;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRTTL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrttl_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRTTL",
+ sizeof(s_RRTTL), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRTTL_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRTTL_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
+ "This is a straightforward class; an RRTTL object simply maintains a "
+ "32-bit unsigned integer corresponding to the TTL value. The main purpose "
+ "of this class is to provide convenient interfaces to convert a textual "
+ "representation into the integer TTL value and vice versa, and to handle "
+ "wire-format representations.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRTTL_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRTTL_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRTTL_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRTTLObject(const RRTTL& source) {
+ RRTTLContainer container(PyObject_New(s_RRTTL, &rrttl_type));
+ container.set(new RRTTL(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRTTL(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
- po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+PyRRTTL_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrttl_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrttl_type) < 0) {
- return (false);
+const RRTTL&
+PyRRTTL_ToRRTTL(const PyObject* rrttl_obj) {
+ if (rrttl_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRTTL PyObject conversion");
}
- Py_INCREF(&rrttl_type);
- PyModule_AddObject(mod, "RRTTL",
- reinterpret_cast<PyObject*>(&rrttl_type));
-
- return (true);
+ const s_RRTTL* rrttl = static_cast<const s_RRTTL*>(rrttl_obj);
+ return (*rrttl->cppobj);
}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/rrttl_python.h b/src/lib/dns/python/rrttl_python.h
new file mode 100644
index 0000000..9dbc982
--- /dev/null
+++ b/src/lib/dns/python/rrttl_python.h
@@ -0,0 +1,67 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTTL_H
+#define __PYTHON_RRTTL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRTTL;
+
+namespace python {
+
+extern PyObject* po_InvalidRRTTL;
+extern PyObject* po_IncompleteRRTTL;
+
+extern PyTypeObject rrttl_type;
+
+/// This is a simple shortcut to create a python RRTTL object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTTLObject(const RRTTL& source);
+
+/// \brief Checks if the given python object is an RRTTL object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRTTL, false otherwise
+bool PyRRTTL_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRTTL object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRTTL; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRTTL_Check()
+///
+/// \note This is not a copy; if the RRTTL is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrttl_obj The rrttl object to convert
+const RRTTL& PyRRTTL_ToRRTTL(const PyObject* rrttl_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTTL_H
+
+// Local Variables:
+// mode: c++
+// End:
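As a minimal sketch of the RRTTL helpers above (hypothetical, not part of this commit): when the argument arrives as a bare "O" object rather than via "O!", PyRRTTL_Check() is the explicit type test to run before PyRRTTL_ToRRTTL(), and createRRTTLObject() re-wraps the C++ value. The function name below is invented for illustration and error handling is reduced to PyExc_SystemError.

// Hypothetical sketch only -- assumes the pydnspp build tree.
#include <Python.h>
#include <exception>
#include <dns/rrttl.h>
#include "rrttl_python.h"

namespace {
// Returns a new Python RRTTL holding the same value as the argument
// (METH_O style), or sets a Python exception and returns NULL.
PyObject*
example_cloneRRTTL(PyObject*, PyObject* arg) {
    try {
        // Unlike "O!" parsing, a bare object needs an explicit type check
        // before conversion; PyRRTTL_Check() throws only if arg is NULL.
        if (!isc::dns::python::PyRRTTL_Check(arg)) {
            PyErr_SetString(PyExc_TypeError, "expected a pydnspp.RRTTL");
            return (NULL);
        }
        const isc::dns::RRTTL& ttl =
            isc::dns::python::PyRRTTL_ToRRTTL(arg);
        return (isc::dns::python::createRRTTLObject(ttl));
    } catch (const std::exception& ex) {
        PyErr_SetString(PyExc_SystemError, ex.what());
    } catch (...) {
        PyErr_SetString(PyExc_SystemError, "Unexpected failure cloning RRTTL");
    }
    return (NULL);
}
}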
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 00e0acd..bf20b7c 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -12,77 +12,64 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrtype.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRType;
-static PyObject* po_IncompleteRRType;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRType
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRType : public PyObject {
public:
- const RRType* rrtype;
+ const RRType* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
// General creation and destruction
-static int RRType_init(s_RRType* self, PyObject* args);
-static void RRType_destroy(s_RRType* self);
+int RRType_init(s_RRType* self, PyObject* args);
+void RRType_destroy(s_RRType* self);
// These are the functions we export
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRType_str(PyObject* self);
-static PyObject* RRType_toWire(s_RRType* self, PyObject* args);
-static PyObject* RRType_getCode(s_RRType* self);
-static PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
-static PyObject* RRType_NSEC3PARAM(s_RRType *self);
-static PyObject* RRType_DNAME(s_RRType *self);
-static PyObject* RRType_PTR(s_RRType *self);
-static PyObject* RRType_MX(s_RRType *self);
-static PyObject* RRType_DNSKEY(s_RRType *self);
-static PyObject* RRType_TXT(s_RRType *self);
-static PyObject* RRType_RRSIG(s_RRType *self);
-static PyObject* RRType_NSEC(s_RRType *self);
-static PyObject* RRType_AAAA(s_RRType *self);
-static PyObject* RRType_DS(s_RRType *self);
-static PyObject* RRType_OPT(s_RRType *self);
-static PyObject* RRType_A(s_RRType *self);
-static PyObject* RRType_NS(s_RRType *self);
-static PyObject* RRType_CNAME(s_RRType *self);
-static PyObject* RRType_SOA(s_RRType *self);
-static PyObject* RRType_NSEC3(s_RRType *self);
-static PyObject* RRType_IXFR(s_RRType *self);
-static PyObject* RRType_AXFR(s_RRType *self);
-static PyObject* RRType_ANY(s_RRType *self);
+PyObject* RRType_str(PyObject* self);
+PyObject* RRType_toWire(s_RRType* self, PyObject* args);
+PyObject* RRType_getCode(s_RRType* self);
+PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
+PyObject* RRType_NSEC3PARAM(s_RRType *self);
+PyObject* RRType_DNAME(s_RRType *self);
+PyObject* RRType_PTR(s_RRType *self);
+PyObject* RRType_MX(s_RRType *self);
+PyObject* RRType_DNSKEY(s_RRType *self);
+PyObject* RRType_TXT(s_RRType *self);
+PyObject* RRType_RRSIG(s_RRType *self);
+PyObject* RRType_NSEC(s_RRType *self);
+PyObject* RRType_AAAA(s_RRType *self);
+PyObject* RRType_DS(s_RRType *self);
+PyObject* RRType_OPT(s_RRType *self);
+PyObject* RRType_A(s_RRType *self);
+PyObject* RRType_NS(s_RRType *self);
+PyObject* RRType_CNAME(s_RRType *self);
+PyObject* RRType_SOA(s_RRType *self);
+PyObject* RRType_NSEC3(s_RRType *self);
+PyObject* RRType_IXFR(s_RRType *self);
+PyObject* RRType_AXFR(s_RRType *self);
+PyObject* RRType_ANY(s_RRType *self);
+
+typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -90,7 +77,7 @@ static PyObject* RRType_ANY(s_RRType *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRType_methods[] = {
+PyMethodDef RRType_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRType_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRType_toWire), METH_VARARGS,
@@ -124,63 +111,7 @@ static PyMethodDef RRType_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRType
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrtype_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRType",
- sizeof(s_RRType), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRType_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRType_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRType class encapsulates DNS resource record types.\n\n"
- "This class manages the 16-bit integer type codes in quite a straightforward "
- "way. The only non trivial task is to handle textual representations of "
- "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRType_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRType_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRType_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRType_init(s_RRType* self, PyObject* args) {
const char* s;
long i;
@@ -194,7 +125,7 @@ RRType_init(s_RRType* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrtype = new RRType(s);
+ self->cppobj = new RRType(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
PyErr_Clear();
@@ -202,7 +133,7 @@ RRType_init(s_RRType* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR Type number out of range");
return (-1);
}
- self->rrtype = new RRType(i);
+ self->cppobj = new RRType(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
Py_ssize_t size = PySequence_Size(bytes);
@@ -212,7 +143,7 @@ RRType_init(s_RRType* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrtype = new RRType(ib);
+ self->cppobj = new RRType(ib);
PyErr_Clear();
return (0);
}
@@ -236,36 +167,36 @@ RRType_init(s_RRType* self, PyObject* args) {
return (-1);
}
-static void
+void
RRType_destroy(s_RRType* self) {
- delete self->rrtype;
- self->rrtype = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrtype->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRType_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRType_toWire(s_RRType* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
OutputBuffer buffer(2);
- self->rrtype->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -273,7 +204,7 @@ RRType_toWire(s_RRType* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrtype->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -284,12 +215,12 @@ RRType_toWire(s_RRType* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRType_getCode(s_RRType* self) {
- return (Py_BuildValue("I", self->rrtype->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
bool c;
@@ -301,24 +232,24 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrtype < *other->rrtype;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrtype < *other->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrtype == *other->rrtype;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrtype != *other->rrtype;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrtype < *self->rrtype;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrtype < *self->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -334,131 +265,200 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
//
// Common function for RRType_A/NS/etc.
//
-static PyObject* RRType_createStatic(RRType stc) {
+PyObject* RRType_createStatic(RRType stc) {
s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
if (ret != NULL) {
- ret->rrtype = new RRType(stc);
+ ret->cppobj = new RRType(stc);
}
return (ret);
}
-static PyObject*
+PyObject*
RRType_NSEC3PARAM(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3PARAM()));
}
-static PyObject*
+PyObject*
RRType_DNAME(s_RRType*) {
return (RRType_createStatic(RRType::DNAME()));
}
-static PyObject*
+PyObject*
RRType_PTR(s_RRType*) {
return (RRType_createStatic(RRType::PTR()));
}
-static PyObject*
+PyObject*
RRType_MX(s_RRType*) {
return (RRType_createStatic(RRType::MX()));
}
-static PyObject*
+PyObject*
RRType_DNSKEY(s_RRType*) {
return (RRType_createStatic(RRType::DNSKEY()));
}
-static PyObject*
+PyObject*
RRType_TXT(s_RRType*) {
return (RRType_createStatic(RRType::TXT()));
}
-static PyObject*
+PyObject*
RRType_RRSIG(s_RRType*) {
return (RRType_createStatic(RRType::RRSIG()));
}
-static PyObject*
+PyObject*
RRType_NSEC(s_RRType*) {
return (RRType_createStatic(RRType::NSEC()));
}
-static PyObject*
+PyObject*
RRType_AAAA(s_RRType*) {
return (RRType_createStatic(RRType::AAAA()));
}
-static PyObject*
+PyObject*
RRType_DS(s_RRType*) {
return (RRType_createStatic(RRType::DS()));
}
-static PyObject*
+PyObject*
RRType_OPT(s_RRType*) {
return (RRType_createStatic(RRType::OPT()));
}
-static PyObject*
+PyObject*
RRType_A(s_RRType*) {
return (RRType_createStatic(RRType::A()));
}
-static PyObject*
+PyObject*
RRType_NS(s_RRType*) {
return (RRType_createStatic(RRType::NS()));
}
-static PyObject*
+PyObject*
RRType_CNAME(s_RRType*) {
return (RRType_createStatic(RRType::CNAME()));
}
-static PyObject*
+PyObject*
RRType_SOA(s_RRType*) {
return (RRType_createStatic(RRType::SOA()));
}
-static PyObject*
+PyObject*
RRType_NSEC3(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3()));
}
-static PyObject*
+PyObject*
RRType_IXFR(s_RRType*) {
return (RRType_createStatic(RRType::IXFR()));
}
-static PyObject*
+PyObject*
RRType_AXFR(s_RRType*) {
return (RRType_createStatic(RRType::AXFR()));
}
-static PyObject*
+PyObject*
RRType_ANY(s_RRType*) {
return (RRType_createStatic(RRType::ANY()));
}
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
-// end of RRType
+PyObject* po_InvalidRRType;
+PyObject* po_IncompleteRRType;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRType
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrtype_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRType",
+ sizeof(s_RRType), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRType_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRType_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRType class encapsulates DNS resource record types.\n\n"
+ "This class manages the 16-bit integer type codes in quite a straightforward "
+ "way. The only non trivial task is to handle textual representations of "
+ "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRType_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRType_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRType_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createRRTypeObject(const RRType& source) {
+ RRTypeContainer container(PyObject_New(s_RRType, &rrtype_type));
+ container.set(new RRType(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRType(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
- po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrtype_type) < 0) {
- return (false);
+PyRRType_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrtype_type));
+}
+
+const RRType&
+PyRRType_ToRRType(const PyObject* rrtype_obj) {
+ if (rrtype_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRType PyObject conversion");
}
- Py_INCREF(&rrtype_type);
- PyModule_AddObject(mod, "RRType",
- reinterpret_cast<PyObject*>(&rrtype_type));
-
- return (true);
+ const s_RRType* rrtype = static_cast<const s_RRType*>(rrtype_obj);
+ return (*rrtype->cppobj);
}
+
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrtype_python.h b/src/lib/dns/python/rrtype_python.h
new file mode 100644
index 0000000..596598e
--- /dev/null
+++ b/src/lib/dns/python/rrtype_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTYPE_H
+#define __PYTHON_RRTYPE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRType;
+
+namespace python {
+
+extern PyObject* po_InvalidRRType;
+extern PyObject* po_IncompleteRRType;
+
+extern PyTypeObject rrtype_type;
+
+/// This is a simple shortcut to create a python RRType object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTypeObject(const RRType& source);
+
+/// \brief Checks if the given python object is an RRType object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRType, false otherwise
+bool PyRRType_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRType object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRType; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRType_Check()
+///
+/// \note This is not a copy; if the RRType is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrtype_obj The rrtype object to convert
+const RRType& PyRRType_ToRRType(const PyObject* rrtype_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTYPE_H
+
+// Local Variables:
+// mode: c++
+// End:
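Likewise for RRType, a hypothetical sketch (not part of this commit) of the documented calling convention: a successful "O!" parse against rrtype_type satisfies the precondition of PyRRType_ToRRType(), after which the wrapped C++ object can be used directly -- here via RRType::getCode(), the same call RRType_getCode() makes in rrtype_python.cc. The function name is invented for illustration.

// Hypothetical sketch only -- assumes the pydnspp build tree.
#include <Python.h>
#include <dns/rrtype.h>
#include "rrtype_python.h"

namespace {
// Returns the 16-bit type code of the given pydnspp.RRType as a Python
// integer, or NULL with a TypeError set if the argument has the wrong type.
PyObject*
example_typeCode(PyObject*, PyObject* args) {
    PyObject* rrtype_obj;
    // "O!" sets a TypeError and fails if the argument is not of
    // rrtype_type, so the conversion below is safe.
    if (!PyArg_ParseTuple(args, "O!", &isc::dns::python::rrtype_type,
                          &rrtype_obj)) {
        return (NULL);
    }
    const isc::dns::RRType& rrtype =
        isc::dns::python::PyRRType_ToRRType(rrtype_obj);
    return (Py_BuildValue("I", rrtype.getCode()));
}
}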
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index 9ee98c7..d1273f3 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -12,7 +12,10 @@ PYTESTS += rrset_python_test.py
PYTESTS += rrttl_python_test.py
PYTESTS += rrtype_python_test.py
PYTESTS += tsig_python_test.py
+PYTESTS += tsig_rdata_python_test.py
+PYTESTS += tsigerror_python_test.py
PYTESTS += tsigkey_python_test.py
+PYTESTS += tsigrecord_python_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testutil.py
@@ -21,7 +24,7 @@ EXTRA_DIST += testutil.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -33,8 +36,13 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/dns/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ env PYTHONPATH=$(abs_top_builddir)/src/lib/util/pyunittests/.libs:$(abs_top_srcdir)/src/lib/dns/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_top_srcdir)/src/lib/dns/tests/testdata:$(abs_top_builddir)/src/lib/dns/tests/testdata \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index 72807cc..86574fb 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -17,10 +17,12 @@
# Tests for the message part of the pydnspp module
#
+import sys
import unittest
import os
from pydnspp import *
from testutil import *
+from pyunittests_util import fix_current_time
# helper functions for tests taken from c++ unittests
if "TESTDATA_PATH" in os.environ:
@@ -28,10 +30,10 @@ if "TESTDATA_PATH" in os.environ:
else:
testdata_path = "../tests/testdata"
-def factoryFromFile(message, file):
+def factoryFromFile(message, file, parse_options=Message.PARSE_DEFAULT):
data = read_wire_data(file)
- message.from_wire(data)
- pass
+ message.from_wire(data, parse_options)
+ return data
# we don't have direct comparison for rrsets right now (should we?
# should go in the cpp version first then), so also no direct list
@@ -44,6 +46,15 @@ def compare_rrset_list(list1, list2):
return False
return True
+# These are used for TSIG + TC tests
+LONG_TXT1 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";
+
+LONG_TXT2 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456";
+
+LONG_TXT3 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01";
+
+LONG_TXT4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0";
+
# a complete message taken from cpp tests, for testing towire and totext
def create_message():
message_render = Message(Message.RENDER)
@@ -62,16 +73,12 @@ def create_message():
message_render.add_rrset(Message.SECTION_ANSWER, rrset)
return message_render
-def strip_mutable_tsig_data(data):
- # Unfortunately we cannot easily compare TSIG RR because we can't tweak
- # current time. As a work around this helper function strips off the time
- # dependent part of TSIG RDATA, i.e., the MAC (assuming HMAC-MD5) and
- # Time Signed.
- return data[0:-32] + data[-26:-22] + data[-6:]
-
class MessageTest(unittest.TestCase):
def setUp(self):
+ # make sure we don't use faked time unless explicitly do so in tests
+ fix_current_time(None)
+
self.p = Message(Message.PARSE)
self.r = Message(Message.RENDER)
@@ -90,6 +97,10 @@ class MessageTest(unittest.TestCase):
self.tsig_key = TSIGKey("www.example.com:SFuWd/q99SzF8Yzd1QbB9g==")
self.tsig_ctx = TSIGContext(self.tsig_key)
+ def tearDown(self):
+ # reset any faked current time setting (it would affect other tests)
+ fix_current_time(None)
+
def test_init(self):
self.assertRaises(TypeError, Message, -1)
self.assertRaises(TypeError, Message, 3)
@@ -220,6 +231,14 @@ class MessageTest(unittest.TestCase):
self.assertTrue(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_ANSWER)))
self.assertEqual(2, self.r.get_rr_count(Message.SECTION_ANSWER))
+ # We always make a new deep copy in get_section(), so the reference
+ # count of the returned list and its each item should be 1; otherwise
+ # they would leak.
+ self.assertEqual(1, sys.getrefcount(self.r.get_section(
+ Message.SECTION_ANSWER)))
+ self.assertEqual(1, sys.getrefcount(self.r.get_section(
+ Message.SECTION_ANSWER)[0]))
+
self.assertFalse(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_AUTHORITY)))
self.assertEqual(0, self.r.get_rr_count(Message.SECTION_AUTHORITY))
self.r.add_rrset(Message.SECTION_AUTHORITY, self.rrset_a)
@@ -232,7 +251,7 @@ class MessageTest(unittest.TestCase):
self.assertTrue(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_ADDITIONAL)))
self.assertEqual(2, self.r.get_rr_count(Message.SECTION_ADDITIONAL))
- def test_add_question(self):
+ def test_add_and_get_question(self):
self.assertRaises(TypeError, self.r.add_question, "wrong", "wrong")
q = Question(Name("example.com"), RRClass("IN"), RRType("A"))
qs = [q]
@@ -242,6 +261,12 @@ class MessageTest(unittest.TestCase):
self.assertTrue(compare_rrset_list(qs, self.r.get_question()))
self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION))
+ # We always make a new deep copy in get_section(), so the reference
+ # count of the returned list and its each item should be 1; otherwise
+ # they would leak.
+ self.assertEqual(1, sys.getrefcount(self.r.get_question()))
+ self.assertEqual(1, sys.getrefcount(self.r.get_question()[0]))
+
def test_add_rrset(self):
self.assertRaises(TypeError, self.r.add_rrset, "wrong")
self.assertRaises(TypeError, self.r.add_rrset)
@@ -285,33 +310,112 @@ class MessageTest(unittest.TestCase):
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
- def __common_tsigquery_setup(self):
+ def __common_tsigmessage_setup(self, flags=[Message.HEADERFLAG_RD],
+ rrtype=RRType("A"), answer_data=None):
self.r.set_opcode(Opcode.QUERY())
self.r.set_rcode(Rcode.NOERROR())
- self.r.set_header_flag(Message.HEADERFLAG_RD)
+ for flag in flags:
+ self.r.set_header_flag(flag)
+ if answer_data is not None:
+ rrset = RRset(Name("www.example.com"), RRClass("IN"),
+ rrtype, RRTTL(86400))
+ for rdata in answer_data:
+ rrset.add_rdata(Rdata(rrtype, RRClass("IN"), rdata))
+ self.r.add_rrset(Message.SECTION_ANSWER, rrset)
self.r.add_question(Question(Name("www.example.com"),
- RRClass("IN"), RRType("A")))
+ RRClass("IN"), rrtype))
def __common_tsig_checks(self, expected_file):
renderer = MessageRenderer()
self.r.to_wire(renderer, self.tsig_ctx)
- actual_wire = strip_mutable_tsig_data(renderer.get_data())
- expected_wire = strip_mutable_tsig_data(read_wire_data(expected_file))
- self.assertEqual(expected_wire, actual_wire)
+ self.assertEqual(read_wire_data(expected_file), renderer.get_data())
def test_to_wire_with_tsig(self):
+ fix_current_time(0x4da8877a)
self.r.set_qid(0x2d65)
- self.__common_tsigquery_setup()
+ self.__common_tsigmessage_setup()
self.__common_tsig_checks("message_toWire2.wire")
def test_to_wire_with_edns_tsig(self):
+ fix_current_time(0x4db60d1f)
self.r.set_qid(0x6cd)
- self.__common_tsigquery_setup()
+ self.__common_tsigmessage_setup()
edns = EDNS()
edns.set_udp_size(4096)
self.r.set_edns(edns)
self.__common_tsig_checks("message_toWire3.wire")
+ def test_to_wire_tsig_truncation(self):
+ fix_current_time(0x4e179212)
+ data = factoryFromFile(self.p, "message_fromWire17.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0x22c2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT2])
+ self.__common_tsig_checks("message_toWire4.wire")
+
+ def test_to_wire_tsig_truncation2(self):
+ fix_current_time(0x4e179212)
+ data = factoryFromFile(self.p, "message_fromWire17.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0x22c2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT3])
+ self.__common_tsig_checks("message_toWire4.wire")
+
+ def test_to_wire_tsig_truncation3(self):
+ self.r.set_opcode(Opcode.QUERY())
+ self.r.set_rcode(Rcode.NOERROR())
+ for i in range(1, 68):
+ self.r.add_question(Question(Name("www.example.com"),
+ RRClass("IN"), RRType(i)))
+ renderer = MessageRenderer()
+ self.r.to_wire(renderer, self.tsig_ctx)
+
+ self.p.from_wire(renderer.get_data())
+ self.assertTrue(self.p.get_header_flag(Message.HEADERFLAG_TC))
+ self.assertEqual(66, self.p.get_rr_count(Message.SECTION_QUESTION))
+ self.assertNotEqual(None, self.p.get_tsig_record())
+
+ def test_to_wire_tsig_no_truncation(self):
+ fix_current_time(0x4e17b38d)
+ data = factoryFromFile(self.p, "message_fromWire18.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0xd6e2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT4])
+ self.__common_tsig_checks("message_toWire5.wire")
+
+ def test_to_wire_tsig_length_errors(self):
+ renderer = MessageRenderer()
+ renderer.set_length_limit(84) # 84 = expected TSIG length - 1
+ self.__common_tsigmessage_setup()
+ self.assertRaises(TSIGContextError,
+ self.r.to_wire, renderer, self.tsig_ctx)
+
+ renderer.clear()
+ self.r.clear(Message.RENDER)
+ renderer.set_length_limit(86) # 86 = expected TSIG length + 1
+ self.__common_tsigmessage_setup()
+ self.assertRaises(TSIGContextError,
+ self.r.to_wire, renderer, self.tsig_ctx)
+
+ # skip the last test of the corresponding C++ test: it requires
+ # subclassing MessageRenderer, which is (currently) not possible
+ # for python. In any case, it's very unlikely to happen in practice.
+
def test_to_text(self):
message_render = create_message()
@@ -377,6 +481,54 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual("192.0.2.2", rdata[1].to_text())
self.assertEqual(2, len(rdata))
+ def test_from_wire_short_buffer(self):
+ data = read_wire_data("message_fromWire22.wire")
+ self.assertRaises(DNSMessageFORMERR, self.p.from_wire, data[:-1])
+
+ def test_from_wire_combind_rrs(self):
+ factoryFromFile(self.p, "message_fromWire19.wire")
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ self.assertEqual(2, len(rrset.get_rdata()))
+
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ self.assertEqual(1, len(rrset.get_rdata()))
+
+ def check_preserve_rrs(self, message, section):
+ rrset = message.get_section(section)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('2001:db8::1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[2]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.2', rdata[0].to_text())
+
+ def test_from_wire_preserve_answer(self):
+ factoryFromFile(self.p, "message_fromWire19.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ANSWER)
+
+ def test_from_wire_preserve_authority(self):
+ factoryFromFile(self.p, "message_fromWire20.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_AUTHORITY)
+
+ def test_from_wire_preserve_additional(self):
+ factoryFromFile(self.p, "message_fromWire21.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ADDITIONAL)
+
def test_EDNS0ExtCode(self):
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
@@ -422,7 +574,54 @@ test.example.com. 3600 IN A 192.0.2.2
factoryFromFile,
message_parse,
"message_fromWire9")
-
+
+ def test_from_wire_with_tsig(self):
+ # Initially there should be no TSIG
+ self.assertEqual(None, self.p.get_tsig_record())
+
+ # getTSIGRecord() is only valid in the parse mode.
+ self.assertRaises(InvalidMessageOperation, self.r.get_tsig_record)
+
+ factoryFromFile(self.p, "message_toWire2.wire")
+ tsig_rr = self.p.get_tsig_record()
+ self.assertEqual(Name("www.example.com"), tsig_rr.get_name())
+ self.assertEqual(85, tsig_rr.get_length())
+ self.assertEqual(TSIGKey.HMACMD5_NAME,
+ tsig_rr.get_rdata().get_algorithm())
+
+ # If we clear the message for reuse, the recorded TSIG will be cleared.
+ self.p.clear(Message.PARSE)
+ self.assertEqual(None, self.p.get_tsig_record())
+
+ def test_from_wire_with_tsigcompressed(self):
+ # Mostly same as fromWireWithTSIG, but the TSIG owner name is
+ # compressed.
+ factoryFromFile(self.p, "message_fromWire12.wire");
+ tsig_rr = self.p.get_tsig_record()
+ self.assertEqual(Name("www.example.com"), tsig_rr.get_name())
+ # len(www.example.com) = 17, but when fully compressed, the length is
+ # 2 bytes. So the length of the record should be 15 bytes shorter.
+ self.assertEqual(70, tsig_rr.get_length())
+
+ def test_from_wire_with_badtsig(self):
+ # Multiple TSIG RRs
+ self.assertRaises(DNSMessageFORMERR, factoryFromFile,
+ self.p, "message_fromWire13.wire")
+ self.p.clear(Message.PARSE)
+
+ # TSIG in the answer section (must be in additional)
+ self.assertRaises(DNSMessageFORMERR, factoryFromFile,
+ self.p, "message_fromWire14.wire")
+ self.p.clear(Message.PARSE)
+
+ # TSIG is not the last record.
+ self.assertRaises(DNSMessageFORMERR, factoryFromFile,
+ self.p, "message_fromWire15.wire")
+ self.p.clear(Message.PARSE)
+
+ # Unexpected RR Class (this will fail in constructing TSIGRecord)
+ self.assertRaises(DNSMessageFORMERR, factoryFromFile,
+ self.p, "message_fromWire16.wire")
if __name__ == '__main__':
unittest.main()
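
A rough usage sketch (not part of the patch) of the parse-side TSIG API exercised
above: read a signed message from wire data and inspect its TSIG record. It assumes
the read_wire_data() helper imported from testutil in these tests and the same
message_toWire2.wire file used by test_from_wire_with_tsig.

    from pydnspp import Message, Name, TSIGKey
    from testutil import read_wire_data    # test helper, as used above

    msg = Message(Message.PARSE)
    wire = read_wire_data("message_toWire2.wire")
    msg.from_wire(wire)

    tsig_rr = msg.get_tsig_record()        # None if the message isn't signed
    assert Name("www.example.com") == tsig_rr.get_name()
    assert TSIGKey.HMACMD5_NAME == tsig_rr.get_rdata().get_algorithm()

    msg.clear(Message.PARSE)               # clearing also drops the recorded TSIG
    assert msg.get_tsig_record() is None
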
diff --git a/src/lib/dns/python/tests/name_python_test.py b/src/lib/dns/python/tests/name_python_test.py
index b8e625a..5263412 100644
--- a/src/lib/dns/python/tests/name_python_test.py
+++ b/src/lib/dns/python/tests/name_python_test.py
@@ -121,6 +121,15 @@ class NameTest(unittest.TestCase):
self.assertEqual(".", str(self.name2))
self.assertEqual("something.completely.different.", self.name3.to_text())
+ self.assertEqual("example.com.", self.name1.to_text(False))
+ self.assertEqual("example.com", self.name1.to_text(True))
+
+ # make sure it does not behave unexpectedly on wrong arguments
+ self.assertRaises(TypeError, self.name1.to_text, True, 1)
+ self.assertRaises(TypeError, self.name1.to_text, 1)
+ self.assertRaises(TypeError, self.name1.to_text, [])
+ self.assertRaises(TypeError, self.name1.to_text, "foo")
+
def test_to_wire(self):
b1 = bytearray()
self.name1.to_wire(b1)
diff --git a/src/lib/dns/python/tests/question_python_test.py b/src/lib/dns/python/tests/question_python_test.py
index 69e3051..8c8c815 100644
--- a/src/lib/dns/python/tests/question_python_test.py
+++ b/src/lib/dns/python/tests/question_python_test.py
@@ -74,7 +74,6 @@ class QuestionTest(unittest.TestCase):
self.assertEqual("foo.example.com. IN NS\n", str(self.test_question1))
self.assertEqual("bar.example.com. CH A\n", self.test_question2.to_text())
-
def test_to_wire_buffer(self):
obuffer = bytes()
obuffer = self.test_question1.to_wire(obuffer)
@@ -82,7 +81,6 @@ class QuestionTest(unittest.TestCase):
wiredata = read_wire_data("question_toWire1")
self.assertEqual(obuffer, wiredata)
-
def test_to_wire_renderer(self):
renderer = MessageRenderer()
self.test_question1.to_wire(renderer)
@@ -91,5 +89,13 @@ class QuestionTest(unittest.TestCase):
self.assertEqual(renderer.get_data(), wiredata)
self.assertRaises(TypeError, self.test_question1.to_wire, 1)
+ def test_to_wire_truncated(self):
+ renderer = MessageRenderer()
+ renderer.set_length_limit(self.example_name1.get_length())
+ self.assertFalse(renderer.is_truncated())
+ self.test_question1.to_wire(renderer)
+ self.assertTrue(renderer.is_truncated())
+ self.assertEqual(0, renderer.get_length())
+
if __name__ == '__main__':
unittest.main()
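
A minimal sketch (not part of the patch) of the behaviour test_to_wire_truncated
checks: when the renderer's length limit cannot accommodate the question,
Question.to_wire() marks the renderer as truncated and renders nothing.

    from pydnspp import Question, Name, RRClass, RRType, MessageRenderer

    question = Question(Name("foo.example.com"), RRClass("IN"), RRType("NS"))
    renderer = MessageRenderer()
    renderer.set_length_limit(10)          # far smaller than the question
    question.to_wire(renderer)
    assert renderer.is_truncated()
    assert 0 == renderer.get_length()      # nothing was rendered
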
diff --git a/src/lib/dns/python/tests/rrset_python_test.py b/src/lib/dns/python/tests/rrset_python_test.py
index e0eab4a..de475a7 100644
--- a/src/lib/dns/python/tests/rrset_python_test.py
+++ b/src/lib/dns/python/tests/rrset_python_test.py
@@ -17,6 +17,7 @@
# Tests for the rrtype part of the pydnspp module
#
+import sys
import unittest
import os
from pydnspp import *
@@ -110,6 +111,12 @@ class TestModuleSpec(unittest.TestCase):
]
self.assertEqual(rdata, self.rrset_a.get_rdata())
self.assertEqual([], self.rrset_a_empty.get_rdata())
+
+ # We always make a new deep copy in get_rdata(), so the reference
+ # count of the returned list and each of its items should be 1;
+ # otherwise they would leak.
+ self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()))
+ self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()[0]))
if __name__ == '__main__':
unittest.main()
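
Since get_rdata() builds a fresh list on each call (which is why the refcount check
above expects 1), a caller can modify the returned list without affecting the RRset.
A small sketch, not part of the patch:

    from pydnspp import RRset, Rdata, Name, RRClass, RRType, RRTTL

    rrset = RRset(Name("example.com"), RRClass("IN"), RRType("A"), RRTTL(3600))
    rrset.add_rdata(Rdata(RRType("A"), RRClass("IN"), "192.0.2.1"))

    rdata_copy = rrset.get_rdata()         # a new list on every call
    rdata_copy.append("just a python list element now")
    assert 1 == len(rrset.get_rdata())     # the stored RDATA is unchanged
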
diff --git a/src/lib/dns/python/tests/tsig_python_test.py b/src/lib/dns/python/tests/tsig_python_test.py
index bffa0cf..7e5515d 100644
--- a/src/lib/dns/python/tests/tsig_python_test.py
+++ b/src/lib/dns/python/tests/tsig_python_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Internet Systems Consortium.
+# Copyright (C) 2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -13,17 +13,542 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-import unittest
+import base64, sys, time, unittest
from pydnspp import *
+from testutil import *
+from pyunittests_util import fix_current_time
+
+# bit-wise constant flags to configure DNS header flags for test
+# messages.
+QR_FLAG = 0x1
+AA_FLAG = 0x2
+RD_FLAG = 0x4
+
+COMMON_EXPECTED_MAC = b"\x22\x70\x26\xad\x29\x7b\xee\xe7\x21\xce\x6c\x6f\xff\x1e\x9e\xf3"
+DUMMY_DATA = b"\xdd" * 100
class TSIGContextTest(unittest.TestCase):
tsig_key = TSIGKey('www.example.com:SFuWd/q99SzF8Yzd1QbB9g==')
def setUp(self):
- # In the minimal implementation, we simply check constructing a
- # TSIGContext doesn't cause any disruption. We can add more tests
- # later.
+ # make sure we don't use faked time unless we explicitly do so in tests
+ fix_current_time(None)
+ self.qid = 0x2d65
+ self.test_name = Name("www.example.com")
self.tsig_ctx = TSIGContext(self.tsig_key)
+ self.tsig_verify_ctx = TSIGContext(self.tsig_key)
+ self.keyring = TSIGKeyRing()
+ self.message = Message(Message.RENDER)
+ self.renderer = MessageRenderer()
+ self.test_class = RRClass.IN()
+ self.test_ttl = RRTTL(86400)
+ self.secret = base64.b64decode(b"SFuWd/q99SzF8Yzd1QbB9g==")
+ self.tsig_ctx = TSIGContext(TSIGKey(self.test_name,
+ TSIGKey.HMACMD5_NAME,
+ self.secret))
+ self.badkey_name = Name("badkey.example.com")
+ self.dummy_record = TSIGRecord(self.badkey_name,
+ TSIG("hmac-md5.sig-alg.reg.int. " + \
+ "1302890362 300 0 11621 " + \
+ "0 0"))
+
+ def tearDown(self):
+ # reset any faked current time setting (it would affect other tests)
+ fix_current_time(None)
+
+ # Note: intentionally use camelCase so that we can easily copy-paste
+ # corresponding C++ tests.
+ def createMessageAndSign(self, id, qname, ctx, message_flags=RD_FLAG,
+ qtype=RRType.A(), answer_data=None,
+ answer_type=None, add_question=True,
+ rcode=Rcode.NOERROR()):
+ self.message.clear(Message.RENDER)
+ self.message.set_qid(id)
+ self.message.set_opcode(Opcode.QUERY())
+ self.message.set_rcode(rcode)
+ if (message_flags & QR_FLAG) != 0:
+ self.message.set_header_flag(Message.HEADERFLAG_QR)
+ if (message_flags & AA_FLAG) != 0:
+ self.message.set_header_flag(Message.HEADERFLAG_AA)
+ if (message_flags & RD_FLAG) != 0:
+ self.message.set_header_flag(Message.HEADERFLAG_RD)
+ if add_question:
+ self.message.add_question(Question(qname, self.test_class, qtype))
+ if answer_data is not None:
+ if answer_type is None:
+ answer_type = qtype
+ answer_rrset = RRset(qname, self.test_class, answer_type,
+ self.test_ttl)
+ answer_rrset.add_rdata(Rdata(answer_type, self.test_class,
+ answer_data))
+ self.message.add_rrset(Message.SECTION_ANSWER, answer_rrset)
+ self.renderer.clear()
+ self.message.to_wire(self.renderer)
+
+ if ctx.get_state() == TSIGContext.STATE_INIT:
+ expected_new_state = TSIGContext.STATE_SENT_REQUEST
+ else:
+ expected_new_state = TSIGContext.STATE_SENT_RESPONSE
+ tsig = ctx.sign(id, self.renderer.get_data())
+
+ return tsig
+
+ # Note: intentionally use camelCase so that we can easily copy-paste
+ # corresponding C++ tests.
+ def createMessageFromFile(self, file):
+ self.message.clear(Message.PARSE)
+ self.received_data = read_wire_data(file)
+ self.message.from_wire(self.received_data)
+
+ # Note: intentionally use camelCase so that we can easily copy-paste
+ # corresponding C++ tests.
+ def commonSignChecks(self, tsig, expected_qid, expected_timesigned,
+ expected_mac, expected_error=0,
+ expected_otherdata=None,
+ expected_algorithm=TSIGKey.HMACMD5_NAME):
+ tsig_rdata = tsig.get_rdata()
+ self.assertEqual(expected_algorithm, tsig_rdata.get_algorithm())
+ self.assertEqual(expected_timesigned, tsig_rdata.get_timesigned())
+ self.assertEqual(300, tsig_rdata.get_fudge())
+ self.assertEqual(expected_mac, tsig_rdata.get_mac())
+ self.assertEqual(expected_qid, tsig_rdata.get_original_id())
+ self.assertEqual(expected_error, tsig_rdata.get_error())
+ self.assertEqual(expected_otherdata, tsig_rdata.get_other_data())
+
+ def test_initial_state(self):
+ # Until signing or verifying, the state should be INIT
+ self.assertEqual(TSIGContext.STATE_INIT, self.tsig_ctx.get_state())
+
+ # And there should be no error code.
+ self.assertEqual(TSIGError(Rcode.NOERROR()), self.tsig_ctx.get_error())
+
+ # Note: intentionally use camelCase so that we can easily copy-paste
+ # corresponding C++ tests.
+ def commonVerifyChecks(self, ctx, record, data, expected_error,
+ expected_new_state=\
+ TSIGContext.STATE_VERIFIED_RESPONSE):
+ self.assertEqual(expected_error, ctx.verify(record, data))
+ self.assertEqual(expected_error, ctx.get_error())
+ self.assertEqual(expected_new_state, ctx.get_state())
+
+ def test_from_keyring(self):
+ # Construct a TSIG context with an empty key ring. Key shouldn't be
+ # found, and the BAD_KEY error should be recorded.
+ ctx = TSIGContext(self.test_name, TSIGKey.HMACMD5_NAME, self.keyring)
+ self.assertEqual(TSIGContext.STATE_INIT, ctx.get_state())
+ self.assertEqual(TSIGError.BAD_KEY, ctx.get_error())
+ # check that get_error() doesn't cause a reference leak. Note: we
+ # can't reliably do this check for get_state(), as it returns an
+ # integer object, which could have many references
+ self.assertEqual(1, sys.getrefcount(ctx.get_error()))
+
+ # Add a matching key (we don't use the secret so leave it empty), and
+ # construct it again. This time it should be constructed with a valid
+ # key.
+ self.keyring.add(TSIGKey(self.test_name, TSIGKey.HMACMD5_NAME, b""))
+ ctx = TSIGContext(self.test_name, TSIGKey.HMACMD5_NAME, self.keyring)
+ self.assertEqual(TSIGContext.STATE_INIT, ctx.get_state())
+ self.assertEqual(TSIGError.NOERROR, ctx.get_error())
+
+ # Similar to the first case except that the key ring isn't empty but
+ # it doesn't contain a matching key.
+ ctx = TSIGContext(self.test_name, TSIGKey.HMACSHA1_NAME, self.keyring)
+ self.assertEqual(TSIGContext.STATE_INIT, ctx.get_state())
+ self.assertEqual(TSIGError.BAD_KEY, ctx.get_error())
+
+ ctx = TSIGContext(Name("different-key.example"),
+ TSIGKey.HMACMD5_NAME, self.keyring)
+ self.assertEqual(TSIGContext.STATE_INIT, ctx.get_state())
+ self.assertEqual(TSIGError.BAD_KEY, ctx.get_error())
+
+ # "Unknown" algorithm name will result in BADKEY, too.
+ ctx = TSIGContext(self.test_name, Name("unknown.algorithm"),
+ self.keyring)
+ self.assertEqual(TSIGContext.STATE_INIT, ctx.get_state())
+ self.assertEqual(TSIGError.BAD_KEY, ctx.get_error())
+
+ def test_sign(self):
+ fix_current_time(0x4da8877a)
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx)
+ self.commonSignChecks(tsig, self.qid, 0x4da8877a, COMMON_EXPECTED_MAC)
+
+ # Same test as test_sign, but specifying the key name with upper-case
+ # (i.e. non-canonical) characters. The digest must be the same. This
+ # should actually be ensured at the level of TSIGKey, but we confirm
+ # it at this level, too.
+ def test_sign_using_uppercase_keyname(self):
+ fix_current_time(0x4da8877a)
+ cap_ctx = TSIGContext(TSIGKey(Name("WWW.EXAMPLE.COM"),
+ TSIGKey.HMACMD5_NAME, self.secret))
+ tsig = self.createMessageAndSign(self.qid, self.test_name, cap_ctx)
+ self.commonSignChecks(tsig, self.qid, 0x4da8877a, COMMON_EXPECTED_MAC)
+
+ # Same as the previous test, but for the algorithm name.
+ def test_sign_using_uppercase_algorithm_name(self):
+ fix_current_time(0x4da8877a)
+ cap_ctx = TSIGContext(TSIGKey(self.test_name,
+ Name("HMAC-md5.SIG-alg.REG.int"),
+ self.secret))
+ tsig = self.createMessageAndSign(self.qid, self.test_name, cap_ctx)
+ self.commonSignChecks(tsig, self.qid, 0x4da8877a, COMMON_EXPECTED_MAC)
+
+ # Sign the message using the actual time, and check the accuracy of it.
+ # We cannot reasonably predict the expected MAC, so don't bother to
+ # check it.
+ def test_sign_at_actual_time(self):
+ now = int(time.time())
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx)
+ tsig_rdata = tsig.get_rdata()
+
+ # Check that the resulting "time signed" is in the range [now, now + 5]
+ self.assertTrue(now <= tsig_rdata.get_timesigned())
+ self.assertTrue(now + 5 >= tsig_rdata.get_timesigned())
+
+ def test_bad_data(self):
+ self.assertRaises(TypeError, self.tsig_ctx.sign, None, 10)
+
+ def test_verify_bad_data(self):
+ # the data must at least hold the DNS message header and the specified
+ # TSIG.
+ bad_len = 12 + self.dummy_record.get_length() - 1
+ self.assertRaises(InvalidParameter, self.tsig_ctx.verify,
+ self.dummy_record, DUMMY_DATA[:bad_len])
+
+ def test_sign_using_hmacsha1(self):
+ fix_current_time(0x4dae7d5f)
+
+ secret = base64.b64decode(b"MA+QDhXbyqUak+qnMFyTyEirzng=")
+ sha1_ctx = TSIGContext(TSIGKey(self.test_name, TSIGKey.HMACSHA1_NAME,
+ secret))
+ qid = 0x0967
+ expected_mac = b"\x41\x53\x40\xc7\xda\xf8\x24\xed\x68\x4e\xe5\x86" + \
+ b"\xf7\xb5\xa6\x7a\x2f\xeb\xc0\xd3"
+ tsig = self.createMessageAndSign(qid, self.test_name, sha1_ctx)
+ self.commonSignChecks(tsig, qid, 0x4dae7d5f, expected_mac,
+ 0, None, TSIGKey.HMACSHA1_NAME)
+
+ def test_verify_then_sign_response(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageFromFile("message_toWire2.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_verify_ctx,
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType.A(), "192.0.2.1")
+
+ expected_mac = b"\x8f\xcd\xa6\x6a\x7c\xd1\xa3\xb9\x94\x8e\xb1\x86" + \
+ b"\x9d\x38\x4a\x9f"
+ self.commonSignChecks(tsig, self.qid, 0x4da8877a, expected_mac)
+
+ def test_verify_uppercase_names(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageFromFile("tsig_verify9.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ def test_verify_forward_message(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageFromFile("tsig_verify6.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ def test_sign_continuation(self):
+ fix_current_time(0x4da8e951)
+
+ axfr_qid = 0x3410
+ zone_name = Name("example.com")
+
+ tsig = self.createMessageAndSign(axfr_qid, zone_name, self.tsig_ctx,
+ 0, RRType.AXFR())
+
+ received_data = read_wire_data("tsig_verify1.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx, tsig, received_data,
+ TSIGError.NOERROR,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ tsig = self.createMessageAndSign(axfr_qid, zone_name,
+ self.tsig_verify_ctx,
+ AA_FLAG|QR_FLAG, RRType.AXFR(),
+ "ns.example.com. root.example.com." +\
+ " 2011041503 7200 3600 2592000 1200",
+ RRType.SOA())
+
+ received_data = read_wire_data("tsig_verify2.wire")
+ self.commonVerifyChecks(self.tsig_ctx, tsig, received_data,
+ TSIGError.NOERROR)
+
+ expected_mac = b"\x10\x24\x58\xf7\xf6\x2d\xdd\x7d\x63\x8d\x74" +\
+ b"\x60\x34\x13\x09\x68"
+ tsig = self.createMessageAndSign(axfr_qid, zone_name,
+ self.tsig_verify_ctx,
+ AA_FLAG|QR_FLAG, RRType.AXFR(),
+ "ns.example.com.", RRType.NS(),
+ False)
+ self.commonSignChecks(tsig, axfr_qid, 0x4da8e951, expected_mac)
+
+ received_data = read_wire_data("tsig_verify3.wire")
+ self.commonVerifyChecks(self.tsig_ctx, tsig, received_data,
+ TSIGError.NOERROR)
+
+ def test_badtime_response(self):
+ fix_current_time(0x4da8b9d6)
+
+ test_qid = 0x7fc4
+ tsig = self.createMessageAndSign(test_qid, self.test_name,
+ self.tsig_ctx, 0, RRType.SOA())
+
+ # "advance the clock" and try validating, which should fail due to
+ # BADTIME
+ fix_current_time(0x4da8be86)
+ self.commonVerifyChecks(self.tsig_verify_ctx, tsig, DUMMY_DATA,
+ TSIGError.BAD_TIME,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ # make and sign a response in the context of TSIG error.
+ tsig = self.createMessageAndSign(test_qid, self.test_name,
+ self.tsig_verify_ctx,
+ QR_FLAG, RRType.SOA(), None, None,
+ True, Rcode.NOTAUTH())
+
+ expected_otherdata = b"\x00\x00\x4d\xa8\xbe\x86"
+ expected_mac = b"\xd4\xb0\x43\xf6\xf4\x44\x95\xec\x8a\x01\x26" +\
+ b"\x0e\x39\x15\x9d\x76"
+
+ self.commonSignChecks(tsig, self.message.get_qid(), 0x4da8b9d6,
+ expected_mac,
+ 18, # error: BADTIME
+ expected_otherdata)
+
+ def test_badtime_response2(self):
+ fix_current_time(0x4da8b9d6)
+
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx, 0, RRType.SOA())
+
+ # "rewind the clock" and try validating, which should fail due to
+ # BADTIME
+ fix_current_time(0x4da8b9d6 - 600)
+ self.commonVerifyChecks(self.tsig_verify_ctx, tsig, DUMMY_DATA,
+ TSIGError.BAD_TIME,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ # Test various boundary conditions. We intentionally use the magic
+ # number 300 instead of the constant variable for testing.
+ # In the okay cases the signature is not correct, but for the purpose
+ # of this test it's sufficient to check the error code isn't BADTIME.
+ def test_badtime_boundaries(self):
+ fix_current_time(0x4da8b9d6)
+
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx, 0, RRType.SOA())
+
+ fix_current_time(0x4da8b9d6 + 301)
+ self.assertEqual(TSIGError.BAD_TIME,
+ self.tsig_verify_ctx.verify(tsig, DUMMY_DATA))
+
+ fix_current_time(0x4da8b9d6 + 300)
+ self.assertNotEqual(TSIGError.BAD_TIME,
+ self.tsig_verify_ctx.verify(tsig, DUMMY_DATA))
+
+ fix_current_time(0x4da8b9d6 - 301)
+ self.assertEqual(TSIGError.BAD_TIME,
+ self.tsig_verify_ctx.verify(tsig, DUMMY_DATA))
+
+ fix_current_time(0x4da8b9d6 - 300)
+ self.assertNotEqual(TSIGError.BAD_TIME,
+ self.tsig_verify_ctx.verify(tsig, DUMMY_DATA))
+
+ def test_badtime_overflow(self):
+ fix_current_time(200)
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx, 0, RRType.SOA())
+
+ # This should be in the okay range, but since "200 - fudge" overflows
+ # and we compare them as 64-bit unsigned integers, it results in a
+ # false positive (we intentionally accept that).
+ fix_current_time(100)
+ self.assertEqual(TSIGError.BAD_TIME,
+ self.tsig_verify_ctx.verify(tsig, DUMMY_DATA))
+
+ def test_badsig_response(self):
+ fix_current_time(0x4da8877a)
+
+ # Try to sign a simple message with bogus secret. It should fail
+ # with BADSIG.
+ self.createMessageFromFile("message_toWire2.wire")
+ bad_ctx = TSIGContext(TSIGKey(self.test_name, TSIGKey.HMACMD5_NAME,
+ DUMMY_DATA))
+ self.commonVerifyChecks(bad_ctx, self.message.get_tsig_record(),
+ self.received_data, TSIGError.BAD_SIG,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ # Sign the same message (which doesn't matter for this test) with the
+ # context of "checked state".
+ tsig = self.createMessageAndSign(self.qid, self.test_name, bad_ctx)
+ self.commonSignChecks(tsig, self.message.get_qid(), 0x4da8877a, None,
+ 16) # 16: BADSIG
+
+ def test_badkey_response(self):
+ # A similar test as badsigResponse but for BADKEY
+ fix_current_time(0x4da8877a)
+ tsig_ctx = TSIGContext(self.badkey_name, TSIGKey.HMACMD5_NAME,
+ self.keyring)
+ self.commonVerifyChecks(tsig_ctx, self.dummy_record, DUMMY_DATA,
+ TSIGError.BAD_KEY,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ sig = self.createMessageAndSign(self.qid, self.test_name, tsig_ctx)
+ self.assertEqual(self.badkey_name, sig.get_name())
+ self.commonSignChecks(sig, self.qid, 0x4da8877a, None, 17) # 17: BADKEY
+
+ def test_badkey_for_response(self):
+ # "BADKEY" case for a response to a signed message
+ self.createMessageAndSign(self.qid, self.test_name, self.tsig_ctx)
+ self.commonVerifyChecks(self.tsig_ctx, self.dummy_record, DUMMY_DATA,
+ TSIGError.BAD_KEY,
+ TSIGContext.STATE_SENT_REQUEST)
+
+ # A similar case with a different algorithm
+ dummy_record = TSIGRecord(self.test_name,
+ TSIG("hmac-sha1. 1302890362 300 0 "
+ "11621 0 0"))
+ self.commonVerifyChecks(self.tsig_ctx, dummy_record, DUMMY_DATA,
+ TSIGError.BAD_KEY,
+ TSIGContext.STATE_SENT_REQUEST)
+
+ # According to RFC2845 4.6, if TSIG verification fails the client
+ # should discard that message and wait for another signed response.
+ # This test emulates that situation.
+ def test_badsig_then_validate(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageAndSign(self.qid, self.test_name, self.tsig_ctx)
+ self.createMessageFromFile("tsig_verify4.wire")
+
+ self.commonVerifyChecks(self.tsig_ctx, self.message.get_tsig_record(),
+ self.received_data, TSIGError.BAD_SIG,
+ TSIGContext.STATE_SENT_REQUEST)
+
+ self.createMessageFromFile("tsig_verify5.wire")
+ self.commonVerifyChecks(self.tsig_ctx, self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_VERIFIED_RESPONSE)
+
+ # Similar to the previous test, but the first response doesn't contain
+ # TSIG.
+ def test_nosig_then_validate(self):
+ fix_current_time(0x4da8877a)
+ self.createMessageAndSign(self.qid, self.test_name, self.tsig_ctx)
+
+ self.commonVerifyChecks(self.tsig_ctx, None, DUMMY_DATA,
+ TSIGError.FORMERR, TSIGContext.STATE_SENT_REQUEST)
+
+ self.createMessageFromFile("tsig_verify5.wire")
+ self.commonVerifyChecks(self.tsig_ctx, self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_VERIFIED_RESPONSE)
+
+ # Similar to the previous test, but the first response results in BADTIME.
+ def test_badtime_then_validate(self):
+ fix_current_time(0x4da8877a)
+ tsig = self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_ctx)
+
+ # "advance the clock" and try validating, which should fail due to
+ # BADTIME
+ fix_current_time(0x4da8877a + 600)
+ self.commonVerifyChecks(self.tsig_ctx, tsig, DUMMY_DATA,
+ TSIGError.BAD_TIME, TSIGContext.STATE_SENT_REQUEST)
+
+ # revert the clock again.
+ fix_current_time(0x4da8877a)
+ self.createMessageFromFile("tsig_verify5.wire")
+ self.commonVerifyChecks(self.tsig_ctx, self.message.get_tsig_record(),
+ self.received_data, TSIGError.NOERROR,
+ TSIGContext.STATE_VERIFIED_RESPONSE)
+
+ # We don't allow an empty MAC unless the TSIG error is BADSIG or BADKEY.
+ def test_empty_mac(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageFromFile("tsig_verify7.wire")
+
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data,
+ TSIGError.BAD_SIG,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ # If the empty MAC comes with a BADKEY error, the error is passed
+ # transparently.
+ self.createMessageFromFile("tsig_verify8.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data,
+ TSIGError.BAD_KEY,
+ TSIGContext.STATE_RECEIVED_REQUEST)
+
+ # Once the context is used for sending a signed response, it shouldn't
+ # be used for further verification.
+ def test_verify_after_sendresponse(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageFromFile("message_toWire2.wire")
+ self.tsig_verify_ctx.verify(self.message.get_tsig_record(),
+ self.received_data)
+ self.assertEqual(TSIGContext.STATE_RECEIVED_REQUEST,
+ self.tsig_verify_ctx.get_state())
+ self.createMessageAndSign(self.qid, self.test_name,
+ self.tsig_verify_ctx,
+ QR_FLAG|AA_FLAG|RD_FLAG, RRType.A(),
+ "192.0.2.1")
+ self.assertEqual(TSIGContext.STATE_SENT_RESPONSE,
+ self.tsig_verify_ctx.get_state())
+
+ # Now trying further verification.
+ self.createMessageFromFile("message_toWire2.wire")
+ self.assertRaises(TSIGContextError, self.tsig_verify_ctx.verify,
+ self.message.get_tsig_record(), self.received_data)
+
+ # Likewise, once the context has verified a response, it shouldn't be
+ # used for signing any more.
+ def test_sign_after_verified(self):
+ fix_current_time(0x4da8877a)
+
+ self.createMessageAndSign(self.qid, self.test_name, self.tsig_ctx)
+ self.createMessageFromFile("tsig_verify5.wire")
+ self.tsig_ctx.verify(self.message.get_tsig_record(),
+ self.received_data)
+ self.assertEqual(TSIGContext.STATE_VERIFIED_RESPONSE,
+ self.tsig_ctx.get_state())
+
+ # Now trying further signing.
+ self.assertRaises(TSIGContextError, self.createMessageAndSign,
+ self.qid, self.test_name, self.tsig_ctx)
+
+ # A MAC that is too short should be rejected.
+ # Note: when we implement RFC4635-based checks, the error code will
+ # (probably) be FORMERR.
+ def test_too_short_mac(self):
+ fix_current_time(0x4da8877a)
+ self.createMessageFromFile("tsig_verify10.wire")
+ self.commonVerifyChecks(self.tsig_verify_ctx,
+ self.message.get_tsig_record(),
+ self.received_data, TSIGError.BAD_SIG,
+ TSIGContext.STATE_RECEIVED_REQUEST)
if __name__ == '__main__':
unittest.main()
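
A condensed sketch (not part of the patch) of the server-side flow these tests
exercise: verify an incoming signed request, then sign the response with the same
context. It relies on the same key and wire file as the tests, plus the helpers
imported at the top of this file (read_wire_data from testutil, fix_current_time
from pyunittests_util) so the MAC timestamp in the test data still validates.

    from pydnspp import (Message, MessageRenderer, Name, Opcode, Question,
                         Rcode, RRClass, RRType, TSIGContext, TSIGError,
                         TSIGKey)
    from testutil import read_wire_data
    from pyunittests_util import fix_current_time

    key = TSIGKey("www.example.com:SFuWd/q99SzF8Yzd1QbB9g==")
    ctx = TSIGContext(key)
    fix_current_time(0x4da8877a)   # pin the clock to the timestamp in the data

    # 1. Verify the incoming signed request (TSIG RR is in additional).
    request = Message(Message.PARSE)
    request_data = read_wire_data("message_toWire2.wire")
    request.from_wire(request_data)
    assert TSIGError.NOERROR == ctx.verify(request.get_tsig_record(),
                                           request_data)
    assert TSIGContext.STATE_RECEIVED_REQUEST == ctx.get_state()

    # 2. Build the response, render it unsigned, then sign the rendered data.
    response = Message(Message.RENDER)
    response.set_qid(request.get_qid())
    response.set_opcode(Opcode.QUERY())
    response.set_rcode(Rcode.NOERROR())
    response.set_header_flag(Message.HEADERFLAG_QR)
    response.add_question(Question(Name("www.example.com"), RRClass("IN"),
                                   RRType("A")))
    renderer = MessageRenderer()
    response.to_wire(renderer)
    tsig_rr = ctx.sign(response.get_qid(), renderer.get_data())
    assert TSIGContext.STATE_SENT_RESPONSE == ctx.get_state()
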
diff --git a/src/lib/dns/python/tests/tsig_rdata_python_test.py b/src/lib/dns/python/tests/tsig_rdata_python_test.py
new file mode 100644
index 0000000..7b861d6
--- /dev/null
+++ b/src/lib/dns/python/tests/tsig_rdata_python_test.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import sys
+from pydnspp import *
+
+class TSIGRdataTest(unittest.TestCase):
+ VALID_TEXT1 = "hmac-md5.sig-alg.reg.int. 1286779327 300 0 16020 BADKEY 0"
+ def test_from_string(self):
+ tsig = TSIG(self.VALID_TEXT1)
+ self.assertEqual(Name("hmac-md5.sig-alg.reg.int"),
+ tsig.get_algorithm())
+ # check there's no leak in creating the name object:
+ self.assertEqual(1, sys.getrefcount(tsig.get_algorithm()))
+
+if __name__ == '__main__':
+ unittest.main()
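
A small usage sketch (not in the patch) of the accessors the new binding exposes,
using the same rdata text as VALID_TEXT1 above:

    from pydnspp import TSIG, Name

    tsig = TSIG("hmac-md5.sig-alg.reg.int. 1286779327 300 0 16020 BADKEY 0")
    assert Name("hmac-md5.sig-alg.reg.int") == tsig.get_algorithm()
    assert 1286779327 == tsig.get_timesigned()
    assert 300 == tsig.get_fudge()
    assert 16020 == tsig.get_original_id()
    assert 17 == tsig.get_error()          # BADKEY
    assert not tsig.get_mac()              # MAC size is 0 in this rdata
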
diff --git a/src/lib/dns/python/tests/tsigerror_python_test.py b/src/lib/dns/python/tests/tsigerror_python_test.py
new file mode 100644
index 0000000..a968b6b
--- /dev/null
+++ b/src/lib/dns/python/tests/tsigerror_python_test.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import sys
+from pydnspp import *
+
+class TSIGErrorTest(unittest.TestCase):
+ def test_from_code(self):
+ self.assertEqual(0, TSIGError(0).get_code())
+ self.assertEqual(18, TSIGError(18).get_code())
+ self.assertEqual(65535, TSIGError(65535).get_code())
+ self.assertRaises(ValueError, TSIGError, 65536)
+ self.assertRaises(ValueError, TSIGError, -1)
+ self.assertRaises(TypeError, TSIGError, "not yet supported")
+
+ def test_from_rcode(self):
+ # We use RCODE for code values from 0-15.
+ self.assertEqual(0, TSIGError(Rcode.NOERROR()).get_code())
+ self.assertEqual(15, TSIGError(Rcode(15)).get_code())
+
+ # From error code 16 upward, TSIG errors define a separate space, so
+ # passing a corresponding Rcode for such code values is prohibited.
+ self.assertRaises(ValueError, TSIGError, Rcode(16))
+
+ def test_constants(self):
+ # We'll only test an arbitrarily chosen subset of the codes.
+ # This class is quite simple, so that should suffice.
+ self.assertEqual(TSIGError.BAD_SIG_CODE, TSIGError(16).get_code())
+ self.assertEqual(TSIGError.BAD_KEY_CODE, TSIGError(17).get_code())
+ self.assertEqual(TSIGError.BAD_TIME_CODE, TSIGError(18).get_code())
+
+ self.assertEqual(0, TSIGError.NOERROR.get_code())
+ self.assertEqual(9, TSIGError.NOTAUTH.get_code())
+ self.assertEqual(14, TSIGError.RESERVED14.get_code())
+ self.assertEqual(TSIGError.BAD_SIG_CODE, TSIGError.BAD_SIG.get_code())
+ self.assertEqual(TSIGError.BAD_KEY_CODE, TSIGError.BAD_KEY.get_code())
+ self.assertEqual(TSIGError.BAD_TIME_CODE, TSIGError.BAD_TIME.get_code())
+
+ def test_equal(self):
+ self.assertTrue(TSIGError.NOERROR == TSIGError(Rcode.NOERROR()))
+ self.assertTrue(TSIGError(Rcode.NOERROR()) == TSIGError.NOERROR)
+
+ self.assertTrue(TSIGError.BAD_SIG == TSIGError(16))
+ self.assertTrue(TSIGError(16) == TSIGError.BAD_SIG)
+
+ def test_nequal(self):
+ self.assertTrue(TSIGError.BAD_KEY != TSIGError(Rcode.NOERROR()))
+ self.assertTrue(TSIGError(Rcode.NOERROR()) != TSIGError.BAD_KEY)
+
+ def test_to_text(self):
+ # TSIGError derived from the standard Rcode
+ self.assertEqual("NOERROR", TSIGError(Rcode.NOERROR()).to_text())
+
+ # Well known TSIG errors
+ self.assertEqual("BADSIG", TSIGError.BAD_SIG.to_text())
+ self.assertEqual("BADKEY", TSIGError.BAD_KEY.to_text())
+ self.assertEqual("BADTIME", TSIGError.BAD_TIME.to_text())
+
+ # Unknown (or not yet supported) codes are simply converted to their numeric form.
+ self.assertEqual("19", TSIGError(19).to_text());
+ self.assertEqual("65535", TSIGError(65535).to_text());
+
+ # also check str() works same way
+ self.assertEqual("NOERROR", str(TSIGError(Rcode.NOERROR())))
+ self.assertEqual("BADSIG", str(TSIGError.BAD_SIG))
+
+ def test_to_rcode(self):
+ # TSIGError derived from the standard Rcode
+ self.assertEqual(Rcode.NOERROR(), TSIGError(Rcode.NOERROR()).to_rcode())
+
+ # Well known TSIG errors
+ self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_SIG.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_KEY.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_TIME.to_rcode())
+
+ # Unknown (or not yet supported) codes are treated as SERVFAIL.
+ self.assertEqual(Rcode.SERVFAIL(), TSIGError(19).to_rcode())
+ self.assertEqual(Rcode.SERVFAIL(), TSIGError(65535).to_rcode())
+
+ # Check there's no redundant refcount (which would cause leak)
+ self.assertEqual(1, sys.getrefcount(TSIGError.BAD_SIG.to_rcode()))
+
+if __name__ == '__main__':
+ unittest.main()
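
As a brief illustration (not part of the patch) of how a server could map a TSIG
verification result onto the RCODE of its response, using the conversions tested
above:

    from pydnspp import TSIGError, Rcode

    error = TSIGError.BAD_KEY
    assert "BADKEY" == error.to_text()
    assert Rcode.NOTAUTH() == error.to_rcode()            # BADSIG/BADKEY/BADTIME
    assert Rcode.SERVFAIL() == TSIGError(19).to_rcode()   # unknown codes
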
diff --git a/src/lib/dns/python/tests/tsigkey_python_test.py b/src/lib/dns/python/tests/tsigkey_python_test.py
index 305c8dd..516bea4 100644
--- a/src/lib/dns/python/tests/tsigkey_python_test.py
+++ b/src/lib/dns/python/tests/tsigkey_python_test.py
@@ -25,6 +25,9 @@ class TSIGKeyTest(unittest.TestCase):
TSIGKey.HMACMD5_NAME)
self.assertEqual(Name('hmac-sha1'), TSIGKey.HMACSHA1_NAME)
self.assertEqual(Name('hmac-sha256'), TSIGKey.HMACSHA256_NAME)
+ self.assertEqual(Name('hmac-sha224'), TSIGKey.HMACSHA224_NAME)
+ self.assertEqual(Name('hmac-sha384'), TSIGKey.HMACSHA384_NAME)
+ self.assertEqual(Name('hmac-sha512'), TSIGKey.HMACSHA512_NAME)
def test_init(self):
key = TSIGKey(self.key_name, TSIGKey.HMACMD5_NAME, self.secret)
diff --git a/src/lib/dns/python/tests/tsigrecord_python_test.py b/src/lib/dns/python/tests/tsigrecord_python_test.py
new file mode 100644
index 0000000..813a23b
--- /dev/null
+++ b/src/lib/dns/python/tests/tsigrecord_python_test.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import sys
+from pydnspp import *
+
+class TSIGRecordTest(unittest.TestCase):
+ def setUp(self):
+ self.test_name = Name("www.example.com")
+ self.test_rdata = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 " + \
+ "300 16 2tra2tra2tra2tra2tra2g== " + \
+ "11621 0 0")
+ self.test_record = TSIGRecord(self.test_name, self.test_rdata)
+
+ def test_getname(self):
+ self.assertEqual(self.test_name, self.test_record.get_name())
+ self.assertEqual(1, sys.getrefcount(self.test_record.get_name()))
+
+ def test_get_length(self):
+ # see the C++ test for the magic number
+ self.assertEqual(85, self.test_record.get_length())
+
+ def test_to_text(self):
+ expected_text = "www.example.com. 0 ANY TSIG " + \
+ "hmac-md5.sig-alg.reg.int. 1302890362 300 16 " + \
+ "2tra2tra2tra2tra2tra2g== 11621 NOERROR 0\n"
+ self.assertEqual(expected_text, self.test_record.to_text())
+ self.assertEqual(expected_text, str(self.test_record))
+
+if __name__ == '__main__':
+ unittest.main()
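
A minimal sketch (not part of the patch) pairing the key (owner) name with the TSIG
RDATA, as in setUp() above; get_length() is the number of octets the record adds to
a rendered message:

    from pydnspp import TSIG, TSIGRecord, Name

    rdata = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 300 16 "
                 "2tra2tra2tra2tra2tra2g== 11621 0 0")
    record = TSIGRecord(Name("www.example.com"), rdata)
    assert Name("www.example.com") == record.get_name()
    assert 85 == record.get_length()       # same magic number as the test
    print(record)                          # str() is equivalent to to_text()
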
diff --git a/src/lib/dns/python/tsig_python.cc b/src/lib/dns/python/tsig_python.cc
index 2e6d986..0764e33 100644
--- a/src/lib/dns/python/tsig_python.cc
+++ b/src/lib/dns/python/tsig_python.cc
@@ -12,13 +12,30 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN // need for "y#" below
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <exceptions/exceptions.h>
+
+#include <util/python/pycppwrapper_util.h>
+
#include <dns/tsig.h>
-using namespace isc::dns;
+#include "pydnspp_common.h"
+#include "name_python.h"
+#include "tsigkey_python.h"
+#include "tsigerror_python.h"
+#include "tsigrecord_python.h"
+#include "tsig_python.h"
-//
-// Definition of the classes
-//
+using namespace std;
+using namespace isc;
+using namespace isc::util::python;
+using namespace isc::dns;
+using namespace isc::dns::python;
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
@@ -26,12 +43,15 @@ using namespace isc::dns;
namespace {
// The s_* Class simply covers one instantiation of the object
-
class s_TSIGContext : public PyObject {
public:
- TSIGContext* tsig_ctx;
+ s_TSIGContext() : cppobj(NULL) {};
+ TSIGContext* cppobj;
};
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_TSIGContext, TSIGContext> TSIGContextContainer;
+
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
@@ -41,6 +61,12 @@ public:
int TSIGContext_init(s_TSIGContext* self, PyObject* args);
void TSIGContext_destroy(s_TSIGContext* self);
+// Class specific methods
+PyObject* TSIGContext_getState(s_TSIGContext* self);
+PyObject* TSIGContext_getError(s_TSIGContext* self);
+PyObject* TSIGContext_sign(s_TSIGContext* self, PyObject* args);
+PyObject* TSIGContext_verify(s_TSIGContext* self, PyObject* args);
+
// These are the functions we export
// For a minimal support, we don't need them.
@@ -51,18 +77,180 @@ void TSIGContext_destroy(s_TSIGContext* self);
// 3. Argument type
// 4. Documentation
PyMethodDef TSIGContext_methods[] = {
+ { "get_state", reinterpret_cast<PyCFunction>(TSIGContext_getState),
+ METH_NOARGS,
+ "Return the current state of the context (mainly for tests)" },
+ { "get_error", reinterpret_cast<PyCFunction>(TSIGContext_getError),
+ METH_NOARGS,
+ "Return the TSIG error as a result of the latest verification" },
+ { "sign",
+ reinterpret_cast<PyCFunction>(TSIGContext_sign), METH_VARARGS,
+ "Sign a DNS message." },
+ { "verify",
+ reinterpret_cast<PyCFunction>(TSIGContext_verify), METH_VARARGS,
+ "Verify a DNS message." },
{ NULL, NULL, 0, NULL }
};
+int
+TSIGContext_init(s_TSIGContext* self, PyObject* args) {
+ try {
+ // "From key" constructor
+ const PyObject* tsigkey_obj;
+ if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey_obj)) {
+ self->cppobj = new TSIGContext(PyTSIGKey_ToTSIGKey(tsigkey_obj));
+ return (0);
+ }
+
+ // "From key param + keyring" constructor
+ PyErr_Clear();
+ const PyObject* keyname_obj;
+ const PyObject* algname_obj;
+ const PyObject* keyring_obj;
+ if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &keyname_obj,
+ &name_type, &algname_obj, &tsigkeyring_type,
+ &keyring_obj)) {
+ self->cppobj = new TSIGContext(PyName_ToName(keyname_obj),
+ PyName_ToName(algname_obj),
+ PyTSIGKeyRing_ToTSIGKeyRing(keyring_obj));
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct TSIGContext object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in constructing TSIGContext");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGContext constructor");
+
+ return (-1);
+}
+
+void
+TSIGContext_destroy(s_TSIGContext* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+TSIGContext_getState(s_TSIGContext* self) {
+ return (Py_BuildValue("I", self->cppobj->getState()));
+}
+
+PyObject*
+TSIGContext_getError(s_TSIGContext* self) {
+ try {
+ PyObjectContainer container(createTSIGErrorObject(
+ self->cppobj->getError()));
+ return (Py_BuildValue("O", container.get()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpectedly failed to get TSIGContext error: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in TSIGContext.get_error");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGContext_sign(s_TSIGContext* self, PyObject* args) {
+ long qid = 0;
+ const char* mac;
+ Py_ssize_t mac_size;
+
+ if (PyArg_ParseTuple(args, "ly#", &qid, &mac, &mac_size)) {
+ if (qid < 0 || qid > 0xffff) {
+ PyErr_SetString(PyExc_ValueError,
+ "TSIGContext.sign: QID out of range");
+ return (NULL);
+ }
+
+ try {
+ ConstTSIGRecordPtr record = self->cppobj->sign(qid, mac, mac_size);
+ return (createTSIGRecordObject(*record));
+ } catch (const TSIGContextError& ex) {
+ PyErr_SetString(po_TSIGContextError, ex.what());
+ } catch (const exception& ex) {
+ const string ex_what = "Unexpected failure in TSIG sign: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIG sign");
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGContext.sign");
+ }
+
+ return (NULL);
+}
+
+PyObject*
+TSIGContext_verify(s_TSIGContext* self, PyObject* args) {
+ const char* data;
+ Py_ssize_t data_len;
+ PyObject* py_record;
+ PyObject* py_maybe_none;
+ const TSIGRecord* record;
+
+ if (PyArg_ParseTuple(args, "O!y#", &tsigrecord_type, &py_record,
+ &data, &data_len)) {
+ record = &PyTSIGRecord_ToTSIGRecord(py_record);
+ } else if (PyArg_ParseTuple(args, "Oy#", &py_maybe_none, &data,
+ &data_len)) {
+ record = NULL;
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGContext.verify");
+ return (NULL);
+ }
+ PyErr_Clear();
+
+ try {
+ const TSIGError error = self->cppobj->verify(record, data, data_len);
+ return (createTSIGErrorObject(error));
+ } catch (const TSIGContextError& ex) {
+ PyErr_SetString(po_TSIGContextError, ex.what());
+ } catch (const InvalidParameter& ex) {
+ PyErr_SetString(po_InvalidParameter, ex.what());
+ } catch (const exception& ex) {
+ const string ex_what = "Unexpected failure in TSIG verify: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in TSIG verify");
+ }
+
+ return (NULL);
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// Definition of class specific exception(s)
+PyObject* po_TSIGContextError;
+
// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_EDNS
+// parsing of PyObject* to s_TSIGContext
// Most of the functions are not actually implemented and NULL here.
-PyTypeObject tsig_context_type = {
+PyTypeObject tsigcontext_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "libdns_python.TSIGContext",
- sizeof(s_TSIGContext), // tp_basicsize
+ "pydnspp.TSIGContext",
+ sizeof(s_TSIGContext), // tp_basicsize
0, // tp_itemsize
- (destructor)TSIGContext_destroy, // tp_dealloc
+ reinterpret_cast<destructor>(TSIGContext_destroy), // tp_dealloc
NULL, // tp_print
NULL, // tp_getattr
NULL, // tp_setattr
@@ -71,22 +259,28 @@ PyTypeObject tsig_context_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
NULL, // tp_setattro
NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The TSIGContext class maintains a context of a signed session of "
- "DNS transactions by TSIG.",
+
+ // We allow the python version of TSIGContext to act as a base class.
+ // From a pure design point of view this is wrong, because it's not
+ // intended to be inherited. However, cryptographic operations are
+ // generally difficult to test, so it would be very advantageous if we
+ // could define a mock context class.
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+
+ "The TSIGContext class maintains a context of a signed session of "
+ "DNS transactions by TSIG.",
NULL, // tp_traverse
NULL, // tp_clear
- NULL, // tp_richcompare
+ NULL, // tp_richcompare
0, // tp_weaklistoffset
NULL, // tp_iter
NULL, // tp_iternext
- TSIGContext_methods, // tp_methods
+ TSIGContext_methods, // tp_methods
NULL, // tp_members
NULL, // tp_getset
NULL, // tp_base
@@ -94,7 +288,7 @@ PyTypeObject tsig_context_type = {
NULL, // tp_descr_get
NULL, // tp_descr_set
0, // tp_dictoffset
- (initproc)TSIGContext_init, // tp_init
+ reinterpret_cast<initproc>(TSIGContext_init), // tp_init
NULL, // tp_alloc
PyType_GenericNew, // tp_new
NULL, // tp_free
@@ -108,50 +302,24 @@ PyTypeObject tsig_context_type = {
0 // tp_version_tag
};
-int
-TSIGContext_init(s_TSIGContext* self, PyObject* args) {
- const s_TSIGKey* tsigkey_obj;
-
- try {
- if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey_obj)) {
- self->tsig_ctx = new TSIGContext(*tsigkey_obj->tsigkey);
- return (0);
- }
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- return (-1);
+bool
+PyTSIGContext_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
-
- PyErr_Clear();
- PyErr_SetString(PyExc_TypeError,
- "Invalid arguments to TSIGContext constructor");
-
- return (-1);
+ return (PyObject_TypeCheck(obj, &tsigcontext_type));
}
-void
-TSIGContext_destroy(s_TSIGContext* const self) {
- delete self->tsig_ctx;
- self->tsig_ctx = NULL;
- Py_TYPE(self)->tp_free(self);
-}
-
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIGContext(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsig_context_type) < 0) {
- return (false);
+TSIGContext&
+PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj) {
+ if (tsigcontext_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGContext PyObject conversion");
}
- Py_INCREF(&tsig_context_type);
- void* p = &tsig_context_type;
- PyModule_AddObject(mod, "TSIGContext", static_cast<PyObject*>(p));
-
- addClassVariable(tsig_context_type, "DEFAULT_FUDGE",
- Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
-
- return (true);
+ s_TSIGContext* tsigcontext = static_cast<s_TSIGContext*>(tsigcontext_obj);
+ return (*tsigcontext->cppobj);
}
-} // end of anonymous namespace
+
+} // namespace python
+} // namespace dns
+} // namespace isc
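
The second constructor form bound above (key name, algorithm name, key ring) does
not throw when no matching key is found; instead the error is recorded and later
reported as BADKEY, as test_from_keyring checks. A sketch of the Python-side usage,
not part of the patch:

    from pydnspp import TSIGContext, TSIGKey, TSIGKeyRing, TSIGError, Name

    keyring = TSIGKeyRing()
    ctx = TSIGContext(Name("www.example.com"), TSIGKey.HMACMD5_NAME, keyring)
    assert TSIGError.BAD_KEY == ctx.get_error()     # no matching key yet

    keyring.add(TSIGKey(Name("www.example.com"), TSIGKey.HMACMD5_NAME, b""))
    ctx = TSIGContext(Name("www.example.com"), TSIGKey.HMACMD5_NAME, keyring)
    assert TSIGError.NOERROR == ctx.get_error()     # key found
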
diff --git a/src/lib/dns/python/tsig_python.h b/src/lib/dns/python/tsig_python.h
new file mode 100644
index 0000000..e4e9fff
--- /dev/null
+++ b/src/lib/dns/python/tsig_python.h
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_TSIGCONTEXT_H
+#define __PYTHON_TSIGCONTEXT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class TSIGContext;
+
+namespace python {
+
+extern PyTypeObject tsigcontext_type;
+
+// Class specific exceptions
+extern PyObject* po_TSIGContextError;
+
+/// \brief Checks if the given python object is a TSIGContext object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGContext, false otherwise
+bool PyTSIGContext_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGContext object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGContext; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGContext_Check()
+///
+/// \note This is not a copy; if the TSIGContext is needed beyond the
+/// lifetime of the given PyObject, the caller must copy it itself.
+///
+/// \param tsigcontext_obj The tsigcontext object to convert
+TSIGContext& PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_TSIGCONTEXT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tsig_rdata_python.cc b/src/lib/dns/python/tsig_rdata_python.cc
new file mode 100644
index 0000000..6ec0f09
--- /dev/null
+++ b/src/lib/dns/python/tsig_rdata_python.cc
@@ -0,0 +1,367 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/rdataclass.h>
+
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "name_python.h"
+#include "tsig_rdata_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::dns::python;
+
+// For each class, we need a struct, a helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIG : public PyObject {
+public:
+ s_TSIG() : cppobj(NULL) {};
+ const rdata::any::TSIG* cppobj;
+};
+
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_TSIG, any::TSIG> TSIGContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int TSIG_init(s_TSIG* self, PyObject* args);
+void TSIG_destroy(s_TSIG* self);
+
+// These are the functions we export
+// ADD/REMOVE/MODIFY THE FOLLOWING AS APPROPRIATE FOR THE ACTUAL CLASS.
+//
+PyObject* TSIG_toText(const s_TSIG* const self);
+PyObject* TSIG_getAlgorithm(const s_TSIG* const self);
+PyObject* TSIG_getTimeSigned(const s_TSIG* const self);
+PyObject* TSIG_getFudge(const s_TSIG* const self);
+PyObject* TSIG_getOriginalID(const s_TSIG* const self);
+PyObject* TSIG_getError(const s_TSIG* const self);
+PyObject* TSIG_getMAC(const s_TSIG* const self);
+PyObject* TSIG_getOtherData(const s_TSIG* const self);
+PyObject* TSIG_str(PyObject* self);
+PyObject* TSIG_richcmp(const s_TSIG* const self,
+ const s_TSIG* const other, int op);
+PyObject* TSIG_toWire(const s_TSIG* self, PyObject* args);
+
+// These are the functions we export
+// For a minimal support, we don't need them.
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef TSIG_methods[] = {
+ { "get_algorithm", reinterpret_cast<PyCFunction>(TSIG_getAlgorithm),
+ METH_NOARGS,
+ "Return the algorithm name." },
+ { "get_timesigned", reinterpret_cast<PyCFunction>(TSIG_getTimeSigned),
+ METH_NOARGS,
+ "Return the value of the Time Signed field. "
+ "The returned value does not exceed 2^48-1."
+ },
+ { "get_fudge", reinterpret_cast<PyCFunction>(TSIG_getFudge),
+ METH_NOARGS,
+ "Return the value of the Fudge field." },
+ { "get_original_id", reinterpret_cast<PyCFunction>(TSIG_getOriginalID),
+ METH_NOARGS,
+ "Return the value of the Original ID field." },
+ { "get_error", reinterpret_cast<PyCFunction>(TSIG_getError),
+ METH_NOARGS,
+ "Return the value of the Error field." },
+ { "get_mac", reinterpret_cast<PyCFunction>(TSIG_getMAC),
+ METH_NOARGS,
+ "Return the value of the MAC field. "
+ "If it's empty, return None." },
+ { "get_other_data", reinterpret_cast<PyCFunction>(TSIG_getOtherData),
+ METH_NOARGS,
+ "Return the value of the Other Data field. "
+ "If it's empty, return None." },
+ { "to_text", reinterpret_cast<PyCFunction>(TSIG_toText), METH_NOARGS,
+ "Returns the text representation" },
+ { "to_wire", reinterpret_cast<PyCFunction>(TSIG_toWire), METH_VARARGS,
+ "Converts the TSIG object to wire format.\n"
+ "The argument can be either a MessageRenderer or an object that "
+ "implements the sequence interface. If the object is mutable "
+ "(for instance a bytearray()), the wire data is added in-place.\n"
+ "If it is not (for instance a bytes() object), a new object is "
+ "returned" },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+TSIG_init(s_TSIG* self, PyObject* args) {
+ try {
+ // constructor from string
+ const char* rdata_str;
+ if (PyArg_ParseTuple(args, "s", &rdata_str)) {
+ self->cppobj = new any::TSIG(string(rdata_str));
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct TSIG object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in constructing TSIG");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIG constructor");
+
+ return (-1);
+}
+
+void
+TSIG_destroy(s_TSIG* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+TSIG_getAlgorithm(const s_TSIG* const self) {
+ try {
+ return (createNameObject(self->cppobj->getAlgorithm()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get TSIG algorithm: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting TSIG algorithm");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIG_getTimeSigned(const s_TSIG* const self) {
+ return (Py_BuildValue("K", self->cppobj->getTimeSigned()));
+}
+
+PyObject*
+TSIG_getFudge(const s_TSIG* const self) {
+ return (Py_BuildValue("H", self->cppobj->getFudge()));
+}
+
+PyObject*
+TSIG_getOriginalID(const s_TSIG* const self) {
+ return (Py_BuildValue("H", self->cppobj->getOriginalID()));
+}
+
+PyObject*
+TSIG_getError(const s_TSIG* const self) {
+ return (Py_BuildValue("H", self->cppobj->getError()));
+}
+
+PyObject*
+TSIG_getMAC(const s_TSIG* const self) {
+ return (Py_BuildValue("y#", self->cppobj->getMAC(),
+ self->cppobj->getMACSize()));
+}
+
+PyObject*
+TSIG_getOtherData(const s_TSIG* const self) {
+ return (Py_BuildValue("y#", self->cppobj->getOtherData(),
+ self->cppobj->getOtherLen()));
+}
+
+PyObject*
+TSIG_toText(const s_TSIG* const self) {
+ try {
+ // toText() could throw, so we need to catch any exceptions below.
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert TSIG object to text: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting TSIG object to text");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIG_str(PyObject* self) {
+ // Simply call the to_text method we already defined
+ return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
+ const_cast<char*>("")));
+}
+
+PyObject*
+TSIG_toWire(const s_TSIG* const self, PyObject* args) {
+ typedef any::TSIG TSIGRdata;
+ return (toWireWrapper<s_TSIG, TSIGRdata, ToWireCallVoid<const TSIGRdata> >(
+ self, args));
+}
+
+PyObject*
+TSIG_richcmp(const s_TSIG* const self,
+ const s_TSIG* const other,
+ const int op)
+{
+ bool c = false;
+
+ // Check for null and if the types match. If different type,
+ // simply return False
+ if (other == NULL || (self->ob_type != other->ob_type)) {
+ Py_RETURN_FALSE;
+ }
+
+ // All comparisons are allowed here, based on the result of compare()
+ const int cmp = self->cppobj->compare(*other->cppobj);
+ switch (op) {
+ case Py_EQ:
+ c = (cmp == 0);
+ break;
+ case Py_NE:
+ c = (cmp != 0);
+ break;
+ case Py_GT:
+ c = (cmp > 0);
+ break;
+ case Py_GE:
+ c = (cmp >= 0);
+ break;
+ case Py_LT:
+ c = (cmp < 0);
+ break;
+ case Py_LE:
+ c = (cmp <= 0);
+ break;
+ default:
+ PyErr_SetString(PyExc_IndexError,
+ "Unhandled rich comparison operator for TSIG");
+ return (NULL);
+ }
+ if (c) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_TSIG
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject tsig_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.TSIG",
+ sizeof(s_TSIG), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(TSIG_destroy), // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ TSIG_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The TSIG class objects represents the TSIG RDATA as defined in RFC2845.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ reinterpret_cast<richcmpfunc>(TSIG_richcmp), // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ TSIG_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ // At the moment, we leave tp_base NULL as we won't use this class
+ // in a polymorphic way for our immediate need.
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(TSIG_init), // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createTSIGObject(const any::TSIG& source) {
+ TSIGContainer container(PyObject_New(s_TSIG, &tsig_type));
+ container.set(new any::TSIG(source));
+ return (container.release());
+}
+
+bool
+PyTSIG_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &tsig_type));
+}
+
+const any::TSIG&
+PyTSIG_ToTSIG(const PyObject* tsig_obj) {
+ if (tsig_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIG PyObject conversion");
+ }
+ const s_TSIG* tsig = static_cast<const s_TSIG*>(tsig_obj);
+ return (*tsig->cppobj);
+}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
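
As a rough usage sketch of the wrapper defined above (illustrative only, not
part of the patch): the class is constructed from the RDATA text form and
exposes the per-field getters registered in TSIG_methods. The RDATA string
and the resulting field values below are examples, not taken from the commit.

    import pydnspp

    # Text form: <algorithm> <time signed> <fudge> <MAC size> [<MAC>]
    #            <original ID> <error> <other len> [<other data>]
    tsig = pydnspp.TSIG(
        "hmac-md5.sig-alg.reg.int. 1286779327 300 0 16020 BADKEY 0")

    tsig.get_fudge()          # 300
    tsig.get_original_id()    # 16020
    tsig.get_error()          # 17 (BADKEY)
    tsig.get_mac()            # None, since the MAC field is empty here
    tsig.to_text()            # round-trips the text representation

    # to_wire() accepts a MessageRenderer or a sequence object; a mutable
    # sequence such as bytearray() is extended in place.
    buf = bytearray()
    tsig.to_wire(buf)
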
diff --git a/src/lib/dns/python/tsig_rdata_python.h b/src/lib/dns/python/tsig_rdata_python.h
new file mode 100644
index 0000000..a84d9e8
--- /dev/null
+++ b/src/lib/dns/python/tsig_rdata_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_TSIG_H
+#define __PYTHON_TSIG_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace any {
+class TSIG;
+}
+}
+
+namespace python {
+
+extern PyTypeObject tsig_type;
+
+/// This is a simple shortcut to create a python TSIG object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block,
+/// followed by the necessary setup for a python exception.
+PyObject* createTSIGObject(const rdata::any::TSIG& source);
+
+/// \brief Checks if the given python object is a TSIG object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIG, false otherwise
+bool PyTSIG_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIG object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIG; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIG_Check()
+///
+/// \note This is not a copy; if the TSIG is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsig_obj The tsig object to convert
+const rdata::any::TSIG& PyTSIG_ToTSIG(const PyObject* tsig_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_TSIG_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tsigerror_python.cc b/src/lib/dns/python/tsigerror_python.cc
new file mode 100644
index 0000000..7a0217e
--- /dev/null
+++ b/src/lib/dns/python/tsigerror_python.cc
@@ -0,0 +1,291 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/tsigerror.h>
+
+#include "pydnspp_common.h"
+#include "rcode_python.h"
+#include "tsigerror_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns;
+using namespace isc::dns::python;
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+// Import pydoc text
+#include "tsigerror_python_inc.cc"
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGError : public PyObject {
+public:
+ s_TSIGError() : cppobj(NULL) {};
+ const TSIGError* cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_TSIGError, TSIGError> TSIGErrorContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int TSIGError_init(s_TSIGError* self, PyObject* args);
+void TSIGError_destroy(s_TSIGError* self);
+
+// These are the functions we export
+PyObject* TSIGError_getCode(const s_TSIGError* const self);
+PyObject* TSIGError_toText(const s_TSIGError* const self);
+PyObject* TSIGError_toRcode(const s_TSIGError* const self);
+PyObject* TSIGError_str(PyObject* self);
+PyObject* TSIGError_richcmp(const s_TSIGError* const self,
+ const s_TSIGError* const other, int op);
+
+// These are the functions we export
+// For a minimal support, we don't need them.
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef TSIGError_methods[] = {
+ { "get_code", reinterpret_cast<PyCFunction>(TSIGError_getCode),
+ METH_NOARGS,
+ TSIGError_getCode_doc },
+ { "to_text", reinterpret_cast<PyCFunction>(TSIGError_toText), METH_NOARGS,
+ TSIGError_toText_doc },
+ { "to_rcode", reinterpret_cast<PyCFunction>(TSIGError_toRcode),
+ METH_NOARGS,
+ TSIGError_toRcode_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+TSIGError_init(s_TSIGError* self, PyObject* args) {
+ try {
+ // Constructor from the code value
+ long code = 0;
+ if (PyArg_ParseTuple(args, "l", &code)) {
+ if (code < 0 || code > 0xffff) {
+ PyErr_SetString(PyExc_ValueError, "TSIG error out of range");
+ return (-1);
+ }
+ self->cppobj = new TSIGError(code);
+ return (0);
+ }
+
+ // Constructor from Rcode
+ PyErr_Clear();
+ PyObject* py_rcode;
+ if (PyArg_ParseTuple(args, "O!", &rcode_type, &py_rcode)) {
+ self->cppobj = new TSIGError(PyRcode_ToRcode(py_rcode));
+ return (0);
+ }
+ } catch (const isc::OutOfRange& ex) {
+ const string ex_what = "Failed to construct TSIGError object: " +
+ string(ex.what());
+ PyErr_SetString(PyExc_ValueError, ex_what.c_str());
+ return (-1);
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct TSIGError object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in constructing TSIGError");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGError constructor");
+
+ return (-1);
+}
+
+void
+TSIGError_destroy(s_TSIGError* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+TSIGError_getCode(const s_TSIGError* const self) {
+ return (Py_BuildValue("I", self->cppobj->getCode()));
+}
+
+PyObject*
+TSIGError_toText(const s_TSIGError* const self) {
+ try {
+ // toText() could throw, so we need to catch any exceptions below.
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert TSIGError object to text: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting TSIGError object to text");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGError_str(PyObject* self) {
+ // Simply call the to_text method we already defined
+ return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
+ const_cast<char*>("")));
+}
+
+PyObject*
+TSIGError_toRcode(const s_TSIGError* const self) {
+ try {
+ return (createRcodeObject(self->cppobj->toRcode()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert TSIGError to Rcode: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting TSIGError to Rcode");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGError_richcmp(const s_TSIGError* const self,
+ const s_TSIGError* const other,
+ const int op)
+{
+ bool c = false;
+
+ // Check for null and if the types match. If different type,
+ // simply return False
+ if (other == NULL || (self->ob_type != other->ob_type)) {
+ Py_RETURN_FALSE;
+ }
+
+ // Only equals and not equals here, unorderable type
+ switch (op) {
+ case Py_LT:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; TSIGError");
+ return (NULL);
+ case Py_LE:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; TSIGError");
+ return (NULL);
+ case Py_EQ:
+ c = (*self->cppobj == *other->cppobj);
+ break;
+ case Py_NE:
+ c = (*self->cppobj != *other->cppobj);
+ break;
+ case Py_GT:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; TSIGError");
+ return (NULL);
+ case Py_GE:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; TSIGError");
+ return (NULL);
+ }
+ if (c) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_TSIGError
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject tsigerror_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.TSIGError",
+ sizeof(s_TSIGError), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(TSIGError_destroy), // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ // THIS MAY HAVE TO BE CHANGED TO NULL:
+ TSIGError_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ TSIGError_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ // THIS MAY HAVE TO BE CHANGED TO NULL:
+ reinterpret_cast<richcmpfunc>(TSIGError_richcmp), // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ TSIGError_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(TSIGError_init), // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createTSIGErrorObject(const TSIGError& source) {
+ TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
+ container.set(new TSIGError(source));
+ return (container.release());
+}
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/tsigerror_python.h b/src/lib/dns/python/tsigerror_python.h
new file mode 100644
index 0000000..0b5b630
--- /dev/null
+++ b/src/lib/dns/python/tsigerror_python.h
@@ -0,0 +1,44 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_TSIGERROR_H
+#define __PYTHON_TSIGERROR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class TSIGError;
+
+namespace python {
+
+extern PyTypeObject tsigerror_type;
+
+/// This is a simple shortcut to create a python TSIGError object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block,
+/// followed by the necessary setup for a python exception.
+PyObject* createTSIGErrorObject(const TSIGError& source);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_TSIGERROR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tsigerror_python_inc.cc b/src/lib/dns/python/tsigerror_python_inc.cc
new file mode 100644
index 0000000..ed3b605
--- /dev/null
+++ b/src/lib/dns/python/tsigerror_python_inc.cc
@@ -0,0 +1,83 @@
+namespace {
+const char* const TSIGError_doc = "\n\
+TSIG errors.\n\
+\n\
+\n\
+The TSIGError class objects represent standard errors related to TSIG\n\
+protocol operations as defined in related specifications, mainly in\n\
+RFC2845.\n\
+\n\
+TSIGError(error_code)\n\
+\n\
+Constructor from the code value.\n\
+\n\
+Exceptions:\n\
+ None: \n\
+\n\
+Parameters:\n\
+ error_code: The underlying 16-bit error code value of the TSIGError.\n\
+\n\
+TSIGError(rcode)\n\
+\n\
+Constructor from Rcode.\n\
+\n\
+As defined in RFC2845, error code values from 0 to 15 (inclusive) are\n\
+derived from the DNS RCODEs, which are represented via the Rcode class\n\
+in this library. This constructor works as a converter from these\n\
+RCODEs to corresponding TSIGError objects.\n\
+\n\
+Exceptions:\n\
+ ValueError: Given rcode is not convertible to a TSIGError.\n\
+\n\
+Parameters:\n\
+ rcode: the Rcode from which the TSIGError should be derived.\n\
+\n\
+";
+const char* const TSIGError_getCode_doc = "get_code() -> integer\n\
+\n\
+Returns the TSIG error code value.\n\
+\n\
+Exceptions:\n\
+ None: \n\
+\n\
+Return Value(s):\n\
+ The underlying code value corresponding to the TSIGError.\n\
+";
+const char* const TSIGError_toText_doc = "to_text() -> string\n\
+\n\
+Convert the TSIGError to a string.\n\
+\n\
+For codes derived from RCODEs up to 15, this method returns the same\n\
+string as Rcode.to_text() for the corresponding code. For other pre-\n\
+defined code values (see TSIGError.CodeValue), this method returns a\n\
+string representation of the \"mnemonic' used for the enum and\n\
+constant objects as defined in RFC2845. For example, the string for\n\
+code value 16 is \"BADSIG\", etc. For other code values it returns a\n\
+string representation of the decimal number of the value, e.g. \"32\",\n\
+\"100\", etc.\n\
+\n\
+Exceptions:\n\
+ None\n\
+\n\
+Return Value(s):\n\
+ A string representation of the TSIGError.\n\
+";
+const char* const TSIGError_toRcode_doc = "to_rcode() -> Rcode\n\
+\n\
+Convert the TSIGError to a Rcode.\n\
+\n\
+This method returns an Rcode object that corresponds to the TSIG\n\
+error. The returned Rcode is expected to be used by a verifying server\n\
+to specify the RCODE of a response when TSIG verification fails.\n\
+\n\
+Specifically, this method returns Rcode.NOTAUTH() for the TSIG\n\
+specific errors, BADSIG, BADKEY, BADTIME, as described in RFC2845. For\n\
+errors derived from the standard Rcode (code 0-15), it returns the\n\
+corresponding Rcode. For others, this method returns Rcode.SERVFAIL()\n\
+as a last resort.\n\
+\n\
+Exceptions:\n\
+ None: \n\
+\n\
+";
+}
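
As a quick illustration of the interface documented above (not part of the
patch; the pydnspp.Rcode factory methods used below are assumed from the
rest of the library):

    import pydnspp

    err = pydnspp.TSIGError(16)
    err.get_code()    # 16
    err.to_text()     # "BADSIG"
    err.to_rcode()    # equal to pydnspp.Rcode.NOTAUTH()

    # Codes 0-15 can equivalently be derived from an Rcode object.
    pydnspp.TSIGError(pydnspp.Rcode.FORMERR()).get_code()    # 1
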
diff --git a/src/lib/dns/python/tsigkey_python.cc b/src/lib/dns/python/tsigkey_python.cc
index 4ca7bcd..cf79c1a 100644
--- a/src/lib/dns/python/tsigkey_python.cc
+++ b/src/lib/dns/python/tsigkey_python.cc
@@ -12,32 +12,39 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <new>
+#include <Python.h>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/name.h>
#include <dns/tsigkey.h>
+#include <dns/rdata.h>
-using namespace isc::dns;
-using namespace isc::dns::rdata;
+#include "pydnspp_common.h"
+#include "name_python.h"
+#include "tsigkey_python.h"
-//
-// Definition of the classes
-//
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns;
+using namespace isc::dns::python;
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-namespace {
//
// TSIGKey
//
+namespace {
// The s_* Class simply covers one instantiation of the object
-
class s_TSIGKey : public PyObject {
public:
- s_TSIGKey() : tsigkey(NULL) {}
- TSIGKey* tsigkey;
+ s_TSIGKey() : cppobj(NULL) {};
+ TSIGKey* cppobj;
};
//
@@ -78,12 +85,105 @@ PyMethodDef TSIGKey_methods[] = {
{ NULL, NULL, 0, NULL }
};
+int
+TSIGKey_init(s_TSIGKey* self, PyObject* args) {
+ try {
+ const char* str;
+ if (PyArg_ParseTuple(args, "s", &str)) {
+ self->cppobj = new TSIGKey(str);
+ return (0);
+ }
+
+ PyErr_Clear();
+ const PyObject* key_name;
+ const PyObject* algorithm_name;
+ PyObject* bytes_obj;
+ const char* secret;
+ Py_ssize_t secret_len;
+ if (PyArg_ParseTuple(args, "O!O!O", &name_type, &key_name,
+ &name_type, &algorithm_name, &bytes_obj) &&
+ PyObject_AsCharBuffer(bytes_obj, &secret, &secret_len) == 0) {
+ if (secret_len == 0) {
+ secret = NULL;
+ }
+ self->cppobj = new TSIGKey(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name),
+ secret, secret_len);
+ return (0);
+ }
+ } catch (const isc::InvalidParameter& ex) {
+ PyErr_SetString(po_InvalidParameter, ex.what());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException, "Unexpected exception");
+ return (-1);
+ }
+
+ PyErr_Clear();
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGKey constructor");
+
+ return (-1);
+}
+
+void
+TSIGKey_destroy(s_TSIGKey* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+TSIGKey_getKeyName(const s_TSIGKey* const self) {
+ try {
+ return (createNameObject(self->cppobj->getKeyName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get key name of TSIGKey: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting key name of TSIGKey");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGKey_getAlgorithmName(const s_TSIGKey* const self) {
+ try {
+ return (createNameObject(self->cppobj->getAlgorithmName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get algorithm name of TSIGKey: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting algorithm name of TSIGKey");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGKey_getSecret(const s_TSIGKey* const self) {
+ return (Py_BuildValue("y#", self->cppobj->getSecret(),
+ self->cppobj->getSecretLength()));
+}
+
+PyObject*
+TSIGKey_toText(const s_TSIGKey* self) {
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
// This defines the complete type for reflection in python and
// parsing of PyObject* to s_EDNS
// Most of the functions are not actually implemented and NULL here.
PyTypeObject tsigkey_type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "libdns_python.TSIGKey",
+ "pydnspp.TSIGKey",
sizeof(s_TSIGKey), // tp_basicsize
0, // tp_itemsize
(destructor)TSIGKey_destroy, // tp_dealloc
@@ -95,7 +195,7 @@ PyTypeObject tsigkey_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -132,125 +232,23 @@ PyTypeObject tsigkey_type = {
0 // tp_version_tag
};
-// A helper function to build a python "Name" object with error handling
-// encapsulated.
-s_Name*
-createNameObject(const Name& source) {
- s_Name* name = PyObject_New(s_Name, &name_type);
- if (name == NULL) {
- return (NULL);
- }
- name->name = new(nothrow) Name(source);
- if (name->name == NULL) {
- Py_DECREF(name);
- PyErr_SetString(po_IscException, "Allocating Name object failed");
- return (NULL);
+bool
+PyTSIGKey_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- return (name);
+ return (PyObject_TypeCheck(obj, &tsigkey_type));
}
-int
-TSIGKey_init(s_TSIGKey* self, PyObject* args) {
- const char* str;
-
- const s_Name* key_name;
- const s_Name* algorithm_name;
- PyObject* bytes_obj;
- const char* secret;
- Py_ssize_t secret_len;
-
-
- try {
- if (PyArg_ParseTuple(args, "s", &str)) {
- self->tsigkey = new TSIGKey(str);
- return (0);
- } else if (PyArg_ParseTuple(args, "O!O!O", &name_type, &key_name,
- &name_type, &algorithm_name, &bytes_obj) &&
- PyObject_AsCharBuffer(bytes_obj, &secret, &secret_len) != -1) {
- self->tsigkey = new TSIGKey(*key_name->name,
- *algorithm_name->name,
- secret, secret_len);
- return (0);
- }
- } catch (const isc::InvalidParameter& ex) {
- PyErr_SetString(po_InvalidParameter, ex.what());
- return (-1);
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- return (-1);
- }
-
- PyErr_Clear();
- PyErr_SetString(PyExc_TypeError,
- "Invalid arguments to TSIGKey constructor");
-
- return (-1);
-}
-
-void
-TSIGKey_destroy(s_TSIGKey* const self) {
- delete self->tsigkey;
- self->tsigkey = NULL;
- Py_TYPE(self)->tp_free(self);
-}
-
-PyObject*
-TSIGKey_getKeyName(const s_TSIGKey* const self) {
- return (createNameObject(self->tsigkey->getKeyName()));
-}
-
-PyObject*
-TSIGKey_getAlgorithmName(const s_TSIGKey* const self) {
- return (createNameObject(self->tsigkey->getAlgorithmName()));
-}
-
-PyObject*
-TSIGKey_getSecret(const s_TSIGKey* const self) {
- return (Py_BuildValue("y#", self->tsigkey->getSecret(),
- self->tsigkey->getSecretLength()));
-}
-
-PyObject*
-TSIGKey_toText(const s_TSIGKey* self) {
- return (Py_BuildValue("s", self->tsigkey->toText().c_str()));
+const TSIGKey&
+PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj) {
+ const s_TSIGKey* tsigkey = static_cast<const s_TSIGKey*>(tsigkey_obj);
+ return (*tsigkey->cppobj);
}
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIGKey(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigkey_type) < 0) {
- return (false);
- }
- Py_INCREF(&tsigkey_type);
- void* p = &tsigkey_type;
- if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&tsigkey_type);
- return (false);
- }
-
- s_Name* name;
- if ((name = createNameObject(TSIGKey::HMACMD5_NAME())) == NULL) {
- goto cleanup;
- }
- addClassVariable(tsigkey_type, "HMACMD5_NAME", name);
- if ((name = createNameObject(TSIGKey::HMACSHA1_NAME())) == NULL) {
- goto cleanup;
- }
- addClassVariable(tsigkey_type, "HMACSHA1_NAME", name);
- if ((name = createNameObject(TSIGKey::HMACSHA256_NAME())) == NULL) {
- goto cleanup;
- }
- addClassVariable(tsigkey_type, "HMACSHA256_NAME", name);
-
- return (true);
-
- cleanup:
- Py_DECREF(&tsigkey_type);
- return (false);
-}
+} // namespace python
+} // namespace dns
+} // namespace isc
//
// End of TSIGKey
//
@@ -259,14 +257,12 @@ initModulePart_TSIGKey(PyObject* mod) {
// TSIGKeyRing
//
+namespace {
// The s_* Class simply covers one instantiation of the object
-
-// The s_* Class simply covers one instantiation of the object
-
class s_TSIGKeyRing : public PyObject {
public:
- s_TSIGKeyRing() : keyring(NULL) {}
- TSIGKeyRing* keyring;
+ s_TSIGKeyRing() : cppobj(NULL) {};
+ TSIGKeyRing* cppobj;
};
//
@@ -296,56 +292,6 @@ PyMethodDef TSIGKeyRing_methods[] = {
{ NULL, NULL, 0, NULL }
};
-PyTypeObject tsigkeyring_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "libdns_python.TSIGKeyRing",
- sizeof(s_TSIGKeyRing), // tp_basicsize
- 0, // tp_itemsize
- (destructor)TSIGKeyRing_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- NULL, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "A simple repository of a set of TSIGKey objects.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- TSIGKeyRing_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)TSIGKeyRing_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
if (!PyArg_ParseTuple(args, "")) {
@@ -354,9 +300,9 @@ TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
"Invalid arguments to TSIGKeyRing constructor");
return (-1);
}
-
- self->keyring = new(nothrow) TSIGKeyRing();
- if (self->keyring == NULL) {
+
+ self->cppobj = new(nothrow) TSIGKeyRing();
+ if (self->cppobj == NULL) {
PyErr_SetString(po_IscException, "Allocating TSIGKeyRing failed");
return (-1);
}
@@ -366,24 +312,24 @@ TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
void
TSIGKeyRing_destroy(s_TSIGKeyRing* self) {
- delete self->keyring;
- self->keyring = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
TSIGKeyRing_size(const s_TSIGKeyRing* const self) {
- return (Py_BuildValue("I", self->keyring->size()));
+ return (Py_BuildValue("I", self->cppobj->size()));
}
PyObject*
TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
s_TSIGKey* tsigkey;
-
+
if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey)) {
try {
const TSIGKeyRing::Result result =
- self->keyring->add(*tsigkey->tsigkey);
+ self->cppobj->add(*tsigkey->cppobj);
return (Py_BuildValue("I", result));
} catch (...) {
PyErr_SetString(po_IscException, "Unexpected exception");
@@ -399,11 +345,11 @@ TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
PyObject*
TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
+ PyObject* key_name;
if (PyArg_ParseTuple(args, "O!", &name_type, &key_name)) {
const TSIGKeyRing::Result result =
- self->keyring->remove(*key_name->name);
+ self->cppobj->remove(PyName_ToName(key_name));
return (Py_BuildValue("I", result));
}
@@ -415,20 +361,21 @@ TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
PyObject*
TSIGKeyRing_find(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
- s_Name* algorithm_name;
+ PyObject* key_name;
+ PyObject* algorithm_name;
if (PyArg_ParseTuple(args, "O!O!", &name_type, &key_name,
&name_type, &algorithm_name)) {
const TSIGKeyRing::FindResult result =
- self->keyring->find(*key_name->name, *algorithm_name->name);
+ self->cppobj->find(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name));
if (result.key != NULL) {
s_TSIGKey* key = PyObject_New(s_TSIGKey, &tsigkey_type);
if (key == NULL) {
return (NULL);
}
- key->tsigkey = new(nothrow) TSIGKey(*result.key);
- if (key->tsigkey == NULL) {
+ key->cppobj = new(nothrow) TSIGKey(*result.key);
+ if (key->cppobj == NULL) {
Py_DECREF(key);
PyErr_SetString(po_IscException,
"Allocating TSIGKey object failed");
@@ -442,28 +389,80 @@ TSIGKeyRing_find(const s_TSIGKeyRing* self, PyObject* args) {
return (NULL);
}
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+PyTypeObject tsigkeyring_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.TSIGKeyRing",
+ sizeof(s_TSIGKeyRing), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)TSIGKeyRing_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "A simple repository of a set of TSIGKey objects.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ TSIGKeyRing_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)TSIGKeyRing_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
bool
-initModulePart_TSIGKeyRing(PyObject* mod) {
- if (PyType_Ready(&tsigkeyring_type) < 0) {
- return (false);
+PyTSIGKeyRing_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&tsigkeyring_type);
- void* p = &tsigkeyring_type;
- if (PyModule_AddObject(mod, "TSIGKeyRing",
- static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&tsigkeyring_type);
- return (false);
- }
-
- addClassVariable(tsigkeyring_type, "SUCCESS",
- Py_BuildValue("I", TSIGKeyRing::SUCCESS));
- addClassVariable(tsigkeyring_type, "EXIST",
- Py_BuildValue("I", TSIGKeyRing::EXIST));
- addClassVariable(tsigkeyring_type, "NOTFOUND",
- Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+ return (PyObject_TypeCheck(obj, &tsigkeyring_type));
+}
- return (true);
+const TSIGKeyRing&
+PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj) {
+ if (tsigkeyring_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGKeyRing PyObject conversion");
+ }
+ const s_TSIGKeyRing* tsigkeyring =
+ static_cast<const s_TSIGKeyRing*>(tsigkeyring_obj);
+ return (*tsigkeyring->cppobj);
}
-} // end of unnamed namespace
+} // namespace python
+} // namespace dns
+} // namespace isc
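
A brief sketch of how the converted classes are used from Python (not part
of the patch). The colon-separated TSIGKey text form, the get_* method
names and the TSIGKeyRing.SUCCESS constant are assumptions based on the
rest of the library; they are not shown in this hunk.

    import pydnspp

    # "<key name>:<base64 secret>[:<algorithm name>]"
    key = pydnspp.TSIGKey("example.com:c2VjcmV0")

    keyring = pydnspp.TSIGKeyRing()
    assert keyring.add(key) == pydnspp.TSIGKeyRing.SUCCESS
    assert keyring.size() == 1

    # find() takes the key name and the algorithm name as Name objects.
    keyring.find(pydnspp.Name("example.com"), key.get_algorithm_name())
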
diff --git a/src/lib/dns/python/tsigkey_python.h b/src/lib/dns/python/tsigkey_python.h
new file mode 100644
index 0000000..6c3d2e3
--- /dev/null
+++ b/src/lib/dns/python/tsigkey_python.h
@@ -0,0 +1,75 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_TSIGKEY_H
+#define __PYTHON_TSIGKEY_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class TSIGKey;
+class TSIGKeyRing;
+
+namespace python {
+
+extern PyTypeObject tsigkey_type;
+extern PyTypeObject tsigkeyring_type;
+
+/// \brief Checks if the given python object is a TSIGKey object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKey, false otherwise
+bool PyTSIGKey_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKey object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKey; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKey_Check()
+///
+/// \note This is not a copy; if the TSIGKey is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkey_obj The tsigkey object to convert
+const TSIGKey& PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj);
+
+/// \brief Checks if the given python object is a TSIGKeyRing object
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKeyRing, false otherwise
+bool PyTSIGKeyRing_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKeyRing object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKeyRing; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKeyRing_Check()
+///
+/// \note This is not a copy; if the TSIGKeyRing is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkeyring_obj The tsigkeyring object to convert
+const TSIGKeyRing& PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_TSIGKEY_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tsigrecord_python.cc b/src/lib/dns/python/tsigrecord_python.cc
new file mode 100644
index 0000000..c754dd2
--- /dev/null
+++ b/src/lib/dns/python/tsigrecord_python.cc
@@ -0,0 +1,293 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/tsigrecord.h>
+
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "name_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigrecord_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns;
+using namespace isc::dns::python;
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// TSIGRecord
+//
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGRecord : public PyObject {
+public:
+ s_TSIGRecord() : cppobj(NULL) {};
+ TSIGRecord* cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_TSIGRecord, TSIGRecord> TSIGRecordContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int TSIGRecord_init(s_TSIGRecord* self, PyObject* args);
+void TSIGRecord_destroy(s_TSIGRecord* self);
+PyObject* TSIGRecord_toText(const s_TSIGRecord* const self);
+PyObject* TSIGRecord_str(PyObject* self);
+PyObject* TSIGRecord_toWire(const s_TSIGRecord* self, PyObject* args);
+PyObject* TSIGRecord_getName(const s_TSIGRecord* self);
+PyObject* TSIGRecord_getLength(const s_TSIGRecord* self);
+PyObject* TSIGRecord_getRdata(const s_TSIGRecord* self);
+
+// These are the functions we export
+// For a minimal support, we don't need them.
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef TSIGRecord_methods[] = {
+ { "get_name", reinterpret_cast<PyCFunction>(TSIGRecord_getName),
+ METH_NOARGS,
+ "Return the owner name of the TSIG RR, which is the TSIG key name" },
+ { "get_length", reinterpret_cast<PyCFunction>(TSIGRecord_getLength),
+ METH_NOARGS,
+ "Return the length of the TSIG record" },
+ { "get_rdata", reinterpret_cast<PyCFunction>(TSIGRecord_getRdata),
+ METH_NOARGS,
+ "Return the RDATA of the TSIG RR" },
+ { "to_text", reinterpret_cast<PyCFunction>(TSIGRecord_toText), METH_NOARGS,
+ "Returns the text representation" },
+ { "to_wire", reinterpret_cast<PyCFunction>(TSIGRecord_toWire),
+ METH_VARARGS,
+ "Converts the TSIGRecord object to wire format.\n"
+ "The argument can be either a MessageRenderer or an object that "
+ "implements the sequence interface. If the object is mutable "
+ "(for instance a bytearray()), the wire data is added in-place.\n"
+ "If it is not (for instance a bytes() object), a new object is "
+ "returned" },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+TSIGRecord_init(s_TSIGRecord* self, PyObject* args) {
+ try {
+ const PyObject* py_name;
+ const PyObject* py_tsig;
+ if (PyArg_ParseTuple(args, "O!O!", &name_type, &py_name,
+ &tsig_type, &py_tsig)) {
+ self->cppobj = new TSIGRecord(PyName_ToName(py_name),
+ PyTSIG_ToTSIG(py_tsig));
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct TSIGRecord object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in constructing TSIGRecord");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to TSIGRecord constructor");
+
+ return (-1);
+}
+
+// This is a template of typical code logic of python object destructor.
+// In many cases you can use it without modification, but check that carefully.
+void
+TSIGRecord_destroy(s_TSIGRecord* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// This should be able to be used without modification as long as the
+// underlying C++ class has toText().
+PyObject*
+TSIGRecord_toText(const s_TSIGRecord* const self) {
+ try {
+ // toText() could throw, so we need to catch any exceptions below.
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert TSIGRecord object to text: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting TSIGRecord object to text");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGRecord_str(PyObject* self) {
+ // Simply call the to_text method we already defined
+ return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
+ const_cast<char*>("")));
+}
+
+PyObject*
+TSIGRecord_toWire(const s_TSIGRecord* const self, PyObject* args) {
+ typedef ToWireCallInt<const TSIGRecord> ToWireCall;
+ PyObject* (*towire_fn)(const s_TSIGRecord* const, PyObject*) =
+ toWireWrapper<s_TSIGRecord, TSIGRecord, ToWireCall>;
+ return (towire_fn(self, args));
+}
+
+PyObject*
+TSIGRecord_getName(const s_TSIGRecord* const self) {
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get TSIGRecord name: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting TSIGRecord name");
+ }
+ return (NULL);
+}
+
+PyObject*
+TSIGRecord_getLength(const s_TSIGRecord* const self) {
+ return (Py_BuildValue("H", self->cppobj->getLength()));
+}
+
+PyObject*
+TSIGRecord_getRdata(const s_TSIGRecord* const self) {
+ try {
+ return (createTSIGObject(self->cppobj->getRdata()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get TSIGRecord RDATA: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "getting TSIGRecord RDATA");
+ }
+ return (NULL);
+}
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_TSIGRecord
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject tsigrecord_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.TSIGRecord",
+ sizeof(s_TSIGRecord), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(TSIGRecord_destroy), // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ TSIGRecord_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The TSIGRecord class objects is...(COMPLETE THIS)",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ TSIGRecord_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(TSIGRecord_init), // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createTSIGRecordObject(const TSIGRecord& source) {
+ TSIGRecordContainer container(PyObject_New(s_TSIGRecord, &tsigrecord_type));
+ container.set(new TSIGRecord(source));
+ return (container.release());
+}
+
+bool
+PyTSIGRecord_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &tsigrecord_type));
+}
+
+const TSIGRecord&
+PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj) {
+ if (tsigrecord_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGRecord PyObject conversion");
+ }
+ s_TSIGRecord* tsigrecord = static_cast<s_TSIGRecord*>(tsigrecord_obj);
+ return (*tsigrecord->cppobj);
+}
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
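
A small usage sketch of the wrapper above (not part of the patch; the TSIG
RDATA text and the key name are illustrative only):

    import pydnspp

    rdata = pydnspp.TSIG(
        "hmac-md5.sig-alg.reg.int. 1286779327 300 0 16020 BADKEY 0")
    record = pydnspp.TSIGRecord(pydnspp.Name("www.example.com"), rdata)

    record.get_name()      # the owner name of the TSIG RR, i.e. the key name
    record.get_length()    # length of the whole TSIG RR
    record.get_rdata()     # a pydnspp.TSIG copy of the RDATA
    record.to_text()       # text representation of the record
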
diff --git a/src/lib/dns/python/tsigrecord_python.h b/src/lib/dns/python/tsigrecord_python.h
new file mode 100644
index 0000000..d6252e1
--- /dev/null
+++ b/src/lib/dns/python/tsigrecord_python.h
@@ -0,0 +1,65 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_TSIGRECORD_H
+#define __PYTHON_TSIGRECORD_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class TSIGRecord;
+
+namespace python {
+
+
+extern PyTypeObject tsigrecord_type;
+
+/// This is a simple shortcut to create a python TSIGRecord object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block,
+/// followed by the necessary setup for a python exception.
+PyObject* createTSIGRecordObject(const TSIGRecord& source);
+
+/// \brief Checks if the given python object is a TSIGRecord object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGRecord, false otherwise
+bool PyTSIGRecord_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGRecord object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGRecord; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGRecord_Check()
+///
+/// \note This is not a copy; if the TSIGRecord is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigrecord_obj The tsigrecord object to convert
+const TSIGRecord& PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_TSIGRECORD_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/question.cc b/src/lib/dns/question.cc
index 96e2a9c..6ccb164 100644
--- a/src/lib/dns/question.cc
+++ b/src/lib/dns/question.cc
@@ -57,10 +57,19 @@ Question::toWire(OutputBuffer& buffer) const {
unsigned int
Question::toWire(AbstractMessageRenderer& renderer) const {
+ const size_t pos0 = renderer.getLength();
+
renderer.writeName(name_);
rrtype_.toWire(renderer);
rrclass_.toWire(renderer);
+ // Make sure the renderer has room for the question
+ if (renderer.getLength() > renderer.getLengthLimit()) {
+ renderer.trim(renderer.getLength() - pos0);
+ renderer.setTruncated();
+ return (0);
+ }
+
return (1); // number of "entries"
}
diff --git a/src/lib/dns/question.h b/src/lib/dns/question.h
index b3f3d98..5d2783b 100644
--- a/src/lib/dns/question.h
+++ b/src/lib/dns/question.h
@@ -201,23 +201,23 @@ public:
/// class description).
///
/// The owner name will be compressed if possible, although it's an
- /// unlikely event in practice because the %Question section a DNS
+ /// unlikely event in practice because the Question section of a DNS
/// message normally doesn't contain multiple question entries and
/// it's located right after the Header section.
/// Nevertheless, \c renderer records the information of the owner name
/// so that it can be pointed by other RRs in other sections (which is
/// more likely to happen).
///
- /// In theory, an attempt to render a Question may cause truncation
- /// (when the Question section contains a large number of entries),
- /// but this implementation doesn't catch that situation.
- /// It would make the code unnecessarily complicated (though perhaps
- /// slightly) for almost impossible case in practice.
- /// An upper layer will handle the pathological case as a general error.
+ /// It is possible, though very rare in practice, that
+ /// an attempt to render a Question may cause truncation
+ /// (when the Question section contains a large number of entries).
+ /// In such a case this method avoids the rendering and indicates the
+ /// truncation in the \c renderer. This method returns 0 in this case.
///
/// \param renderer DNS message rendering context that encapsulates the
/// output buffer and name compression information.
- /// \return 1
+ ///
+ /// \return 1 on success; 0 if it causes truncation
unsigned int toWire(AbstractMessageRenderer& renderer) const;
/// \brief Render the Question in the wire format without name compression.
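
A sketch of the new truncation behavior as seen from the Python bindings
(not part of the patch; MessageRenderer.set_length_limit()/is_truncated()
and the Question constructor signature are assumed from the rest of
pydnspp):

    import pydnspp

    renderer = pydnspp.MessageRenderer()
    renderer.set_length_limit(4)    # artificially small, to force truncation

    q = pydnspp.Question(pydnspp.Name("www.example.com"),
                         pydnspp.RRClass("IN"), pydnspp.RRType("AAAA"))

    q.to_wire(renderer)             # the question is not rendered at all...
    renderer.is_truncated()         # ...and the truncation flag is set
    renderer.get_length()           # back to 0: the partial data was trimmed
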
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 8211e7f..4eb72bc 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -19,9 +19,11 @@
#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
+#include <util/strutil.h>
#include <util/encode/base64.h>
#include <dns/messagerenderer.h>
+#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
#include <dns/tsigerror.h>
@@ -30,6 +32,7 @@ using namespace std;
using namespace boost;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::util::str;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -65,45 +68,6 @@ struct TSIG::TSIGImpl {
const vector<uint8_t> other_data_;
};
-namespace {
-string
-getToken(istringstream& iss, const string& full_input) {
- string token;
- iss >> token;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid TSIG text: parse error" <<
- full_input);
- }
- return (token);
-}
-
-// This helper function converts a string token to an *unsigned* integer.
-// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
-// wide to store resulting integers.
-// BitSize is the maximum number of bits that the resulting integer can take.
-// This function first checks whether the given token can be converted to
-// an integer of NumType type. It then confirms the conversion result is
-// within the valid range, i.e., [0, 2^NumType - 1]. The second check is
-// necessary because lexical_cast<T> where T is an unsigned integer type
-// doesn't correctly reject negative numbers when compiled with SunStudio.
-template <typename NumType, int BitSize>
-NumType
-tokenToNum(const string& num_token) {
- NumType num;
- try {
- num = lexical_cast<NumType>(num_token);
- } catch (const boost::bad_lexical_cast& ex) {
- isc_throw(InvalidRdataText, "Invalid TSIG numeric parameter: " <<
- num_token);
- }
- if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
- isc_throw(InvalidRdataText, "Numeric TSIG parameter out of range: " <<
- num);
- }
- return (num);
-}
-}
-
/// \brief Constructor from string.
///
/// \c tsig_str must be formatted as follows:
@@ -148,47 +112,52 @@ tokenToNum(const string& num_token) {
TSIG::TSIG(const std::string& tsig_str) : impl_(NULL) {
istringstream iss(tsig_str);
- const Name algorithm(getToken(iss, tsig_str));
- const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss,
- tsig_str));
- const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
- const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
- const string mac_txt = (macsize > 0) ? getToken(iss, tsig_str) : "";
- vector<uint8_t> mac;
- decodeBase64(mac_txt, mac);
- if (mac.size() != macsize) {
- isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
- }
-
- const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
- const string error_txt = getToken(iss, tsig_str);
- int32_t error = 0;
- // XXX: In the initial implementation we hardcode the mnemonics.
- // We'll soon generalize this.
- if (error_txt == "BADSIG") {
- error = 16;
- } else if (error_txt == "BADKEY") {
- error = 17;
- } else if (error_txt == "BADTIME") {
- error = 18;
- } else {
- error = tokenToNum<int32_t, 16>(error_txt);
- }
-
- const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
- const string otherdata_txt = (otherlen > 0) ? getToken(iss, tsig_str) : "";
- vector<uint8_t> other_data;
- decodeBase64(otherdata_txt, other_data);
-
- if (!iss.eof()) {
- isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
- tsig_str);
+ try {
+ const Name algorithm(getToken(iss));
+ const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss));
+ const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss));
+
+ const string mac_txt = (macsize > 0) ? getToken(iss) : "";
+ vector<uint8_t> mac;
+ decodeBase64(mac_txt, mac);
+ if (mac.size() != macsize) {
+ isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
+ }
+
+ const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss));
+
+ const string error_txt = getToken(iss);
+ int32_t error = 0;
+ // XXX: In the initial implementation we hardcode the mnemonics.
+ // We'll soon generalize this.
+ if (error_txt == "BADSIG") {
+ error = 16;
+ } else if (error_txt == "BADKEY") {
+ error = 17;
+ } else if (error_txt == "BADTIME") {
+ error = 18;
+ } else {
+ error = tokenToNum<int32_t, 16>(error_txt);
+ }
+
+ const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss));
+ const string otherdata_txt = (otherlen > 0) ? getToken(iss) : "";
+ vector<uint8_t> other_data;
+ decodeBase64(otherdata_txt, other_data);
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
+ tsig_str);
+ }
+
+ impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
+ error, other_data);
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid TSIG text: " << ste.what() <<
+ ": " << tsig_str);
}
-
- impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
- error, other_data);
}
/// \brief Constructor from wire-format data.
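
The rationale in the removed tokenToNum comment, parse into a signed type first and then range-check, is worth keeping in mind when reading the new try/catch version. A minimal standalone sketch of that idea (illustrative names only; it uses std::stoll rather than the library's lexical_cast-based helper):

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Parse a decimal token into a BitSize-bit unsigned value. Going through a
    // signed 64-bit intermediate makes negative input detectable, which a direct
    // conversion to an unsigned type may silently accept on some compilers.
    template <int BitSize>
    int64_t parseUnsignedToken(const std::string& token) {
        std::size_t pos = 0;
        const int64_t value = std::stoll(token, &pos); // throws std::invalid_argument
        if (pos != token.size()) {
            throw std::invalid_argument("trailing characters in token: " + token);
        }
        if (value < 0 || value >= (static_cast<int64_t>(1) << BitSize)) {
            throw std::out_of_range("token out of range: " + token);
        }
        return (value);
    }
    // e.g. parseUnsignedToken<48>("1286978795") for time_signed,
    //      parseUnsignedToken<16>("300") for fudge.
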
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..6afc4de
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where the server name field must represent a valid domain name.
+///
+/// An example of a valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+ subtype_(0), server_(Name::ROOT_NAME())
+{
+ istringstream iss(afsdb_str);
+
+ try {
+ const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name servername(getToken(iss));
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for AFSDB"
+ "RDATA: " << afsdb_str);
+ }
+
+ subtype_ = subtype;
+ server_ = servername;
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+ ste.what() << ": " << afsdb_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't validate the second parameter (rdata length)
+/// during parsing; if necessary, the caller must check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the wire data is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+ subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+ Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+ subtype_ = source.subtype_;
+ server_ = source.server_;
+
+ return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+ return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(subtype_);
+ server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format, taking name compression
+/// into account.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", so the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(subtype_);
+ renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+ const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+ if (subtype_ < other_afsdb.subtype_) {
+ return (-1);
+ } else if (subtype_ > other_afsdb.subtype_) {
+ return (1);
+ }
+
+ return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+ return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+ return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
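
As a quick usage sketch of the new class (assuming the generated rdataclass.h from this branch is available; the snippet is illustrative and not part of the commit):

    #include <dns/rdataclass.h>
    #include <cassert>

    using isc::dns::rdata::generic::AFSDB;

    int main() {
        const AFSDB afsdb("1 afsdb.example.com.");
        assert(afsdb.getSubtype() == 1);
        assert(afsdb.getServer().toText() == "afsdb.example.com.");
        // A third field, e.g. "1 afsdb.example.com. extra", throws InvalidRdataText.
        return (0);
    }
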
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined %in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// \exception std::bad_alloc Memory allocation fails in copying the
+ /// internal server name (this should be very rare).
+ AFSDB& operator=(const AFSDB& source);
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the server field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal server name.
+ ///
+ /// This method never throws an exception.
+ const Name& getServer() const;
+
+ /// \brief Return the value of the subtype field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getSubtype() const;
+
+private:
+ uint16_t subtype_;
+ Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/ds_like.h b/src/lib/dns/rdata/generic/detail/ds_like.h
new file mode 100644
index 0000000..b5a35cd
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/ds_like.h
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DS_LIKE_H
+#define __DS_LIKE_H 1
+
+#include <stdint.h>
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace generic {
+namespace detail {
+
+/// \brief \c rdata::DSLikeImpl class represents the DS-like RDATA for DS
+/// and DLV types.
+///
+/// This class implements the basic interfaces inherited by the DS and DLV
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to DS-like RDATA.
+template <class Type, uint16_t typeCode> class DSLikeImpl {
+ // Common sequence of toWire() operations used for the two versions of
+ // toWire().
+ template <typename Output>
+ void
+ toWireCommon(Output& output) const {
+ output.writeUint16(tag_);
+ output.writeUint8(algorithm_);
+ output.writeUint8(digest_type_);
+ output.writeData(&digest_[0], digest_.size());
+ }
+
+public:
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataText is thrown if the method cannot process the
+ /// parameter data for any of a number of reasons.
+ DSLikeImpl(const std::string& ds_str) {
+ std::istringstream iss(ds_str);
+ // peekc should be of iss's char_type for isspace to work
+ std::istringstream::char_type peekc;
+ std::stringbuf digestbuf;
+ uint32_t tag, algorithm, digest_type;
+
+ iss >> tag >> algorithm >> digest_type;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText,
+ "Invalid " << RRType(typeCode) << " text");
+ }
+ if (tag > 0xffff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " tag out of range");
+ }
+ if (algorithm > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " algorithm out of range");
+ }
+ if (digest_type > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " digest type out of range");
+ }
+
+ iss.read(&peekc, 1);
+ if (!iss.good() || !isspace(peekc, iss.getloc())) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " presentation format error");
+ }
+
+ iss >> &digestbuf;
+
+ tag_ = tag;
+ algorithm_ = algorithm;
+ digest_type_ = digest_type;
+ decodeHex(digestbuf.str(), digest_);
+ }
+
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if the input data is too short for the
+ /// type.
+ DSLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 4) {
+ isc_throw(InvalidRdataLength, RRType(typeCode) << " too short");
+ }
+
+ tag_ = buffer.readUint16();
+ algorithm_ = buffer.readUint8();
+ digest_type_ = buffer.readUint8();
+
+ rdata_len -= 4;
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+ }
+
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now; we could have used the default one.
+ DSLikeImpl(const DSLikeImpl& source) {
+ digest_ = source.digest_;
+ tag_ = source.tag_;
+ algorithm_ = source.algorithm_;
+ digest_type_ = source.digest_type_;
+ }
+
+ /// \brief Convert the DS-like data to a string.
+ ///
+ /// \return A \c string object that represents the DS-like data.
+ std::string
+ toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(static_cast<int>(tag_)) +
+ " " + lexical_cast<string>(static_cast<int>(algorithm_)) +
+ " " + lexical_cast<string>(static_cast<int>(digest_type_)) +
+ " " + encodeHex(digest_));
+ }
+
+ /// \brief Render the DS-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
+ void
+ toWire(OutputBuffer& buffer) const {
+ toWireCommon(buffer);
+ }
+
+ /// \brief Render the DS-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param renderer A renderer object to send the wire data to.
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ toWireCommon(renderer);
+ }
+
+ /// \brief Compare two instances of DS-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c DSLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
+ int
+ compare(const DSLikeImpl& other_ds) const {
+ if (tag_ != other_ds.tag_) {
+ return (tag_ < other_ds.tag_ ? -1 : 1);
+ }
+ if (algorithm_ != other_ds.algorithm_) {
+ return (algorithm_ < other_ds.algorithm_ ? -1 : 1);
+ }
+ if (digest_type_ != other_ds.digest_type_) {
+ return (digest_type_ < other_ds.digest_type_ ? -1 : 1);
+ }
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_ds.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_ds.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len)
+ ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+ /// \brief Accessors
+ uint16_t
+ getTag() const {
+ return (tag_);
+ }
+
+private:
+ // straightforward representation of DS RDATA fields
+ uint16_t tag_;
+ uint8_t algorithm_;
+ uint8_t digest_type_;
+ std::vector<uint8_t> digest_;
+};
+
+}
+}
+}
+}
+}
+#endif // __DS_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
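
The slightly unusual read()-then-isspace() step in the string constructor deserves a note: after the three numeric fields are extracted, exactly one separator character is consumed and checked before the rest of the line is swallowed as the digest. The same pattern in isolation (a standalone, illustrative helper, not library code):

    #include <locale>
    #include <sstream>
    #include <string>

    // Returns true if 'text' is "<num> <num> <num> <rest>", storing <rest>
    // (the presentation-format digest) into digest_out.
    bool splitFieldsAndDigest(const std::string& text, std::string& digest_out) {
        std::istringstream iss(text);
        unsigned int tag, algorithm, digest_type;
        iss >> tag >> algorithm >> digest_type;
        if (iss.bad() || iss.fail()) {
            return (false);                // malformed numeric fields
        }
        std::istringstream::char_type peekc;
        iss.read(&peekc, 1);
        if (!iss.good() || !std::isspace(peekc, iss.getloc())) {
            return (false);                // fields not separated from the digest
        }
        std::stringbuf digestbuf;
        iss >> &digestbuf;                 // everything that is left
        digest_out = digestbuf.str();
        return (true);
    }
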
diff --git a/src/lib/dns/rdata/generic/detail/txt_like.h b/src/lib/dns/rdata/generic/detail/txt_like.h
new file mode 100644
index 0000000..a0ab7ac
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/txt_like.h
@@ -0,0 +1,224 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __TXT_LIKE_H
+#define __TXT_LIKE_H 1
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+using namespace std;
+using namespace isc::util;
+
+/// \brief \c rdata::TXTLikeImpl class represents the TXT-like RDATA for TXT
+/// and SPF types.
+///
+/// This class implements the basic interfaces inherited by the TXT and SPF
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to TXT-like RDATA.
+template <class Type, uint16_t typeCode> class TXTLikeImpl {
+public:
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if rdata_len exceeds the maximum.
+ /// \c DNSMessageFORMERR is thrown if the RR is misformed.
+ TXTLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len > MAX_RDLENGTH) {
+ isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
+ }
+
+ if (rdata_len == 0) { // note: a 0-length RDATA can't be caught in the loop below.
+ isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+ RRType(typeCode) << " RDATA: 0-length character string");
+ }
+
+ do {
+ const uint8_t len = buffer.readUint8();
+ if (rdata_len < len + 1) {
+ isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+ RRType(typeCode) <<
+ " RDATA: character string length is too large: " <<
+ static_cast<int>(len));
+ }
+ vector<uint8_t> data(len + 1);
+ data[0] = len;
+ buffer.readData(&data[0] + 1, len);
+ string_list_.push_back(data);
+
+ rdata_len -= (len + 1);
+ } while (rdata_len > 0);
+ }
+
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c CharStringTooLong is thrown if the parameter string length exceeds
+ /// maximum.
+ /// \c InvalidRdataText is thrown if the method cannot process the
+ /// parameter data.
+ explicit TXTLikeImpl(const std::string& txtstr) {
+ // TBD: this is a simple, incomplete implementation that only supports
+ // a single character-string.
+
+ size_t length = txtstr.size();
+ size_t pos_begin = 0;
+
+ if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
+ pos_begin = 1;
+ length -= 2;
+ }
+
+ if (length > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, RRType(typeCode) <<
+ " RDATA construction from text:"
+ " string length is too long: " << length);
+ }
+
+ // TBD: right now, we don't support escaped characters
+ if (txtstr.find('\\') != string::npos) {
+ isc_throw(InvalidRdataText, RRType(typeCode) <<
+ " RDATA from text:"
+ " escaped character is currently not supported: " <<
+ txtstr);
+ }
+
+ vector<uint8_t> data;
+ data.reserve(length + 1);
+ data.push_back(length);
+ data.insert(data.end(), txtstr.begin() + pos_begin,
+ txtstr.begin() + pos_begin + length);
+ string_list_.push_back(data);
+ }
+
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now; we could have used the default one.
+ TXTLikeImpl(const TXTLikeImpl& other) :
+ string_list_(other.string_list_)
+ {}
+
+ /// \brief Render the TXT-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
+ void
+ toWire(OutputBuffer& buffer) const {
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ buffer.writeData(&(*it)[0], (*it).size());
+ }
+ }
+
+ /// \brief Render the TXT-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param buffer An output AbstractMessageRenderer to send the wire data
+ /// to.
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ renderer.writeData(&(*it)[0], (*it).size());
+ }
+ }
+
+ /// \brief Convert the TXT-like data to a string.
+ ///
+ /// \return A \c string object that represents the TXT-like data.
+ string
+ toText() const {
+ string s;
+
+ // XXX: this implementation is not entirely correct. For example, it
+ // should escape double-quotes if they appear in the character string.
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ if (!s.empty()) {
+ s.push_back(' ');
+ }
+ s.push_back('"');
+ s.insert(s.end(), (*it).begin() + 1, (*it).end());
+ s.push_back('"');
+ }
+
+ return (s);
+ }
+
+ /// \brief Compare two instances of TXT-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c TXTLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
+ int
+ compare(const TXTLikeImpl& other) const {
+ // This implementation is not efficient. Revisit this (TBD).
+ OutputBuffer this_buffer(0);
+ toWire(this_buffer);
+ const uint8_t* const this_data =
+ static_cast<const uint8_t*>(this_buffer.getData());
+ const size_t this_len = this_buffer.getLength();
+
+ OutputBuffer other_buffer(0);
+ other.toWire(other_buffer);
+ const uint8_t* const other_data =
+ static_cast<const uint8_t*>(other_buffer.getData());
+ const size_t other_len = other_buffer.getLength();
+
+ const size_t cmplen = min(this_len, other_len);
+ const int cmp = memcmp(this_data, other_data, cmplen);
+
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 :
+ (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+private:
+ /// Note: this is a prototype version; we may reconsider
+ /// this representation later.
+ std::vector<std::vector<uint8_t> > string_list_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+
+#endif // __TXT_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
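
For readers new to the format: a TXT-like RDATA on the wire is just a sequence of <character-string>s, each a one-byte length followed by that many bytes. A minimal standalone encoder for a single string (illustrative only):

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Encode one <character-string>: length octet + data, at most 255 bytes.
    std::vector<uint8_t> encodeCharString(const std::string& s) {
        if (s.size() > 255) {
            throw std::length_error("character-string longer than 255 bytes");
        }
        std::vector<uint8_t> wire;
        wire.push_back(static_cast<uint8_t>(s.size()));
        wire.insert(wire.end(), s.begin(), s.end());
        return (wire);
    }
    // A full TXT RDATA is one or more such strings back to back, which is what
    // string_list_ stores and toWire() writes out.
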
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
new file mode 100644
index 0000000..9887aa8
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <dns/rdata/generic/detail/ds_like.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const string& ds_str) :
+ impl_(new DLVImpl(ds_str))
+{}
+
+/// \brief Constructor from wire-format data.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DLVImpl(buffer, rdata_len))
+{}
+
+/// \brief Copy constructor
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const DLV& source) :
+ Rdata(), impl_(new DLVImpl(*source.impl_))
+{}
+
+/// \brief Assignment operator
+///
+/// PIMPL-induced logic
+DLV&
+DLV::operator=(const DLV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ DLVImpl* newimpl = new DLVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+/// \brief Destructor
+///
+/// Deallocates an internal resource.
+DLV::~DLV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c DLV to a string.
+///
+/// A pass-thru to the corresponding implementation method.
+string
+DLV::toText() const {
+ return (impl_->toText());
+}
+
+/// \brief Render the \c DLV in the wire format to an OutputBuffer object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+/// \brief Render the \c DLV in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+/// \brief Compare two instances of \c DLV RDATA.
+///
+/// The type check is performed here. Otherwise, a pass-thru to the
+/// corresponding implementation method.
+int
+DLV::compare(const Rdata& other) const {
+ const DLV& other_ds = dynamic_cast<const DLV&>(other);
+
+ return (impl_->compare(*other_ds.impl_));
+}
+
+/// \brief Tag accessor
+uint16_t
+DLV::getTag() const {
+ return (impl_->getTag());
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
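
The assignment operator above follows the usual pimpl rule for the strong exception guarantee: construct the new implementation first, and only delete the old one once nothing can throw. The same shape in isolation (a toy class, illustrative only):

    // Illustrative only; mirrors the allocate-before-delete pattern used by DLV.
    class Widget {
    public:
        Widget() : impl_(new Impl()) {}
        Widget(const Widget& other) : impl_(new Impl(*other.impl_)) {}
        ~Widget() { delete impl_; }
        Widget& operator=(const Widget& source) {
            if (impl_ == source.impl_) {              // also covers self-assignment
                return (*this);
            }
            Impl* newimpl = new Impl(*source.impl_);  // may throw; *this untouched
            delete impl_;                             // no-throw cleanup
            impl_ = newimpl;
            return (*this);
        }
    private:
        struct Impl {};
        Impl* impl_;
    };
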
diff --git a/src/lib/dns/rdata/generic/dlv_32769.h b/src/lib/dns/rdata/generic/dlv_32769.h
new file mode 100644
index 0000000..86cd98c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+
+/// \brief \c rdata::generic::DLV class represents the DLV RDATA as defined in
+/// RFC4431.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DLV RDATA.
+class DLV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ DLV& operator=(const DLV& source);
+
+ /// \brief The destructor.
+ ~DLV();
+
+ /// \brief Return the value of the Tag field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getTag() const;
+private:
+ typedef detail::DSLikeImpl<DLV, 32769> DLVImpl;
+ DLVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 1b48456..20b62dc 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,87 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <iostream>
#include <string>
-#include <sstream>
-#include <vector>
-
-#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
#include <util/encode/hex.h>
#include <dns/messagerenderer.h>
-#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
-#include <stdio.h>
-#include <time.h>
+#include <dns/rdata/generic/detail/ds_like.h>
using namespace std;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-struct DSImpl {
- // straightforward representation of DS RDATA fields
- DSImpl(uint16_t tag, uint8_t algorithm, uint8_t digest_type,
- const vector<uint8_t>& digest) :
- tag_(tag), algorithm_(algorithm), digest_type_(digest_type),
- digest_(digest)
- {}
-
- uint16_t tag_;
- uint8_t algorithm_;
- uint8_t digest_type_;
- const vector<uint8_t> digest_;
-};
-
DS::DS(const string& ds_str) :
- impl_(NULL)
-{
- istringstream iss(ds_str);
- unsigned int tag, algorithm, digest_type;
- stringbuf digestbuf;
-
- iss >> tag >> algorithm >> digest_type >> &digestbuf;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid DS text");
- }
- if (tag > 0xffff) {
- isc_throw(InvalidRdataText, "DS tag out of range");
- }
- if (algorithm > 0xff) {
- isc_throw(InvalidRdataText, "DS algorithm out of range");
- }
- if (digest_type > 0xff) {
- isc_throw(InvalidRdataText, "DS digest type out of range");
- }
-
- vector<uint8_t> digest;
- decodeHex(digestbuf.str(), digest);
-
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
-
-DS::DS(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len < 4) {
- isc_throw(InvalidRdataLength, "DS too short");
- }
-
- uint16_t tag = buffer.readUint16();
- uint16_t algorithm = buffer.readUint8();
- uint16_t digest_type = buffer.readUint8();
-
- rdata_len -= 4;
- vector<uint8_t> digest(rdata_len);
- buffer.readData(&digest[0], rdata_len);
+ impl_(new DSImpl(ds_str))
+{}
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
+DS::DS(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DSImpl(buffer, rdata_len))
+{}
DS::DS(const DS& source) :
Rdata(), impl_(new DSImpl(*source.impl_))
@@ -117,57 +62,29 @@ DS::~DS() {
string
DS::toText() const {
- using namespace boost;
- return (lexical_cast<string>(static_cast<int>(impl_->tag_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->algorithm_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->digest_type_)) +
- " " + encodeHex(impl_->digest_));
+ return (impl_->toText());
}
void
DS::toWire(OutputBuffer& buffer) const {
- buffer.writeUint16(impl_->tag_);
- buffer.writeUint8(impl_->algorithm_);
- buffer.writeUint8(impl_->digest_type_);
- buffer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(buffer);
}
void
DS::toWire(AbstractMessageRenderer& renderer) const {
- renderer.writeUint16(impl_->tag_);
- renderer.writeUint8(impl_->algorithm_);
- renderer.writeUint8(impl_->digest_type_);
- renderer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(renderer);
}
int
DS::compare(const Rdata& other) const {
const DS& other_ds = dynamic_cast<const DS&>(other);
- if (impl_->tag_ != other_ds.impl_->tag_) {
- return (impl_->tag_ < other_ds.impl_->tag_ ? -1 : 1);
- }
- if (impl_->algorithm_ != other_ds.impl_->algorithm_) {
- return (impl_->algorithm_ < other_ds.impl_->algorithm_ ? -1 : 1);
- }
- if (impl_->digest_type_ != other_ds.impl_->digest_type_) {
- return (impl_->digest_type_ < other_ds.impl_->digest_type_ ? -1 : 1);
- }
-
- size_t this_len = impl_->digest_.size();
- size_t other_len = other_ds.impl_->digest_.size();
- size_t cmplen = min(this_len, other_len);
- int cmp = memcmp(&impl_->digest_[0], &other_ds.impl_->digest_[0], cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_ds.impl_));
}
uint16_t
DS::getTag() const {
- return (impl_->tag_);
+ return (impl_->getTag());
}
// END_RDATA_NAMESPACE
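
With the shared implementation in place, DS behaves exactly as before from the caller's point of view. A short usage sketch (assuming the generated rdataclass.h from this branch; the record content below is made up for illustration):

    #include <dns/rdataclass.h>
    #include <iostream>

    using isc::dns::rdata::generic::DS;

    int main() {
        const DS ds("12345 5 1 2BB183AF5F22588179A53B0A98631FAD1A292118");
        std::cout << ds.getTag() << std::endl;   // 12345
        std::cout << ds.toText() << std::endl;   // round-trips the presentation form
        return (0);
    }
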
diff --git a/src/lib/dns/rdata/generic/ds_43.h b/src/lib/dns/rdata/generic/ds_43.h
index 03b19a0..2697f51 100644
--- a/src/lib/dns/rdata/generic/ds_43.h
+++ b/src/lib/dns/rdata/generic/ds_43.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <string>
@@ -21,8 +23,6 @@
#include <dns/rrttl.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
@@ -30,20 +30,41 @@
// BEGIN_RDATA_NAMESPACE
-struct DSImpl;
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+/// \brief \c rdata::generic::DS class represents the DS RDATA as defined in
+/// RFC3658.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DS RDATA.
class DS : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
DS& operator=(const DS& source);
+
+ /// \brief The destructor.
~DS();
+ /// \brief Return the value of the Tag field.
///
- /// Specialized methods
- ///
+ /// This method never throws an exception.
uint16_t getTag() const;
private:
+ typedef detail::DSLikeImpl<DS, 43> DSImpl;
DSImpl* impl_;
};
diff --git a/src/lib/dns/rdata/generic/hinfo_13.cc b/src/lib/dns/rdata/generic/hinfo_13.cc
new file mode 100644
index 0000000..45f4209
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.cc
@@ -0,0 +1,129 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/character_string.h>
+#include <util/strutil.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+
+HINFO::HINFO(const string& hinfo_str) {
+ string::const_iterator input_iterator = hinfo_str.begin();
+ cpu_ = getNextCharacterString(hinfo_str, input_iterator);
+
+ skipLeftSpaces(hinfo_str, input_iterator);
+
+ os_ = getNextCharacterString(hinfo_str, input_iterator);
+}
+
+HINFO::HINFO(InputBuffer& buffer, size_t rdata_len) {
+ cpu_ = getNextCharacterString(buffer, rdata_len);
+ os_ = getNextCharacterString(buffer, rdata_len);
+}
+
+HINFO::HINFO(const HINFO& source):
+ Rdata(), cpu_(source.cpu_), os_(source.os_)
+{
+}
+
+std::string
+HINFO::toText() const {
+ string result;
+ result += "\"";
+ result += cpu_;
+ result += "\" \"";
+ result += os_;
+ result += "\"";
+ return (result);
+}
+
+void
+HINFO::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+HINFO::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+int
+HINFO::compare(const Rdata& other) const {
+ const HINFO& other_hinfo = dynamic_cast<const HINFO&>(other);
+
+ if (cpu_ < other_hinfo.cpu_) {
+ return (-1);
+ } else if (cpu_ > other_hinfo.cpu_) {
+ return (1);
+ }
+
+ if (os_ < other_hinfo.os_) {
+ return (-1);
+ } else if (os_ > other_hinfo.os_) {
+ return (1);
+ }
+
+ return (0);
+}
+
+const std::string&
+HINFO::getCPU() const {
+ return (cpu_);
+}
+
+const std::string&
+HINFO::getOS() const {
+ return (os_);
+}
+
+void
+HINFO::skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
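
A usage sketch for the new class (assuming the generated rdataclass.h from this branch, and assuming getNextCharacterString() consumes an unquoted token up to the next whitespace; illustrative, not part of the commit). toText() always emits the two fields quoted:

    #include <dns/rdataclass.h>
    #include <cassert>

    using isc::dns::rdata::generic::HINFO;

    int main() {
        const HINFO hinfo("Pentium Linux");
        assert(hinfo.getCPU() == "Pentium");
        assert(hinfo.getOS() == "Linux");
        assert(hinfo.toText() == "\"Pentium\" \"Linux\"");
        return (0);
    }
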
diff --git a/src/lib/dns/rdata/generic/hinfo_13.h b/src/lib/dns/rdata/generic/hinfo_13.h
new file mode 100644
index 0000000..8513419
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c HINFO class represents the HINFO rdata defined in
+/// RFC1034, RFC1035
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// HINFO rdata.
+class HINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // HINFO specific methods
+ const std::string& getCPU() const;
+ const std::string& getOS() const;
+
+private:
+ /// Skip leading whitespace in the input string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator The position to start skipping from; it is
+ /// advanced past the whitespace
+ void skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer The buffer or renderer to write the wire data to
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint8(cpu_.size());
+ outputer.writeData(cpu_.c_str(), cpu_.size());
+
+ outputer.writeUint8(os_.size());
+ outputer.writeData(os_.c_str(), os_.size());
+ }
+
+ std::string cpu_;
+ std::string os_;
+};
+
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
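
toWireHelper() works because OutputBuffer and AbstractMessageRenderer expose the same writeUint8()/writeData() calls, so a single template serves both toWire() overloads. The pattern in isolation, with a toy sink (illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Any type providing writeUint8()/writeData() can act as the output target.
    struct VectorSink {
        std::vector<uint8_t> data;
        void writeUint8(uint8_t v) { data.push_back(v); }
        void writeData(const void* p, std::size_t len) {
            const uint8_t* bytes = static_cast<const uint8_t*>(p);
            data.insert(data.end(), bytes, bytes + len);
        }
    };

    // Same shape as HINFO::toWireHelper(): length-prefixed character-strings.
    template <typename Output>
    void writeCharString(Output& out, const std::string& s) {
        out.writeUint8(static_cast<uint8_t>(s.size()));
        out.writeData(s.c_str(), s.size());
    }
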
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
new file mode 100644
index 0000000..aa5272c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -0,0 +1,156 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c minfo_str must be formatted as follows:
+/// \code <rmailbox name> <emailbox name>
+/// \endcode
+/// where both fields must represent valid domain names.
+///
+/// An example of a valid string is:
+/// \code "rmail.example.com. email.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string are invalid.
+MINFO::MINFO(const std::string& minfo_str) :
+ // We cannot construct both names in the initialization list due to the
+ // necessary text processing, so we have to initialize them with a dummy
+ // name and replace them later.
+ rmailbox_(Name::ROOT_NAME()), emailbox_(Name::ROOT_NAME())
+{
+ istringstream iss(minfo_str);
+ string rmailbox_str, emailbox_str;
+ iss >> rmailbox_str >> emailbox_str;
+
+ // Validation: A valid MINFO RR must have exactly two fields.
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text: " << minfo_str);
+ }
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text (redundant field): "
+ << minfo_str);
+ }
+
+ rmailbox_ = Name(rmailbox_str);
+ emailbox_ = Name(emailbox_str);
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't validate the second parameter (rdata length)
+/// during parsing; if necessary, the caller must check consistency.
+///
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire data are invalid.
+MINFO::MINFO(InputBuffer& buffer, size_t) :
+ rmailbox_(buffer), emailbox_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+MINFO::MINFO(const MINFO& other) :
+ Rdata(), rmailbox_(other.rmailbox_), emailbox_(other.emailbox_)
+{}
+
+/// \brief Convert the \c MINFO to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c MINFO(const std::string&)).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c MINFO object.
+std::string
+MINFO::toText() const {
+ return (rmailbox_.toText() + " " + emailbox_.toText());
+}
+
+/// \brief Render the \c MINFO in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+MINFO::toWire(OutputBuffer& buffer) const {
+ rmailbox_.toWire(buffer);
+ emailbox_.toWire(buffer);
+}
+
+MINFO&
+MINFO::operator=(const MINFO& source) {
+ rmailbox_ = source.rmailbox_;
+ emailbox_ = source.emailbox_;
+
+ return (*this);
+}
+
+/// \brief Render the \c MINFO in the wire format, taking name compression
+/// into account.
+///
+/// As specified in RFC3597, TYPE MINFO is "well-known", so the rmailbox and
+/// emailbox fields (domain names) will be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+MINFO::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeName(rmailbox_);
+ renderer.writeName(emailbox_);
+}
+
+/// \brief Compare two instances of \c MINFO RDATA.
+///
+/// See documentation in \c Rdata.
+int
+MINFO::compare(const Rdata& other) const {
+ const MINFO& other_minfo = dynamic_cast<const MINFO&>(other);
+
+ const int cmp = compareNames(rmailbox_, other_minfo.rmailbox_);
+ if (cmp != 0) {
+ return (cmp);
+ }
+ return (compareNames(emailbox_, other_minfo.emailbox_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
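
The "exactly two fields" validation uses a common istringstream idiom: a failed extraction means too few fields, and a stream that is not at EOF afterwards means too many. In isolation (an illustrative helper, not library code):

    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Split text into exactly two whitespace-separated fields, or throw.
    void splitTwoFields(const std::string& text,
                        std::string& first, std::string& second) {
        std::istringstream iss(text);
        iss >> first >> second;
        if (iss.bad() || iss.fail()) {
            throw std::invalid_argument("fewer than two fields: " + text);
        }
        // Note: trailing whitespace also trips this check, just as in the
        // MINFO constructor above.
        if (!iss.eof()) {
            throw std::invalid_argument("more than two fields: " + text);
        }
    }
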
diff --git a/src/lib/dns/rdata/generic/minfo_14.h b/src/lib/dns/rdata/generic/minfo_14.h
new file mode 100644
index 0000000..f3ee1d0
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.h
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::generic::MINFO class represents the MINFO RDATA as
+/// defined in RFC1035.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// MINFO RDATA.
+class MINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Define the assignment operator.
+ ///
+ /// \exception std::bad_alloc Memory allocation fails in copying
+ /// internal member variables (this should be very rare).
+ MINFO& operator=(const MINFO& source);
+
+ /// \brief Return the value of the rmailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ ///
+ /// \note
+ /// Unlike the case of some other RDATA classes (such as
+ /// \c NS::getNSName()), this method constructs a new \c Name object
+ /// and returns it, instead of returning a reference to a \c Name object
+ /// internally maintained in the class (which is a private member).
+ /// This is based on the observation that this method will be rarely
+ /// used and even when it's used it will not be in a performance context
+ /// (for example, a recursive resolver won't need this field in its
+ /// resolution process). By returning a new object we have flexibility
+ /// of changing the internal representation without the risk of changing
+ /// the interface or method property.
+ /// The same note applies to the \c getEmailbox() method.
+ Name getRmailbox() const { return (rmailbox_); }
+
+ /// \brief Return the value of the emailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ Name getEmailbox() const { return (emailbox_); }
+
+private:
+ Name rmailbox_;
+ Name emailbox_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/naptr_35.cc b/src/lib/dns/rdata/generic/naptr_35.cc
new file mode 100644
index 0000000..129bf6c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.cc
@@ -0,0 +1,220 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/character_string.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+namespace {
+/// Skip leading whitespace in the input string
+///
+/// \param input_str The input string
+/// \param input_iterator The position to start skipping from; it is advanced
+/// past the whitespace
+void
+skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+} // Anonymous namespace
+
+NAPTR::NAPTR(InputBuffer& buffer, size_t len):
+ replacement_(".")
+{
+ order_ = buffer.readUint16();
+ preference_ = buffer.readUint16();
+
+ flags_ = getNextCharacterString(buffer, len);
+ services_ = getNextCharacterString(buffer, len);
+ regexp_ = getNextCharacterString(buffer, len);
+ replacement_ = Name(buffer);
+}
+
+NAPTR::NAPTR(const std::string& naptr_str):
+ replacement_(".")
+{
+ istringstream iss(naptr_str);
+ uint16_t order;
+ uint16_t preference;
+
+ iss >> order >> preference;
+
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid NAPTR text format");
+ }
+
+ order_ = order;
+ preference_ = preference;
+
+ string::const_iterator input_iterator = naptr_str.begin() + iss.tellg();
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ flags_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ services_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ regexp_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ if (input_iterator < naptr_str.end()) {
+ string replacementStr(input_iterator, naptr_str.end());
+
+ replacement_ = Name(replacementStr);
+ } else {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, replacement field is missing");
+ }
+}
+
+NAPTR::NAPTR(const NAPTR& naptr):
+ Rdata(), order_(naptr.order_), preference_(naptr.preference_),
+ flags_(naptr.flags_), services_(naptr.services_), regexp_(naptr.regexp_),
+ replacement_(naptr.replacement_)
+{
+}
+
+void
+NAPTR::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+NAPTR::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+string
+NAPTR::toText() const {
+ string result;
+ result += lexical_cast<string>(order_);
+ result += " ";
+ result += lexical_cast<string>(preference_);
+ result += " \"";
+ result += flags_;
+ result += "\" \"";
+ result += services_;
+ result += "\" \"";
+ result += regexp_;
+ result += "\" ";
+ result += replacement_.toText();
+ return (result);
+}
+
+int
+NAPTR::compare(const Rdata& other) const {
+ const NAPTR& other_naptr = dynamic_cast<const NAPTR&>(other);
+
+ if (order_ < other_naptr.order_) {
+ return (-1);
+ } else if (order_ > other_naptr.order_) {
+ return (1);
+ }
+
+ if (preference_ < other_naptr.preference_) {
+ return (-1);
+ } else if (preference_ > other_naptr.preference_) {
+ return (1);
+ }
+
+ if (flags_ < other_naptr.flags_) {
+ return (-1);
+ } else if (flags_ > other_naptr.flags_) {
+ return (1);
+ }
+
+ if (services_ < other_naptr.services_) {
+ return (-1);
+ } else if (services_ > other_naptr.services_) {
+ return (1);
+ }
+
+ if (regexp_ < other_naptr.regexp_) {
+ return (-1);
+ } else if (regexp_ > other_naptr.regexp_) {
+ return (1);
+ }
+
+ return (compareNames(replacement_, other_naptr.replacement_));
+}
+
+uint16_t
+NAPTR::getOrder() const {
+ return (order_);
+}
+
+uint16_t
+NAPTR::getPreference() const {
+ return (preference_);
+}
+
+const std::string&
+NAPTR::getFlags() const {
+ return (flags_);
+}
+
+const std::string&
+NAPTR::getServices() const {
+ return (services_);
+}
+
+const std::string&
+NAPTR::getRegexp() const {
+ return (regexp_);
+}
+
+const Name&
+NAPTR::getReplacement() const {
+ return (replacement_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
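
The constructor mixes formatted extraction (for order and preference) with hand-rolled scanning (for the quoted fields) by using tellg() to find where the stream stopped. The core of that trick in isolation (illustrative record content):

    #include <cassert>
    #include <sstream>
    #include <string>

    int main() {
        const std::string text =
            "100 10 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
        std::istringstream iss(text);
        unsigned int order, preference;
        iss >> order >> preference;
        assert(!iss.fail());

        // Continue scanning by hand from where formatted input stopped,
        // exactly as the NAPTR constructor does.
        std::string::const_iterator it = text.begin() + iss.tellg();
        assert(*it == ' ');   // positioned just after "10"
        return (0);
    }
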
diff --git a/src/lib/dns/rdata/generic/naptr_35.h b/src/lib/dns/rdata/generic/naptr_35.h
new file mode 100644
index 0000000..ca16b3c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c NAPTR class represents the NAPTR rdata defined in
+/// RFC2915, RFC2168 and RFC3403
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// NAPTR rdata.
+class NAPTR : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // NAPTR specific methods
+ uint16_t getOrder() const;
+ uint16_t getPreference() const;
+ const std::string& getFlags() const;
+ const std::string& getServices() const;
+ const std::string& getRegexp() const;
+ const Name& getReplacement() const;
+private:
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer The buffer or renderer to write the wire data to
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint16(order_);
+ outputer.writeUint16(preference_);
+
+ outputer.writeUint8(flags_.size());
+ outputer.writeData(flags_.c_str(), flags_.size());
+
+ outputer.writeUint8(services_.size());
+ outputer.writeData(services_.c_str(), services_.size());
+
+ outputer.writeUint8(regexp_.size());
+ outputer.writeData(regexp_.c_str(), regexp_.size());
+
+ replacement_.toWire(outputer);
+ }
+
+ uint16_t order_;
+ uint16_t preference_;
+ std::string flags_;
+ std::string services_;
+ std::string regexp_;
+ Name replacement_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
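
For reference, a minimal usage sketch of the accessors declared above. The
text-form constructor is assumed to be generated by the BEGIN_COMMON_MEMBERS
block (as for the other rdata classes), and the RDATA values are purely
illustrative:

    #include <iostream>
    #include <dns/rdataclass.h>

    using namespace isc::dns::rdata;

    void naptrExample() {
        // order, preference, flags, services, regexp and replacement,
        // in the usual master-file order (empty regexp given as "")
        const generic::NAPTR naptr(
            "100 10 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.");
        std::cout << naptr.getOrder() << " "
                  << naptr.getPreference() << " "
                  << naptr.getFlags() << " "
                  << naptr.getServices() << " "
                  << naptr.getReplacement().toText() << std::endl;
    }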
diff --git a/src/lib/dns/rdata/generic/nsec_47.cc b/src/lib/dns/rdata/generic/nsec_47.cc
index 93b8b5f..4723c23 100644
--- a/src/lib/dns/rdata/generic/nsec_47.cc
+++ b/src/lib/dns/rdata/generic/nsec_47.cc
@@ -178,6 +178,11 @@ NSEC::toWire(AbstractMessageRenderer& renderer) const {
renderer.writeData(&impl_->typebits_[0], impl_->typebits_.size());
}
+const Name&
+NSEC::getNextName() const {
+ return (impl_->nextname_);
+}
+
int
NSEC::compare(const Rdata& other) const {
const NSEC& other_nsec = dynamic_cast<const NSEC&>(other);
diff --git a/src/lib/dns/rdata/generic/nsec_47.h b/src/lib/dns/rdata/generic/nsec_47.h
index b86a25b..005dd3a 100644
--- a/src/lib/dns/rdata/generic/nsec_47.h
+++ b/src/lib/dns/rdata/generic/nsec_47.h
@@ -38,6 +38,16 @@ public:
// END_COMMON_MEMBERS
NSEC& operator=(const NSEC& source);
~NSEC();
+
+ // specialized methods
+
+ /// Return the next domain name.
+ ///
+ /// \exception std::bad_alloc Resource allocation failure in name copy.
+ ///
+ /// \return The next domain name field in the form of a \c Name object.
+ const Name& getNextName() const;
+
private:
NSECImpl* impl_;
};
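
A small sketch of how the new getNextName() accessor might be used; the
text-form NSEC constructor and the RDATA value are assumed for illustration
only:

    #include <cassert>
    #include <dns/name.h>
    #include <dns/rdataclass.h>

    using namespace isc::dns;
    using namespace isc::dns::rdata;

    void nsecExample() {
        // "next domain name" followed by the type bitmap, as in zone file text
        const generic::NSEC nsec("b.example.org. A RRSIG NSEC");
        assert(nsec.getNextName() == Name("b.example.org."));
    }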
diff --git a/src/lib/dns/rdata/generic/rp_17.cc b/src/lib/dns/rdata/generic/rp_17.cc
index b8b2ba2..781b55d 100644
--- a/src/lib/dns/rdata/generic/rp_17.cc
+++ b/src/lib/dns/rdata/generic/rp_17.cc
@@ -24,6 +24,7 @@
using namespace std;
using namespace isc::dns;
+using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 0c82406..59ff030 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -243,5 +243,10 @@ RRSIG::compare(const Rdata& other) const {
}
}
+const RRType&
+RRSIG::typeCovered() const {
+ return (impl_->covered_);
+}
+
// END_RDATA_NAMESPACE
// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index 19acc40..b32c17f 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -38,6 +38,9 @@ public:
// END_COMMON_MEMBERS
RRSIG& operator=(const RRSIG& source);
~RRSIG();
+
+ // specialized methods
+ const RRType& typeCovered() const;
private:
RRSIGImpl* impl_;
};
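
A brief illustrative sketch of the kind of check the new typeCovered()
accessor makes possible:

    #include <dns/rdataclass.h>
    #include <dns/rrtype.h>

    using namespace isc::dns;
    using namespace isc::dns::rdata;

    // Does this RRSIG cover an A record?
    bool coversA(const generic::RRSIG& sig) {
        return (sig.typeCovered() == RRType::A());
    }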
diff --git a/src/lib/dns/rdata/generic/spf_99.cc b/src/lib/dns/rdata/generic/spf_99.cc
new file mode 100644
index 0000000..aa3e4a1
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.cc
@@ -0,0 +1,131 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <vector>
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class are provided by
+/// an instantiation of the TXTLikeImpl class template shared by TXT and SPF.
+
+#include <dns/rdata/generic/detail/txt_like.h>
+
+/// \brief The assignment operator
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+SPF&
+SPF::operator=(const SPF& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ SPFImpl* newimpl = new SPFImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+/// \brief The destructor
+SPF::~SPF() {
+ delete impl_;
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new SPFImpl(buffer, rdata_len))
+{}
+
+/// \brief Constructor from string.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(const std::string& txtstr) :
+ impl_(new SPFImpl(txtstr))
+{}
+
+/// \brief Copy constructor
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(const SPF& other) :
+ Rdata(), impl_(new SPFImpl(*other.impl_))
+{}
+
+/// \brief Render the \c SPF in the wire format to an OutputBuffer object
+///
+/// \return The return value of the corresponding implementation method.
+void
+SPF::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+/// \brief Render the \c SPF in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// \return The return value of the corresponding implementation method.
+void
+SPF::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+/// \brief Convert the \c SPF to a string.
+///
+/// \return The return value of the corresponding implementation method.
+string
+SPF::toText() const {
+ return (impl_->toText());
+}
+
+/// \brief Compare two instances of \c SPF RDATA.
+///
+/// This method compares \c this and the \c other \c SPF objects.
+///
+/// This method is expected to be used in a polymorphic way, and the
+/// parameter to compare against is therefore of the abstract \c Rdata class.
+/// However, comparing two \c Rdata objects of different RR types
+/// is meaningless, and \c other must point to a \c SPF object;
+/// otherwise, the standard \c bad_cast exception will be thrown.
+///
+/// \param other the right-hand operand to compare against.
+/// \return The return value of the corresponding implementation method.
+int
+SPF::compare(const Rdata& other) const {
+ const SPF& other_txt = dynamic_cast<const SPF&>(other);
+
+ return (impl_->compare(*other_txt.impl_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/spf_99.h b/src/lib/dns/rdata/generic/spf_99.h
new file mode 100644
index 0000000..04ac99b
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
+/// \brief \c rdata::SPF class represents the SPF RDATA as defined %in
+/// RFC4408.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class are provided by
+/// an instantiation of the TXTLikeImpl class template shared by TXT and SPF.
+class SPF : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ SPF& operator=(const SPF& source);
+
+ /// \brief The destructor.
+ ~SPF();
+
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return a reference to the data strings
+ ///
+ /// This method never throws an exception.
+ const std::vector<std::vector<uint8_t> >& getString() const;
+
+private:
+ typedef TXTLikeImpl<SPF, 99> SPFImpl;
+ SPFImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
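
A rough usage sketch for the new SPF class. Text parsing is delegated to the
shared TXTLikeImpl, so the single quoted <character-string> form used below is
an assumption:

    #include <iostream>
    #include <dns/rdataclass.h>

    using namespace isc::dns::rdata;

    void spfExample() {
        // One quoted <character-string> holding the SPF policy text.
        const generic::SPF spf("\"v=spf1 mx -all\"");
        std::cout << spf.toText() << std::endl;
        // getString() exposes the stored character-strings as byte vectors.
        std::cout << spf.getString().size() << std::endl;
    }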
diff --git a/src/lib/dns/rdata/generic/txt_16.cc b/src/lib/dns/rdata/generic/txt_16.cc
index ac2ba8a..418bc05 100644
--- a/src/lib/dns/rdata/generic/txt_16.cc
+++ b/src/lib/dns/rdata/generic/txt_16.cc
@@ -30,130 +30,57 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-TXT::TXT(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len > MAX_RDLENGTH) {
- isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
- }
+#include <dns/rdata/generic/detail/txt_like.h>
- if (rdata_len == 0) { // note that this couldn't happen in the loop.
- isc_throw(DNSMessageFORMERR,
- "Error in parsing TXT RDATA: 0-length character string");
+TXT&
+TXT::operator=(const TXT& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
}
- do {
- const uint8_t len = buffer.readUint8();
- if (rdata_len < len + 1) {
- isc_throw(DNSMessageFORMERR,
- "Error in parsing TXT RDATA: character string length "
- "is too large: " << static_cast<int>(len));
- }
- vector<uint8_t> data(len + 1);
- data[0] = len;
- buffer.readData(&data[0] + 1, len);
- string_list_.push_back(data);
-
- rdata_len -= (len + 1);
- } while (rdata_len > 0);
-}
-
-TXT::TXT(const std::string& txtstr) {
- // TBD: this is a simple, incomplete implementation that only supports
- // a single character-string.
+ TXTImpl* newimpl = new TXTImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
- size_t length = txtstr.size();
- size_t pos_begin = 0;
-
- if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
- pos_begin = 1;
- length -= 2;
- }
+ return (*this);
+}
- if (length > MAX_CHARSTRING_LEN) {
- isc_throw(CharStringTooLong, "TXT RDATA construction from text: "
- "string length is too long: " << length);
- }
+TXT::~TXT() {
+ delete impl_;
+}
- // TBD: right now, we don't support escaped characters
- if (txtstr.find('\\') != string::npos) {
- isc_throw(InvalidRdataText, "TXT RDATA from text: "
- "escaped character is currently not supported: " << txtstr);
- }
+TXT::TXT(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new TXTImpl(buffer, rdata_len))
+{}
- vector<uint8_t> data;
- data.reserve(length + 1);
- data.push_back(length);
- data.insert(data.end(), txtstr.begin() + pos_begin,
- txtstr.begin() + pos_begin + length);
- string_list_.push_back(data);
-}
+TXT::TXT(const std::string& txtstr) :
+ impl_(new TXTImpl(txtstr))
+{}
TXT::TXT(const TXT& other) :
- Rdata(), string_list_(other.string_list_)
+ Rdata(), impl_(new TXTImpl(*other.impl_))
{}
void
TXT::toWire(OutputBuffer& buffer) const {
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- buffer.writeData(&(*it)[0], (*it).size());
- }
+ impl_->toWire(buffer);
}
void
TXT::toWire(AbstractMessageRenderer& renderer) const {
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- renderer.writeData(&(*it)[0], (*it).size());
- }
+ impl_->toWire(renderer);
}
string
TXT::toText() const {
- string s;
-
- // XXX: this implementation is not entirely correct. for example, it
- // should escape double-quotes if they appear in the character string.
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- if (!s.empty()) {
- s.push_back(' ');
- }
- s.push_back('"');
- s.insert(s.end(), (*it).begin() + 1, (*it).end());
- s.push_back('"');
- }
-
- return (s);
+ return (impl_->toText());
}
int
TXT::compare(const Rdata& other) const {
const TXT& other_txt = dynamic_cast<const TXT&>(other);
- // This implementation is not efficient. Revisit this (TBD).
- OutputBuffer this_buffer(0);
- toWire(this_buffer);
- size_t this_len = this_buffer.getLength();
-
- OutputBuffer other_buffer(0);
- other_txt.toWire(other_buffer);
- const size_t other_len = other_buffer.getLength();
-
- const size_t cmplen = min(this_len, other_len);
- const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
- cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 :
- (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_txt.impl_));
}
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/txt_16.h b/src/lib/dns/rdata/generic/txt_16.h
index b4c791f..d99d69b 100644
--- a/src/lib/dns/rdata/generic/txt_16.h
+++ b/src/lib/dns/rdata/generic/txt_16.h
@@ -28,14 +28,19 @@
// BEGIN_RDATA_NAMESPACE
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
class TXT : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ TXT& operator=(const TXT& source);
+ ~TXT();
+
private:
- /// Note: this is a prototype version; we may reconsider
- /// this representation later.
- std::vector<std::vector<uint8_t> > string_list_;
+ typedef TXTLikeImpl<TXT, 16> TXTImpl;
+ TXTImpl* impl_;
};
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
new file mode 100644
index 0000000..f0c4aca
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -0,0 +1,145 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/encode/base64.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \param dhcid_str A base-64 representation of the DHCID binary data.
+/// The data is considered to be opaque, but a sanity check is performed.
+///
+/// <b>Exceptions</b>
+///
+/// \c dhcid_str must be a valid BASE-64 string, otherwise an exception
+/// of class \c isc::BadValue will be thrown;
+/// the binary data should consist of at least 3 octets as per RFC4701:
+/// < 2 octets > Identifier type code
+/// < 1 octet > Digest type code
+/// < n octets > Digest (length depends on digest type)
+/// If the data is less than 3 octets (i.e. it cannot contain id type code and
+/// digest type code), an exception of class \c InvalidRdataLength is thrown.
+DHCID::DHCID(const string& dhcid_str) {
+ istringstream iss(dhcid_str);
+ stringbuf digestbuf;
+
+ iss >> &digestbuf;
+ isc::util::encode::decodeBase64(digestbuf.str(), digest_);
+
+ // RFC4701 states DNS software should consider the RDATA section to
+ // be opaque, but there must be at least three bytes in the data:
+ // < 2 octets > Identifier type code
+ // < 1 octet > Digest type code
+ if (digest_.size() < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << digest_.size() <<
+ " too short, need at least 3 bytes");
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes
+///
+/// <b>Exceptions</b>
+/// \c InvalidRdataLength is thrown if \c rdata_len is less than the minimum of 3 octets.
+DHCID::DHCID(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << rdata_len <<
+ " too short, need at least 3 bytes");
+ }
+
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+}
+
+/// \brief The copy constructor.
+///
+/// This trivial copy constructor never throws an exception.
+DHCID::DHCID(const DHCID& other) : Rdata(), digest_(other.digest_)
+{}
+
+/// \brief Render the \c DHCID in the wire format.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+DHCID::toWire(OutputBuffer& buffer) const {
+ buffer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Render the \c DHCID in the wire format into a
+/// \c MessageRenderer object.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer in which the \c DHCID is to be stored.
+void
+DHCID::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Convert the \c DHCID to a string.
+///
+/// This method returns a \c std::string object representing the \c DHCID.
+///
+/// \return A string representation of \c DHCID.
+string
+DHCID::toText() const {
+ return (isc::util::encode::encodeBase64(digest_));
+}
+
+/// \brief Compare two instances of \c DHCID RDATA.
+///
+/// See documentation in \c Rdata.
+int
+DHCID::compare(const Rdata& other) const {
+ const DHCID& other_dhcid = dynamic_cast<const DHCID&>(other);
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_dhcid.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_dhcid.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+}
+
+/// \brief Accessor method to get the DHCID digest
+///
+/// \return A reference to the binary DHCID data
+const std::vector<uint8_t>&
+DHCID::getDigest() const {
+ return (digest_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
new file mode 100644
index 0000000..90f5fab
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -0,0 +1,58 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::DHCID class represents the DHCID RDATA as defined %in
+/// RFC4701.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DHCID RDATA.
+class DHCID : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Return the digest.
+ ///
+ /// This method never throws an exception.
+ const std::vector<uint8_t>& getDigest() const;
+
+private:
+ /// \brief Private data representation
+ ///
+ /// Opaque data at least 3 octets long as per RFC4701.
+ ///
+ std::vector<uint8_t> digest_;
+};
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
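
A minimal sketch of the DHCID interface added above; the base-64 value is
made up (the class treats the data as opaque apart from the 3-octet minimum):

    #include <iostream>
    #include <dns/rdataclass.h>

    using namespace isc::dns::rdata;

    void dhcidExample() {
        // "AAECAw==" is the base-64 encoding of the four octets 00 01 02 03.
        const in::DHCID dhcid("AAECAw==");
        std::cout << dhcid.getDigest().size() << " "   // 4
                  << dhcid.toText() << std::endl;      // AAECAw==
    }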
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
new file mode 100644
index 0000000..93b5d4d
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -0,0 +1,245 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <sstream>
+
+#include <boost/lexical_cast.hpp>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl {
+ // straightforward representation of SRV RDATA fields
+ SRVImpl(uint16_t priority, uint16_t weight, uint16_t port,
+ const Name& target) :
+ priority_(priority), weight_(weight), port_(port),
+ target_(target)
+ {}
+
+ uint16_t priority_;
+ uint16_t weight_;
+ uint16_t port_;
+ Name target_;
+};
+
+/// \brief Constructor from string.
+///
+/// \c srv_str must be formatted as follows:
+/// \code <Priority> <Weight> <Port> <Target>
+/// \endcode
+/// where
+/// - <Priority>, <Weight>, and <Port> are each an unsigned 16-bit decimal
+/// integer.
+/// - <Target> is a valid textual representation of domain name.
+///
+/// An example of valid string is:
+/// \code "1 5 1500 example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// If <Target> is not a valid domain name, a corresponding exception from
+/// the \c Name class will be thrown;
+/// if %any of the other bullet points above is not met, an exception of
+/// class \c InvalidRdataText will be thrown.
+/// This constructor internally involves resource allocation, and if it fails
+/// a corresponding standard exception will be thrown.
+SRV::SRV(const string& srv_str) :
+ impl_(NULL)
+{
+ istringstream iss(srv_str);
+
+ try {
+ const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name targetname(getToken(iss));
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
+ srv_str);
+ }
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid SRV text: " <<
+ ste.what() << ": " << srv_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// When a read operation on \c buffer fails (e.g., due to a corrupted
+/// message) a corresponding exception from the \c InputBuffer class will
+/// be thrown.
+/// If the wire-format data does not end with a valid domain name,
+/// a corresponding exception from the \c Name class will be thrown.
+/// In addition, this constructor internally involves resource allocation,
+/// and if it fails a corresponding standard exception will be thrown.
+///
+/// According to RFC2782, the Target field must be a non-compressed form
+/// of domain name. This implementation, however, accepts an %SRV RR even
+/// if that field is compressed, as suggested in RFC3597.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes, normally expected
+/// to be the value of the RDLENGTH field of the corresponding RR.
+SRV::SRV(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 6) {
+ isc_throw(InvalidRdataLength, "SRV too short");
+ }
+
+ uint16_t priority = buffer.readUint16();
+ uint16_t weight = buffer.readUint16();
+ uint16_t port = buffer.readUint16();
+ const Name targetname(buffer);
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+}
+
+/// \brief The copy constructor.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This constructor never throws an exception otherwise.
+SRV::SRV(const SRV& source) :
+ Rdata(), impl_(new SRVImpl(*source.impl_))
+{}
+
+SRV&
+SRV::operator=(const SRV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ SRVImpl* newimpl = new SRVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+SRV::~SRV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c SRV to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c SRV(const std::string&)).
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+///
+/// \return A \c string object that represents the \c SRV object.
+string
+SRV::toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(impl_->priority_) +
+ " " + lexical_cast<string>(impl_->weight_) +
+ " " + lexical_cast<string>(impl_->port_) +
+ " " + impl_->target_.toText());
+}
+
+/// \brief Render the \c SRV in the wire format without name compression.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+SRV::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(impl_->priority_);
+ buffer.writeUint16(impl_->weight_);
+ buffer.writeUint16(impl_->port_);
+ impl_->target_.toWire(buffer);
+}
+
+/// \brief Render the \c SRV in the wire format, taking name compression
+/// into account.
+///
+/// As specified in RFC2782, the Target field (a domain name) will not be
+/// compressed. However, since that name could be a compression target for
+/// other compressible names (though this is unlikely), the offset
+/// information of the target name may be recorded in \c renderer.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+SRV::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(impl_->priority_);
+ renderer.writeUint16(impl_->weight_);
+ renderer.writeUint16(impl_->port_);
+ renderer.writeName(impl_->target_, false);
+}
+
+/// \brief Compare two instances of \c SRV RDATA.
+///
+/// See documentation in \c Rdata.
+int
+SRV::compare(const Rdata& other) const {
+ const SRV& other_srv = dynamic_cast<const SRV&>(other);
+
+ if (impl_->priority_ != other_srv.impl_->priority_) {
+ return (impl_->priority_ < other_srv.impl_->priority_ ? -1 : 1);
+ }
+ if (impl_->weight_ != other_srv.impl_->weight_) {
+ return (impl_->weight_ < other_srv.impl_->weight_ ? -1 : 1);
+ }
+ if (impl_->port_ != other_srv.impl_->port_) {
+ return (impl_->port_ < other_srv.impl_->port_ ? -1 : 1);
+ }
+
+ return (compareNames(impl_->target_, other_srv.impl_->target_));
+}
+
+uint16_t
+SRV::getPriority() const {
+ return (impl_->priority_);
+}
+
+uint16_t
+SRV::getWeight() const {
+ return (impl_->weight_);
+}
+
+uint16_t
+SRV::getPort() const {
+ return (impl_->port_);
+}
+
+const Name&
+SRV::getTarget() const {
+ return (impl_->target_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/srv_33.h b/src/lib/dns/rdata/in_1/srv_33.h
new file mode 100644
index 0000000..32b7dc0
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.h
@@ -0,0 +1,93 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl;
+
+/// \brief \c rdata::SRV class represents the SRV RDATA as defined %in
+/// RFC2782.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// SRV RDATA.
+class SRV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ SRV& operator=(const SRV& source);
+
+ /// \brief The destructor.
+ ~SRV();
+
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the priority field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getPriority() const;
+
+ /// \brief Return the value of the weight field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getWeight() const;
+
+ /// \brief Return the value of the port field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getPort() const;
+
+ /// \brief Return the value of the target field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal target name.
+ ///
+ /// This method never throws an exception.
+ const Name& getTarget() const;
+
+private:
+ SRVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
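
A short sketch exercising the SRV accessors, using the same
"<Priority> <Weight> <Port> <Target>" text form documented in the
constructor above:

    #include <iostream>
    #include <dns/rdataclass.h>

    using namespace isc::dns::rdata;

    void srvExample() {
        const in::SRV srv("1 5 1500 example.com.");
        std::cout << srv.getPriority() << " "               // 1
                  << srv.getWeight() << " "                 // 5
                  << srv.getPort() << " "                   // 1500
                  << srv.getTarget().toText() << std::endl; // example.com.
    }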
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index d9f08ee..e85f82c 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -18,6 +18,7 @@
#include <dns/messagerenderer.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rrtype.h>
using namespace std;
using namespace isc::util;
diff --git a/src/lib/dns/rdatafields.h b/src/lib/dns/rdatafields.h
index e33bcd7..16880f0 100644
--- a/src/lib/dns/rdatafields.h
+++ b/src/lib/dns/rdatafields.h
@@ -296,7 +296,7 @@ public:
/// as long as the \c RdataFields object is used.
///
/// \param fields An array of \c FieldSpec entries. This can be \c NULL.
- /// \param nfields The number of entries of \c fields.
+ /// \param fields_length The total length of the \c fields.
/// \param data A pointer to memory region for the entire RDATA. This can
/// be NULL.
/// \param data_length The length of \c data in bytes.
diff --git a/src/lib/dns/rrset.h b/src/lib/dns/rrset.h
index 6c15b53..1586465 100644
--- a/src/lib/dns/rrset.h
+++ b/src/lib/dns/rrset.h
@@ -478,7 +478,7 @@ public:
/// \brief Return the current \c Rdata corresponding to the rdata cursor.
///
- /// \return A reference to an \c rdata::::Rdata object corresponding
+ /// \return A reference to an \c rdata::Rdata object corresponding
/// to the rdata cursor.
virtual const rdata::Rdata& getCurrent() const = 0;
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 1cb028c..dad1b2b 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -22,6 +22,11 @@
#include <exceptions/exceptions.h>
+// Solaris x86 defines DS in <sys/regset.h>, which gets pulled in by Boost
+#if defined(__sun) && defined(DS)
+# undef DS
+#endif
+
namespace isc {
namespace util {
class InputBuffer;
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index 9783beb..ceeb3b8 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -29,19 +29,26 @@ run_unittests_SOURCES += rdata_unittest.h rdata_unittest.cc
run_unittests_SOURCES += rdatafields_unittest.cc
run_unittests_SOURCES += rdata_in_a_unittest.cc rdata_in_aaaa_unittest.cc
run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
-run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
+run_unittests_SOURCES += rdata_txt_like_unittest.cc
+run_unittests_SOURCES += rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
+run_unittests_SOURCES += rdata_dhcid_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
-run_unittests_SOURCES += rdata_ds_unittest.cc
+run_unittests_SOURCES += rdata_ds_like_unittest.cc
run_unittests_SOURCES += rdata_nsec_unittest.cc
run_unittests_SOURCES += rdata_nsec3_unittest.cc
run_unittests_SOURCES += rdata_nsecbitmap_unittest.cc
run_unittests_SOURCES += rdata_nsec3param_unittest.cc
run_unittests_SOURCES += rdata_rrsig_unittest.cc
run_unittests_SOURCES += rdata_rp_unittest.cc
+run_unittests_SOURCES += rdata_srv_unittest.cc
+run_unittests_SOURCES += rdata_minfo_unittest.cc
run_unittests_SOURCES += rdata_tsig_unittest.cc
+run_unittests_SOURCES += rdata_naptr_unittest.cc
+run_unittests_SOURCES += rdata_hinfo_unittest.cc
run_unittests_SOURCES += rrset_unittest.cc rrsetlist_unittest.cc
run_unittests_SOURCES += question_unittest.cc
run_unittests_SOURCES += rrparamregistry_unittest.cc
@@ -51,14 +58,18 @@ run_unittests_SOURCES += tsig_unittest.cc
run_unittests_SOURCES += tsigerror_unittest.cc
run_unittests_SOURCES += tsigkey_unittest.cc
run_unittests_SOURCES += tsigrecord_unittest.cc
+run_unittests_SOURCES += character_string_unittest.cc
run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+# We shouldn't need to include BOTAN_LDFLAGS here, but there
+# is one test system where the path for GTEST_LDFLAGS contains
+# an older version of botan, and somehow that version gets
+# linked if we don't
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
-run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/dns/tests/character_string_unittest.cc b/src/lib/dns/tests/character_string_unittest.cc
new file mode 100644
index 0000000..5fed9eb
--- /dev/null
+++ b/src/lib/dns/tests/character_string_unittest.cc
@@ -0,0 +1,92 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+
+#include <dns/rdata.h>
+#include <dns/tests/unittest_util.h>
+#include <dns/character_string.h>
+
+using isc::UnitTestUtil;
+
+using namespace std;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+using namespace isc::dns::rdata;
+
+namespace {
+
+class CharacterString {
+public:
+ CharacterString(const string& str){
+ string::const_iterator it = str.begin();
+ characterStr_ = getNextCharacterString(str, it);
+ }
+ const string& str() const { return characterStr_; }
+private:
+ string characterStr_;
+};
+
+TEST(CharacterStringTest, testNormalCase) {
+ CharacterString cstr1("foo");
+ EXPECT_EQ(string("foo"), cstr1.str());
+
+ // Test <character-string> that is delimited by a space
+ CharacterString cstr2("foo bar");
+ EXPECT_EQ(string("foo"), cstr2.str());
+
+ // Test <character-string> that is enclosed in quotes
+ CharacterString cstr3("\"foo bar\"");
+ EXPECT_EQ(string("foo bar"), cstr3.str());
+
+ // Test <character-string> that is not enclosed in quotes but ends with a quote
+ CharacterString cstr4("foo\"");
+ EXPECT_EQ(string("foo\""), cstr4.str());
+}
+
+TEST(CharacterStringTest, testBadCase) {
+ // A <character-string> that starts with a quote must also end
+ // with a quote
+ EXPECT_THROW(CharacterString cstr("\"foo"), InvalidRdataText);
+
+ // The string length cannot exceed 255 characters
+ string str;
+ for (int i = 0; i < 257; ++i) {
+ str += 'A';
+ }
+ EXPECT_THROW(CharacterString cstr(str), CharStringTooLong);
+}
+
+TEST(CharacterStringTest, testEscapeCharacter) {
+ CharacterString cstr1("foo\\bar");
+ EXPECT_EQ(string("foobar"), cstr1.str());
+
+ CharacterString cstr2("foo\\\\bar");
+ EXPECT_EQ(string("foo\\bar"), cstr2.str());
+
+ CharacterString cstr3("fo\\111bar");
+ EXPECT_EQ(string("foobar"), cstr3.str());
+
+ CharacterString cstr4("fo\\1112bar");
+ EXPECT_EQ(string("foo2bar"), cstr4.str());
+
+ // There must be at least 3 digits following '\'
+ EXPECT_THROW(CharacterString cstr("foo\\98ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\9ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\98"), InvalidRdataText);
+}
+
+} // namespace
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index c79ea2c..f068791 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -62,7 +62,6 @@ using namespace isc::dns::rdata;
//
const uint16_t Message::DEFAULT_MAX_UDPSIZE;
-const Name test_name("test.example.com");
namespace isc {
namespace util {
@@ -79,7 +78,8 @@ const uint16_t TSIGContext::DEFAULT_FUDGE;
namespace {
class MessageTest : public ::testing::Test {
protected:
- MessageTest() : obuffer(0), renderer(obuffer),
+ MessageTest() : test_name("test.example.com"), obuffer(0),
+ renderer(obuffer),
message_parse(Message::PARSE),
message_render(Message::RENDER),
bogus_section(static_cast<Message::Section>(
@@ -103,8 +103,9 @@ protected:
"FAKEFAKEFAKEFAKE"));
rrset_aaaa->addRRsig(rrset_rrsig);
}
-
+
static Question factoryFromFile(const char* datafile);
+ const Name test_name;
OutputBuffer obuffer;
MessageRenderer renderer;
Message message_parse;
@@ -114,18 +115,23 @@ protected:
RRsetPtr rrset_aaaa; // AAAA RRset with one RDATA with RRSIG
RRsetPtr rrset_rrsig; // RRSIG for the AAAA RRset
TSIGContext tsig_ctx;
+ vector<unsigned char> received_data;
vector<unsigned char> expected_data;
- static void factoryFromFile(Message& message, const char* datafile);
+ void factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options =
+ Message::PARSE_DEFAULT);
};
void
-MessageTest::factoryFromFile(Message& message, const char* datafile) {
- std::vector<unsigned char> data;
- UnitTestUtil::readWireData(datafile, data);
+MessageTest::factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options)
+{
+ received_data.clear();
+ UnitTestUtil::readWireData(datafile, received_data);
- InputBuffer buffer(&data[0], data.size());
- message.fromWire(buffer);
+ InputBuffer buffer(&received_data[0], received_data.size());
+ message.fromWire(buffer, options);
}
TEST_F(MessageTest, headerFlag) {
@@ -173,7 +179,6 @@ TEST_F(MessageTest, headerFlag) {
EXPECT_THROW(message_parse.setHeaderFlag(Message::HEADERFLAG_QR),
InvalidMessageOperation);
}
-
TEST_F(MessageTest, getEDNS) {
EXPECT_FALSE(message_parse.getEDNS()); // by default EDNS isn't set
@@ -530,7 +535,46 @@ TEST_F(MessageTest, appendSection) {
}
+TEST_F(MessageTest, parseHeader) {
+ received_data.clear();
+ UnitTestUtil::readWireData("message_fromWire1", received_data);
+
+ // parseHeader() isn't allowed in the render mode.
+ InputBuffer buffer(&received_data[0], received_data.size());
+ EXPECT_THROW(message_render.parseHeader(buffer), InvalidMessageOperation);
+
+ message_parse.parseHeader(buffer);
+ EXPECT_EQ(0x1035, message_parse.getQid());
+ EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
+ EXPECT_EQ(Rcode::NOERROR(), message_parse.getRcode());
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_QR));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_AA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_RD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_RA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_AD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_CD));
+ EXPECT_EQ(1, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_EQ(2, message_parse.getRRCount(Message::SECTION_ANSWER));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_AUTHORITY));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_ADDITIONAL));
+
+ // Only the header part should have been examined.
+ EXPECT_EQ(12, buffer.getPosition()); // 12 = size of the header section
+ EXPECT_TRUE(message_parse.beginQuestion() == message_parse.endQuestion());
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ANSWER) ==
+ message_parse.endSection(Message::SECTION_ANSWER));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_AUTHORITY) ==
+ message_parse.endSection(Message::SECTION_AUTHORITY));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ADDITIONAL) ==
+ message_parse.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(MessageTest, fromWire) {
+ // fromWire() isn't allowed in the render mode.
+ EXPECT_THROW(factoryFromFile(message_render, "message_fromWire1"),
+ InvalidMessageOperation);
+
factoryFromFile(message_parse, "message_fromWire1");
EXPECT_EQ(0x1035, message_parse.getQid());
EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
@@ -562,6 +606,87 @@ TEST_F(MessageTest, fromWire) {
EXPECT_TRUE(it->isLast());
}
+TEST_F(MessageTest, fromWireShortBuffer) {
+ // We trim a valid message (ending with an SOA RR) for one byte.
+ // fromWire() should throw an exception while parsing the trimmed RR.
+ UnitTestUtil::readWireData("message_fromWire22.wire", received_data);
+ InputBuffer buffer(&received_data[0], received_data.size() - 1);
+ EXPECT_THROW(message_parse.fromWire(buffer), InvalidBufferPosition);
+}
+
+TEST_F(MessageTest, fromWireCombineRRs) {
+ // This message contains 3 RRs in the answer section in the order of
+ // A, AAAA, A types. fromWire() should combine the two A RRs into a
+ // single RRset by default.
+ factoryFromFile(message_parse, "message_fromWire19.wire");
+
+ RRsetIterator it = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetIterator it_end = message_parse.endSection(Message::SECTION_ANSWER);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(2, (*it)->getRdataCount());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+}
+
+// A helper function for a test pattern commonly used in several tests below.
+void
+preserveRRCheck(const Message& message, Message::Section section) {
+ RRsetIterator it = message.beginSection(section);
+ RRsetIterator it_end = message.endSection(section);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("2001:db8::1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.2", (*it)->getRdataIterator()->getCurrent().toText());
+}
+
+TEST_F(MessageTest, fromWirePreserveAnswer) {
+ // Using the same data as the previous test, but specify the PRESERVE_ORDER
+ // option. The received order of RRs should be preserved, and each RR
+ // should be stored in a single RRset.
+ factoryFromFile(message_parse, "message_fromWire19.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve answer RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ANSWER);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAuthority) {
+ // Same for the previous test, but for the authority section.
+ factoryFromFile(message_parse, "message_fromWire20.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve authority RRs");
+ preserveRRCheck(message_parse, Message::SECTION_AUTHORITY);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAdditional) {
+ // Same for the previous test, but for the additional section.
+ factoryFromFile(message_parse, "message_fromWire21.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve additional RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ADDITIONAL);
+ }
+}
+
TEST_F(MessageTest, EDNS0ExtRcode) {
// Extended Rcode = BADVERS
factoryFromFile(message_parse, "message_fromWire10.wire");
@@ -618,15 +743,43 @@ testGetTime() {
return (NOW);
}
+// bit-wise constant flags to configure DNS header flags for test
+// messages.
+const unsigned int QR_FLAG = 0x1;
+const unsigned int AA_FLAG = 0x2;
+const unsigned int RD_FLAG = 0x4;
+
void
commonTSIGToWireCheck(Message& message, MessageRenderer& renderer,
- TSIGContext& tsig_ctx, const char* const expected_file)
+ TSIGContext& tsig_ctx, const char* const expected_file,
+ unsigned int message_flags = RD_FLAG,
+ RRType qtype = RRType::A(),
+ const vector<const char*>* answer_data = NULL)
{
message.setOpcode(Opcode::QUERY());
message.setRcode(Rcode::NOERROR());
- message.setHeaderFlag(Message::HEADERFLAG_RD, true);
+ if ((message_flags & QR_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_QR);
+ }
+ if ((message_flags & AA_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_AA);
+ }
+ if ((message_flags & RD_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_RD);
+ }
message.addQuestion(Question(Name("www.example.com"), RRClass::IN(),
- RRType::A()));
+ qtype));
+
+ if (answer_data != NULL) {
+ RRsetPtr ans_rrset(new RRset(Name("www.example.com"), RRClass::IN(),
+ qtype, RRTTL(86400)));
+ for (vector<const char*>::const_iterator it = answer_data->begin();
+ it != answer_data->end();
+ ++it) {
+ ans_rrset->addRdata(createRdata(qtype, RRClass::IN(), *it));
+ }
+ message.addRRset(Message::SECTION_ANSWER, ans_rrset);
+ }
message.toWire(renderer, tsig_ctx);
vector<unsigned char> expected_data;
@@ -670,6 +823,182 @@ TEST_F(MessageTest, toWireWithEDNSAndTSIG) {
}
}
+// Some of the following tests involve truncation. We use the query name
+// "www.example.com" and some TXT question/answers. The length of the
+// header and question will be 33 bytes. If we also try to include a
+// TSIG of the same key name (not compressed) with HMAC-MD5, the TSIG RR
+// will be 85 bytes.
+
+// A long TXT RDATA. With a fully compressed owner name, the corresponding
+// RR will be 268 bytes.
+const char* const long_txt1 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";
+
+// With a fully compressed owner name, the corresponding RR will be 212 bytes.
+// It should result in truncation even without TSIG (33 + 268 + 212 = 513)
+const char* const long_txt2 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456";
+
+// With a fully compressed owner name, the corresponding RR will be 127 bytes.
+// So, it can fit in the standard 512 bytes with txt1 and without TSIG, but
+// adding a TSIG would result in truncation (33 + 268 + 127 + 85 = 513)
+const char* const long_txt3 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01";
+
+// This is 1 byte shorter than txt3, which results in the longest possible
+// message that still contains the answer RRs and a TSIG.
+const char* const long_txt4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0";
+
+// Example output generated by
+// "dig -y www.example.com:SFuWd/q99SzF8Yzd1QbB9g== www.example.com txt
+// QID: 0x22c2
+// Time Signed: 0x00004e179212
+TEST_F(MessageTest, toWireTSIGTruncation) {
+ isc::util::detail::gettimeFunction = testGetTime<0x4e179212>;
+
+ // Verify a validly signed query so that we can use the TSIG context
+
+ factoryFromFile(message_parse, "message_fromWire17.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0x22c2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt2);
+ {
+ SCOPED_TRACE("Message sign with TSIG and TC bit on");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire4.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+TEST_F(MessageTest, toWireTSIGTruncation2) {
+ // Similar to the previous test, but without TSIG it wouldn't cause
+ // truncation.
+ isc::util::detail::gettimeFunction = testGetTime<0x4e179212>;
+ factoryFromFile(message_parse, "message_fromWire17.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0x22c2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt3);
+ {
+ SCOPED_TRACE("Message sign with TSIG and TC bit on (2)");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire4.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+TEST_F(MessageTest, toWireTSIGTruncation3) {
+ // Similar to previous ones, but truncation occurs due to too many
+ // Questions (very unusual, but not necessarily illegal).
+
+ // We are going to create a message starting with a standard
+ // header (12 bytes) and multiple questions in the Question
+ // section of the same owner name (changing the RRType, just so
+ // that it would be the form that would be accepted by the BIND 9
+ // parser). The first Question is 21 bytes in length, and the subsequent
+ // ones are 6 bytes. We'll also use a TSIG whose size is 85 bytes.
+ // Up to 66 questions can fit in the standard 512-byte buffer
+ // (12 + 21 + 6 * 65 + 85 = 508). If we try to add one more it would
+ // result in truncation.
+ message_render.setOpcode(Opcode::QUERY());
+ message_render.setRcode(Rcode::NOERROR());
+ for (int i = 1; i <= 67; ++i) {
+ message_render.addQuestion(Question(Name("www.example.com"),
+ RRClass::IN(), RRType(i)));
+ }
+ message_render.toWire(renderer, tsig_ctx);
+
+ // Check the rendered data by parsing it. We only check it has the
+ // TC bit on, has the correct number of questions, and has a TSIG RR.
+ // Checking the signature wouldn't be necessary for this rare case
+ // scenario.
+ InputBuffer buffer(renderer.getData(), renderer.getLength());
+ message_parse.fromWire(buffer);
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ // Note that the number of questions are 66, not 67 as we tried to add.
+ EXPECT_EQ(66, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_TRUE(message_parse.getTSIGRecord() != NULL);
+}
+
+TEST_F(MessageTest, toWireTSIGNoTruncation) {
+ // A boundary case that shouldn't cause truncation: the resulting
+ // response message with a TSIG will be 512 bytes long.
+ isc::util::detail::gettimeFunction = testGetTime<0x4e17b38d>;
+ factoryFromFile(message_parse, "message_fromWire18.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0xd6e2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt4);
+ {
+ SCOPED_TRACE("Message sign with TSIG, no truncation");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire5.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+// This is a buggy renderer for testing. It behaves like the straightforward
+// MessageRenderer, but once it has some data, its setLengthLimit() ignores
+// the given parameter and resets the limit to the current length, making
+// subsequent insertion result in truncation, which would make TSIG RR
+// rendering fail unexpectedly in the test that follows.
+class BadRenderer : public MessageRenderer {
+public:
+ BadRenderer(isc::util::OutputBuffer& buffer) :
+ MessageRenderer(buffer)
+ {}
+ virtual void setLengthLimit(size_t len) {
+ if (getLength() > 0) {
+ MessageRenderer::setLengthLimit(getLength());
+ } else {
+ MessageRenderer::setLengthLimit(len);
+ }
+ }
+};
+
+TEST_F(MessageTest, toWireTSIGLengthErrors) {
+ // specify an unusual short limit that wouldn't be able to hold
+ // the TSIG.
+ renderer.setLengthLimit(tsig_ctx.getTSIGLength() - 1);
+ // Use commonTSIGToWireCheck() only to call toWire() with otherwise valid
+ // conditions. The checks inside it don't matter because we expect an
+ // exception before any of the checks.
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ InvalidParameter);
+
+ // This one is large enough for TSIG, but the remaining limit isn't
+ // even enough for the Header section.
+ renderer.clear();
+ message_render.clear(Message::RENDER);
+ renderer.setLengthLimit(tsig_ctx.getTSIGLength() + 1);
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ InvalidParameter);
+
+ // Trying to render a message with TSIG using a buggy renderer.
+ obuffer.clear();
+ BadRenderer bad_renderer(obuffer);
+ bad_renderer.setLengthLimit(512);
+ message_render.clear(Message::RENDER);
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, bad_renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ Unexpected);
+}
+
TEST_F(MessageTest, toWireWithoutOpcode) {
message_render.setRcode(Rcode::NOERROR());
EXPECT_THROW(message_render.toWire(renderer), InvalidMessageOperation);
diff --git a/src/lib/dns/tests/question_unittest.cc b/src/lib/dns/tests/question_unittest.cc
index 25fd75b..1d483f2 100644
--- a/src/lib/dns/tests/question_unittest.cc
+++ b/src/lib/dns/tests/question_unittest.cc
@@ -106,6 +106,22 @@ TEST_F(QuestionTest, toWireRenderer) {
obuffer.getLength(), &wiredata[0], wiredata.size());
}
+TEST_F(QuestionTest, toWireTruncated) {
+ // If the available length in the renderer is too small, it would require
+ // truncation. This won't happen in normal cases, but protocol-wise it
+ // could still happen if and when we support some (possibly future) opcode
+ // that allows multiple questions.
+
+ // Set the length limit to the qname length so that the whole question
+ // would be truncated.
+ renderer.setLengthLimit(example_name1.getLength());
+
+ EXPECT_FALSE(renderer.isTruncated()); // check pre-render condition
+ EXPECT_EQ(0, test_question1.toWire(renderer));
+ EXPECT_TRUE(renderer.isTruncated());
+ EXPECT_EQ(0, renderer.getLength()); // renderer shouldn't have any data
+}
+
// test operator<<. We simply confirm it appends the result of toText().
TEST_F(QuestionTest, LeftShiftOperator) {
ostringstream oss;
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
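+// A 64-character label, exceeding the 63-octet limit for a DNS label.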
+const char* const too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+ Rdata_AFSDB_Test() :
+ rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+ {}
+
+ const generic::AFSDB rdata_afsdb;
+ const generic::AFSDB rdata_afsdb2;
+ vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+ EXPECT_EQ(1, rdata_afsdb.getSubtype());
+ EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+ EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+ EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+ // subtype is too large
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+ string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+ generic::AFSDB copy((string(afsdb_text2)));
+ copy = rdata_afsdb;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+ generic::AFSDB copy3((string(afsdb_text2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire1.wire")));
+ // compressed name
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire2.wire", 13)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus server name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+ // construct actual data
+ rdata_afsdb.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear buffer for the next test
+ obuffer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+ // similar to toWireBuffer, but names in RDATA could be compressed due to
+ // preceding names. Actually they must not be compressed according to
+ // RFC3597, and this test checks that.
+
+ // construct actual data
+ rdata_afsdb.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear renderer for the next test
+ renderer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+ EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+ EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+ // the name must be compared in a case-insensitive manner
+ EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+ "AFSDB.example.com.")));
+
+ const generic::AFSDB small1("10 afsdb.example.com");
+ const generic::AFSDB large1("65535 afsdb.example.com");
+ const generic::AFSDB large2("256 afsdb.example.com");
+
+ // confirm these are compared as unsigned values
+ EXPECT_GT(0, rdata_afsdb.compare(large1));
+ EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+ // confirm these are compared in network byte order
+ EXPECT_GT(0, small1.compare(large2));
+ EXPECT_LT(0, large2.compare(small1));
+
+ // another AFSDB whose server name is larger than that of rdata_afsdb.
+ const generic::AFSDB large3("256 zzzzz.example.com");
+ EXPECT_GT(0, large2.compare(large3));
+ EXPECT_LT(0, large3.compare(large2));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_dhcid_unittest.cc b/src/lib/dns/tests/rdata_dhcid_unittest.cc
new file mode 100644
index 0000000..9df7043
--- /dev/null
+++ b/src/lib/dns/tests/rdata_dhcid_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/rdataclass.h>
+#include <util/encode/base64.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata;
+
+namespace {
+
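+// Sample DHCID RDATA in base64 (master file) form, shared by the tests below.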
+const string string_dhcid(
+ "0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=");
+
+const in::DHCID rdata_dhcid(string_dhcid);
+
+class Rdata_DHCID_Test : public RdataTest {
+};
+
+TEST_F(Rdata_DHCID_Test, createFromString) {
+ const in::DHCID rdata_dhcid2(string_dhcid);
+ EXPECT_EQ(0, rdata_dhcid2.compare(rdata_dhcid));
+}
+
+TEST_F(Rdata_DHCID_Test, badBase64) {
+ EXPECT_THROW(const in::DHCID rdata_dhcid_bad("00"), isc::BadValue);
+}
+
+TEST_F(Rdata_DHCID_Test, badLength) {
+ EXPECT_THROW(const in::DHCID rdata_dhcid_bad("MDA="), InvalidRdataLength);
+}
+
+TEST_F(Rdata_DHCID_Test, copy) {
+ const in::DHCID rdata_dhcid2(rdata_dhcid);
+ EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid2));
+}
+
+TEST_F(Rdata_DHCID_Test, createFromWire) {
+ EXPECT_EQ(0, rdata_dhcid.compare(
+ *rdataFactoryFromFile(RRType("DHCID"), RRClass("IN"),
+ "rdata_dhcid_fromWire")));
+ // TBD: more tests
+}
+
+TEST_F(Rdata_DHCID_Test, toWireRenderer) {
+ rdata_dhcid.toWire(renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, renderer.getData(),
+ renderer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toWireBuffer) {
+ rdata_dhcid.toWire(obuffer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toText) {
+ EXPECT_EQ(string_dhcid, rdata_dhcid.toText());
+}
+
+TEST_F(Rdata_DHCID_Test, getDHCIDDigest) {
+ const string string_dhcid1(encodeBase64(rdata_dhcid.getDigest()));
+
+ EXPECT_EQ(string_dhcid, string_dhcid1);
+}
+
+TEST_F(Rdata_DHCID_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid));
+
+ in::DHCID rdata_dhcid1("0YLQvtC/0L7Qu9GPINC00LLQsCDRgNGD0LHQu9GP");
+ in::DHCID rdata_dhcid2("0YLQvtC/0L7Qu9GPINGC0YDQuCDRgNGD0LHQu9GP");
+ in::DHCID rdata_dhcid3("0YLQvtC/0L7Qu9GPINGH0LXRgtGL0YDQtSDRgNGD0LHQu9GP");
+
+ EXPECT_LT(rdata_dhcid1.compare(rdata_dhcid2), 0);
+ EXPECT_GT(rdata_dhcid2.compare(rdata_dhcid1), 0);
+
+ EXPECT_LT(rdata_dhcid2.compare(rdata_dhcid3), 0);
+ EXPECT_GT(rdata_dhcid3.compare(rdata_dhcid2), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_dhcid.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_ds_like_unittest.cc b/src/lib/dns/tests/rdata_ds_like_unittest.cc
new file mode 100644
index 0000000..9b29446
--- /dev/null
+++ b/src/lib/dns/tests/rdata_ds_like_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string>
+
+#include <util/buffer.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+// Helper template: maps each DS-like RDATA class to its corresponding RRType
+// so the typed tests below can use the right type.
+template <class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::DS>::RRTYPE() : RRType(RRType::DS()) {}
+template<> RRTYPE<generic::DLV>::RRTYPE() : RRType(RRType::DLV()) {}
+
+template <class DS_LIKE>
+class Rdata_DS_LIKE_Test : public RdataTest {
+protected:
+ static DS_LIKE const rdata_ds_like;
+};
+
+string ds_like_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+
+template <class DS_LIKE>
+DS_LIKE const Rdata_DS_LIKE_Test<DS_LIKE>::rdata_ds_like(ds_like_txt);
+
+// The list of types we want to test.
+typedef testing::Types<generic::DS, generic::DLV> Implementations;
+
+TYPED_TEST_CASE(Rdata_DS_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toText_DS_LIKE) {
+ EXPECT_EQ(ds_like_txt, this->rdata_ds_like.toText());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, badText_DS_LIKE) {
+ EXPECT_THROW(const TypeParam ds_like2("99999 5 2 BEEF"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 555 2 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 22222 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 2"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("GARBAGE IN"), InvalidRdataText);
+ // no space between the digest type and the digest.
+ EXPECT_THROW(const TypeParam ds_like2(
+ "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, createFromWire_DS_LIKE) {
+ EXPECT_EQ(0, this->rdata_ds_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass::IN(),
+ "rdata_ds_fromWire")));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, assignment_DS_LIKE) {
+ TypeParam copy((string(ds_like_txt)));
+ copy = this->rdata_ds_like;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* copy2 = new TypeParam(this->rdata_ds_like);
+ TypeParam copy3((string(ds_like_txt)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(this->rdata_ds_like));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, getTag_DS_LIKE) {
+ EXPECT_EQ(12892, this->rdata_ds_like.getTag());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireRenderer) {
+ Rdata_DS_LIKE_Test<TypeParam>::renderer.skip(2);
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_ds_fromWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t*>
+ (this->obuffer.getData()) + 2,
+ this->obuffer.getLength() - 2,
+ &data[2], data.size() - 2);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireBuffer) {
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->obuffer);
+}
+
+string ds_like_txt1("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different tag
+string ds_like_txt2("12893 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different algorithm
+string ds_like_txt3("12892 6 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest type
+string ds_like_txt4("12892 5 3 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest
+string ds_like_txt5("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest length
+string ds_like_txt6("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B555");
+
+TYPED_TEST(Rdata_DS_LIKE_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, TypeParam(ds_like_txt).compare(TypeParam(ds_like_txt)));
+
+ // non-equivalence tests
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt2)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt2).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt3)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt3).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt4)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt4).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt5)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt5).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt6)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt6).compare(TypeParam(ds_like_txt1)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(this->rdata_ds_like.compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_ds_unittest.cc b/src/lib/dns/tests/rdata_ds_unittest.cc
deleted file mode 100644
index 5988620..0000000
--- a/src/lib/dns/tests/rdata_ds_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <util/buffer.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-class Rdata_DS_Test : public RdataTest {
- // there's nothing to specialize
-};
-
-string ds_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5");
-const generic::DS rdata_ds(ds_txt);
-
-TEST_F(Rdata_DS_Test, toText_DS) {
- EXPECT_EQ(ds_txt, rdata_ds.toText());
-}
-
-TEST_F(Rdata_DS_Test, badText_DS) {
- EXPECT_THROW(const generic::DS ds2("99999 5 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 555 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 22222 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 2"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("GARBAGE IN"), InvalidRdataText);
-}
-
-// this test currently fails; we must fix it, and then migrate the test to
-// badText_DS
-TEST_F(Rdata_DS_Test, DISABLED_badText_DS) {
- // no space between the digest type and the digest.
- EXPECT_THROW(const generic::DS ds2(
- "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
-}
-
-TEST_F(Rdata_DS_Test, createFromWire_DS) {
- EXPECT_EQ(0, rdata_ds.compare(
- *rdataFactoryFromFile(RRType::DS(), RRClass::IN(),
- "rdata_ds_fromWire")));
-}
-
-TEST_F(Rdata_DS_Test, getTag_DS) {
- EXPECT_EQ(12892, rdata_ds.getTag());
-}
-
-TEST_F(Rdata_DS_Test, toWireRenderer) {
- renderer.skip(2);
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(renderer);
-
- vector<unsigned char> data;
- UnitTestUtil::readWireData("rdata_ds_fromWire", data);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- static_cast<const uint8_t *>(obuffer.getData()) + 2,
- obuffer.getLength() - 2, &data[2], data.size() - 2);
-}
-
-TEST_F(Rdata_DS_Test, toWireBuffer) {
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(obuffer);
-}
-
-TEST_F(Rdata_DS_Test, compare) {
- // trivial case: self equivalence
- EXPECT_EQ(0, generic::DS(ds_txt).compare(generic::DS(ds_txt)));
-
- // TODO: need more tests
-}
-
-}
diff --git a/src/lib/dns/tests/rdata_hinfo_unittest.cc b/src/lib/dns/tests/rdata_hinfo_unittest.cc
new file mode 100644
index 0000000..c52b2a0
--- /dev/null
+++ b/src/lib/dns/tests/rdata_hinfo_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_HINFO_Test : public RdataTest {
+};
+
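+// Wire-format HINFO RDATA for "Pentium" "Linux": each <character-string> is a
+// length octet followed by that many bytes of data.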
+static uint8_t hinfo_rdata[] = {0x07,0x50,0x65,0x6e,0x74,0x69,0x75,0x6d,0x05,
+ 0x4c,0x69,0x6e,0x75,0x78};
+static const char *hinfo_str = "\"Pentium\" \"Linux\"";
+static const char *hinfo_str1 = "\"Pen\\\"tium\" \"Linux\"";
+
+static const char *hinfo_str_small1 = "\"Lentium\" \"Linux\"";
+static const char *hinfo_str_small2 = "\"Pentium\" \"Kinux\"";
+static const char *hinfo_str_large1 = "\"Qentium\" \"Linux\"";
+static const char *hinfo_str_large2 = "\"Pentium\" \"UNIX\"";
+
+TEST_F(Rdata_HINFO_Test, createFromText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+
+ // Test text with an escaped double quote in the middle of the string
+ HINFO hinfo1(hinfo_str1);
+ EXPECT_EQ(string("Pen\"tium"), hinfo1.getCPU());
+}
+
+TEST_F(Rdata_HINFO_Test, badText) {
+ // Fields must be separated by spaces
+ EXPECT_THROW(const HINFO hinfo("\"Pentium\"\"Linux\""), InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const HINFO hinfo("Pentium"), InvalidRdataText);
+ // The <character-string> cannot exceed 255 characters
+ string hinfo_str;
+ for (int i = 0; i < 257; ++i) {
+ hinfo_str += 'A';
+ }
+ hinfo_str += " Linux";
+ EXPECT_THROW(const HINFO hinfo(hinfo_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_HINFO_Test, createFromWire) {
+ InputBuffer input_buffer(hinfo_rdata, sizeof(hinfo_rdata));
+ HINFO hinfo(input_buffer, sizeof(hinfo_rdata));
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+}
+
+TEST_F(Rdata_HINFO_Test, toText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(hinfo_str, hinfo.toText());
+}
+
+TEST_F(Rdata_HINFO_Test, toWire) {
+ HINFO hinfo(hinfo_str);
+ hinfo.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, toWireRenderer) {
+ HINFO hinfo(hinfo_str);
+
+ hinfo.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, compare) {
+ HINFO hinfo(hinfo_str);
+ HINFO hinfo_small1(hinfo_str_small1);
+ HINFO hinfo_small2(hinfo_str_small2);
+ HINFO hinfo_large1(hinfo_str_large1);
+ HINFO hinfo_large2(hinfo_str_large2);
+
+ EXPECT_EQ(0, hinfo.compare(HINFO(hinfo_str)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small1)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small2)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large1)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large2)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_minfo_unittest.cc b/src/lib/dns/tests/rdata_minfo_unittest.cc
new file mode 100644
index 0000000..30c7c39
--- /dev/null
+++ b/src/lib/dns/tests/rdata_minfo_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+// minfo text
+const char* const minfo_txt = "rmailbox.example.com. emailbox.example.com.";
+const char* const minfo_txt2 = "root.example.com. emailbox.example.com.";
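+// A 64-character label, exceeding the 63-octet limit for a DNS label.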
+const char* const too_long_label = "01234567890123456789012345678901234567"
+ "89012345678901234567890123";
+
+namespace {
+class Rdata_MINFO_Test : public RdataTest {
+public:
+ Rdata_MINFO_Test():
+ rdata_minfo(string(minfo_txt)), rdata_minfo2(string(minfo_txt2)) {}
+
+ const generic::MINFO rdata_minfo;
+ const generic::MINFO rdata_minfo2;
+};
+
+
+TEST_F(Rdata_MINFO_Test, createFromText) {
+ EXPECT_EQ(Name("rmailbox.example.com."), rdata_minfo.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo.getEmailbox());
+
+ EXPECT_EQ(Name("root.example.com."), rdata_minfo2.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo2.getEmailbox());
+}
+
+TEST_F(Rdata_MINFO_Test, badText) {
+ // incomplete text
+ EXPECT_THROW(generic::MINFO("root.example.com."),
+ InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(generic::MINFO("root.example.com emailbox.example.com. "
+ "example.com."),
+ InvalidRdataText);
+ // bad rmailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com. emailbox.example.com." +
+ string(too_long_label)),
+ TooLongLabel);
+ // bad emailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com." +
+ string(too_long_label) + " emailbox.example.com."),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_MINFO_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire1.wire")));
+ // compressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire2.wire", 15)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus rmailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire5.wire"),
+ DNSMessageFORMERR);
+ // bogus emailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire6.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_MINFO_Test, assignment) {
+ generic::MINFO copy((string(minfo_txt2)));
+ copy = rdata_minfo;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::MINFO* copy2 = new generic::MINFO(rdata_minfo);
+ generic::MINFO copy3((string(minfo_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_minfo));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+}
+
+TEST_F(Rdata_MINFO_Test, toWireBuffer) {
+ rdata_minfo.toWire(obuffer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+
+ obuffer.clear();
+ rdata_minfo2.toWire(obuffer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toWireRenderer) {
+ rdata_minfo.toWire(renderer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWire1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+ renderer.clear();
+ rdata_minfo2.toWire(renderer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWire2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toText) {
+ EXPECT_EQ(minfo_txt, rdata_minfo.toText());
+ EXPECT_EQ(minfo_txt2, rdata_minfo2.toText());
+}
+
+TEST_F(Rdata_MINFO_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_minfo.compare(rdata_minfo));
+
+ // names must be compared in a case-insensitive manner
+ EXPECT_EQ(0, rdata_minfo.compare(generic::MINFO("RMAILBOX.example.com. "
+ "emailbox.EXAMPLE.com.")));
+
+ // another MINFO whose rmailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large1_minfo("zzzzzzzz.example.com. "
+ "emailbox.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large1_minfo));
+ EXPECT_LT(0, large1_minfo.compare(rdata_minfo));
+
+ // another MINFO whose emailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large2_minfo("rmailbox.example.com. "
+ "zzzzzzzzzzz.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large2_minfo));
+ EXPECT_LT(0, large2_minfo.compare(rdata_minfo));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_minfo.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_naptr_unittest.cc b/src/lib/dns/tests/rdata_naptr_unittest.cc
new file mode 100644
index 0000000..f905943
--- /dev/null
+++ b/src/lib/dns/tests/rdata_naptr_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_NAPTR_Test : public RdataTest {
+};
+
+// 10 100 "S" "SIP+D2U" "" _sip._udp.example.com.
+static uint8_t naptr_rdata[] = {0x00,0x0a,0x00,0x64,0x01,0x53,0x07,0x53,0x49,
+ 0x50,0x2b,0x44,0x32,0x55,0x00,0x04,0x5f,0x73,0x69,0x70,0x04,0x5f,0x75,0x64,
+ 0x70,0x07,0x65,0x78,0x61,0x6d,0x70,0x6c,0x65,0x03,0x63,0x6f,0x6d,0x00};
+
+static const char *naptr_str =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str2 =
+ "10 100 S SIP+D2U \"\" _sip._udp.example.com.";
+
+static const char *naptr_str_small1 =
+ "9 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small2 =
+ "10 90 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small3 =
+ "10 100 \"R\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small4 =
+ "10 100 \"S\" \"SIP+C2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _rip._udp.example.com.";
+
+static const char *naptr_str_large1 =
+ "11 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large2 =
+ "10 110 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large3 =
+ "10 100 \"T\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large4 =
+ "10 100 \"S\" \"SIP+E2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _tip._udp.example.com.";
+
+TEST_F(Rdata_NAPTR_Test, createFromText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+
+ // Test unquoted <char-string> fields separated by spaces
+ NAPTR naptr2(naptr_str2);
+ EXPECT_EQ(string("S"), naptr2.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr2.getServices());
+}
+
+TEST_F(Rdata_NAPTR_Test, badText) {
+ // Order number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("65536 10 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Preference number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("100 65536 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // No regexp given
+ EXPECT_THROW(const NAPTR naptr("100 10 S SIP _sip._udp.example.com."),
+ InvalidRdataText);
+ // The double-quote delimiters must match
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Order or preference cannot be missing
+ EXPECT_THROW(const NAPTR naptr("10 \"S\" SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Fields must be separated by spaces
+ EXPECT_THROW(const NAPTR naptr("100 10S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\"\"SIP\" \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\""), InvalidRdataText);
+
+ // The <character-string> cannot exceed 255 characters
+ string naptr_str;
+ naptr_str += "100 10 ";
+ for (int i = 0; i < 257; ++i) {
+ naptr_str += 'A';
+ }
+ naptr_str += " SIP \"\" _sip._udp.example.com.";
+ EXPECT_THROW(const NAPTR naptr(naptr_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_NAPTR_Test, createFromWire) {
+ InputBuffer input_buffer(naptr_rdata, sizeof(naptr_rdata));
+ NAPTR naptr(input_buffer, sizeof(naptr_rdata));
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+}
+
+TEST_F(Rdata_NAPTR_Test, toWire) {
+ NAPTR naptr(naptr_str);
+ naptr.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toWireRenderer) {
+ NAPTR naptr(naptr_str);
+
+ naptr.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(naptr_str, naptr.toText());
+}
+
+TEST_F(Rdata_NAPTR_Test, compare) {
+ NAPTR naptr(naptr_str);
+ NAPTR naptr_small1(naptr_str_small1);
+ NAPTR naptr_small2(naptr_str_small2);
+ NAPTR naptr_small3(naptr_str_small3);
+ NAPTR naptr_small4(naptr_str_small4);
+ NAPTR naptr_small5(naptr_str_small5);
+ NAPTR naptr_large1(naptr_str_large1);
+ NAPTR naptr_large2(naptr_str_large2);
+ NAPTR naptr_large3(naptr_str_large3);
+ NAPTR naptr_large4(naptr_str_large4);
+ NAPTR naptr_large5(naptr_str_large5);
+
+ EXPECT_EQ(0, naptr.compare(NAPTR(naptr_str)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small1)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small2)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small3)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small4)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small5)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large1)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large2)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large3)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large4)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large5)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_nsec_unittest.cc b/src/lib/dns/tests/rdata_nsec_unittest.cc
index 5aa1e9c..f081cd8 100644
--- a/src/lib/dns/tests/rdata_nsec_unittest.cc
+++ b/src/lib/dns/tests/rdata_nsec_unittest.cc
@@ -89,4 +89,10 @@ TEST_F(Rdata_NSEC_Test, assign) {
EXPECT_EQ(0, rdata_nsec.compare(rdata_nsec2));
}
+TEST_F(Rdata_NSEC_Test, getNextName) {
+ // The implementation is quite trivial, so we simply check it's actually
+ // defined and does work as intended in a simple case.
+ EXPECT_EQ(Name("www2.isc.org"), generic::NSEC((nsec_txt)).getNextName());
+}
+
}
diff --git a/src/lib/dns/tests/rdata_rrsig_unittest.cc b/src/lib/dns/tests/rdata_rrsig_unittest.cc
index 903021f..3324b99 100644
--- a/src/lib/dns/tests/rdata_rrsig_unittest.cc
+++ b/src/lib/dns/tests/rdata_rrsig_unittest.cc
@@ -47,7 +47,7 @@ TEST_F(Rdata_RRSIG_Test, fromText) {
"f49t+sXKPzbipN9g+s1ZPiIyofc=");
generic::RRSIG rdata_rrsig(rrsig_txt);
EXPECT_EQ(rrsig_txt, rdata_rrsig.toText());
-
+ EXPECT_EQ(isc::dns::RRType::A(), rdata_rrsig.typeCovered());
}
TEST_F(Rdata_RRSIG_Test, badText) {
diff --git a/src/lib/dns/tests/rdata_srv_unittest.cc b/src/lib/dns/tests/rdata_srv_unittest.cc
new file mode 100644
index 0000000..3394f43
--- /dev/null
+++ b/src/lib/dns/tests/rdata_srv_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+class Rdata_SRV_Test : public RdataTest {
+ // there's nothing to specialize
+};
+
+string srv_txt("1 5 1500 a.example.com.");
+string srv_txt2("1 5 1400 example.com.");
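+// A 64-character label, exceeding the 63-octet limit for a DNS label.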
+string too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+// 1 5 1500 a.example.com.
+const uint8_t wiredata_srv[] = {
+ 0x00, 0x01, 0x00, 0x05, 0x05, 0xdc, 0x01, 0x61, 0x07, 0x65, 0x78,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+// 1 5 1400 example.com.
+const uint8_t wiredata_srv2[] = {
+ 0x00, 0x01, 0x00, 0x05, 0x05, 0x78, 0x07, 0x65, 0x78, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+
+const in::SRV rdata_srv(srv_txt);
+const in::SRV rdata_srv2(srv_txt2);
+
+TEST_F(Rdata_SRV_Test, createFromText) {
+ EXPECT_EQ(1, rdata_srv.getPriority());
+ EXPECT_EQ(5, rdata_srv.getWeight());
+ EXPECT_EQ(1500, rdata_srv.getPort());
+ EXPECT_EQ(Name("a.example.com."), rdata_srv.getTarget());
+}
+
+TEST_F(Rdata_SRV_Test, badText) {
+ // priority is too large (2814...6 is 2^48)
+ EXPECT_THROW(in::SRV("281474976710656 5 1500 a.example.com."),
+ InvalidRdataText);
+ // weight is too large
+ EXPECT_THROW(in::SRV("1 281474976710656 1500 a.example.com."),
+ InvalidRdataText);
+ // port is too large
+ EXPECT_THROW(in::SRV("1 5 281474976710656 a.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(in::SRV("1 5 a.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(in::SRV("1 5 1500a.example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(in::SRV("1 5 1500 a.example.com." + too_long_label),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_SRV_Test, assignment) {
+ in::SRV copy((string(srv_txt2)));
+ copy = rdata_srv;
+ EXPECT_EQ(0, copy.compare(rdata_srv));
+
+ // Check if the copied data is valid even after the original is deleted
+ in::SRV* copy2 = new in::SRV(rdata_srv);
+ in::SRV copy3((string(srv_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_srv));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_srv));
+}
+
+TEST_F(Rdata_SRV_Test, createFromWire) {
+ EXPECT_EQ(0, rdata_srv.compare(
+ *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire")));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 23),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 46),
+ InvalidRdataLength);
+ // incomplete name; the error should be detected in the name constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_cname_fromWire", 69),
+ DNSMessageFORMERR);
+ // parse compressed target name
+ EXPECT_EQ(0, rdata_srv.compare(
+ *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 89)));
+}
+
+TEST_F(Rdata_SRV_Test, toWireBuffer) {
+ rdata_srv.toWire(obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv, sizeof(wiredata_srv));
+ obuffer.clear();
+ rdata_srv2.toWire(obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toWireRenderer) {
+ rdata_srv.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv, sizeof(wiredata_srv));
+ renderer.clear();
+ rdata_srv2.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toText) {
+ EXPECT_EQ(srv_txt, rdata_srv.toText());
+ EXPECT_EQ(srv_txt2, rdata_srv2.toText());
+}
+
+TEST_F(Rdata_SRV_Test, compare) {
+ // test RDATAs, sorted in ascending order.
+ vector<in::SRV> compare_set;
+ compare_set.push_back(in::SRV("1 5 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 5 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1600 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1600 example.com."));
+
+ EXPECT_EQ(0, compare_set[0].compare(
+ in::SRV("1 5 1500 a.example.com.")));
+
+ vector<in::SRV>::const_iterator it;
+ vector<in::SRV>::const_iterator it_end = compare_set.end();
+ for (it = compare_set.begin(); it != it_end - 1; ++it) {
+ EXPECT_GT(0, (*it).compare(*(it + 1)));
+ EXPECT_LT(0, (*(it + 1)).compare(*it));
+ }
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_srv.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_txt_like_unittest.cc b/src/lib/dns/tests/rdata_txt_like_unittest.cc
new file mode 100644
index 0000000..981265e
--- /dev/null
+++ b/src/lib/dns/tests/rdata_txt_like_unittest.cc
@@ -0,0 +1,261 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is the common code for TXT and SPF tests.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/rdataclass.h>
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+
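+// Helper template: maps each TXT-like RDATA class to its corresponding RRType
+// so the typed tests below can construct the right type.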
+template<class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::TXT>::RRTYPE() : RRType(RRType::TXT()) {}
+template<> RRTYPE<generic::SPF>::RRTYPE() : RRType(RRType::SPF()) {}
+
+namespace {
+const uint8_t wiredata_txt_like[] = {
+ sizeof("Test String") - 1,
+ 'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
+};
+
+const uint8_t wiredata_nulltxt[] = { 0 };
+vector<uint8_t> wiredata_longesttxt(256, 'a');
+
+template<class TXT_LIKE>
+class Rdata_TXT_LIKE_Test : public RdataTest {
+protected:
+ Rdata_TXT_LIKE_Test() {
+ wiredata_longesttxt[0] = 255; // adjust length
+ }
+
+ static const TXT_LIKE rdata_txt_like;
+ static const TXT_LIKE rdata_txt_like_empty;
+ static const TXT_LIKE rdata_txt_like_quoted;
+};
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like("Test String");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_empty("");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_quoted
+ ("\"Test String\"");
+
+// The list of types we want to test.
+typedef testing::Types<generic::TXT, generic::SPF> Implementations;
+
+TYPED_TEST_CASE(Rdata_TXT_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromText) {
+ // normal case is covered in toWireBuffer.
+
+ // surrounding double-quotes shouldn't change the result.
+ EXPECT_EQ(0, this->rdata_txt_like.compare(this->rdata_txt_like_quoted));
+
+ // Null character-string.
+ this->obuffer.clear();
+ TypeParam(string("")).toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ wiredata_nulltxt, sizeof(wiredata_nulltxt));
+
+ // Longest possible character-string.
+ this->obuffer.clear();
+ TypeParam(string(255, 'a')).toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &wiredata_longesttxt[0], wiredata_longesttxt.size());
+
+ // Too long text for a valid character-string.
+ EXPECT_THROW(TypeParam(string(256, 'a')), CharStringTooLong);
+
+ // The escape character makes the double quote a part of character-string,
+ // so this is invalid input and should be rejected.
+ EXPECT_THROW(TypeParam("\"Test String\\\""), InvalidRdataText);
+
+ // Terminating double-quote is provided, so this is valid, but in this
+ // version of implementation we reject escaped characters.
+ EXPECT_THROW(TypeParam("\"Test String\\\"\""), InvalidRdataText);
+}
+
+void
+makeLargest(vector<uint8_t>& data) {
+ uint8_t ch = 0;
+
+ // create 255 character-strings, each of the longest possible
+ // length (255-byte string + 1-byte length field)
+ for (int i = 0; i < 255; ++i, ++ch) {
+ data.push_back(255);
+ data.insert(data.end(), 255, ch);
+ }
+ // the last character-string should be 255 bytes (including the one-byte
+ // length field) in length so that the total length stays within the range
+ // of a 16-bit integer.
+ data.push_back(254);
+ data.insert(data.end(), 254, ch);
+
+ assert(data.size() == 65535);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromWire) {
+ EXPECT_EQ(0, this->rdata_txt_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire1")));
+
+ // Empty character string
+ EXPECT_EQ(0, this->rdata_txt_like_empty.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire2.wire")));
+
+ // Multiple character strings
+ this->obuffer.clear();
+ this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire3.wire")->toWire(this->obuffer);
+ // the result should be 'wiredata_txt' repeated twice
+ vector<uint8_t> expected_data(wiredata_txt_like, wiredata_txt_like +
+ sizeof(wiredata_txt_like));
+ expected_data.insert(expected_data.end(), wiredata_txt_like,
+ wiredata_txt_like + sizeof(wiredata_txt_like));
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &expected_data[0], expected_data.size());
+
+ // Largest length of data. There's nothing special, but should be
+ // constructed safely, and the content should be identical to the original
+ // data.
+ vector<uint8_t> largest_txt_like_data;
+ makeLargest(largest_txt_like_data);
+ InputBuffer ibuffer(&largest_txt_like_data[0],
+ largest_txt_like_data.size());
+ TypeParam largest_txt_like(ibuffer, largest_txt_like_data.size());
+ this->obuffer.clear();
+ largest_txt_like.toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ &largest_txt_like_data[0],
+ largest_txt_like_data.size());
+
+ // rdlen parameter is out of range. This is a rare event because we'd
+ // normally call the constructor via a polymorphic wrapper, where the
+ // length is validated. But this should be checked explicitly.
+ InputBuffer ibuffer2(&largest_txt_like_data[0],
+ largest_txt_like_data.size());
+ EXPECT_THROW(TypeParam(ibuffer2, 65536), InvalidRdataLength);
+
+ // RDATA is empty, which is invalid for TXT_LIKE.
+ EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire4.wire"),
+ DNSMessageFORMERR);
+
+ // character-string length is too large, which could cause overrun.
+ EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+ "rdata_txt_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireBuffer) {
+ this->rdata_txt_like.toWire(this->obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->obuffer.getData(),
+ this->obuffer.getLength(),
+ wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireRenderer) {
+ this->rdata_txt_like.toWire(this->renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ this->renderer.getData(),
+ this->renderer.getLength(),
+ wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toText) {
+ EXPECT_EQ("\"Test String\"", this->rdata_txt_like.toText());
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, assignment) {
+ TypeParam rdata1("assignment1");
+ TypeParam rdata2("assignment2");
+ rdata1 = rdata2;
+ EXPECT_EQ(0, rdata2.compare(rdata1));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* rdata3 = new TypeParam(rdata1);
+ TypeParam rdata4("assignment3");
+ rdata4 = *rdata3;
+ delete rdata3;
+ EXPECT_EQ(0, rdata4.compare(rdata1));
+
+ // Self assignment
+ rdata2 = rdata2;
+ EXPECT_EQ(0, rdata2.compare(rdata1));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, compare) {
+ string const txt1("aaaaaaaa");
+ string const txt2("aaaaaaaaaa");
+ string const txt3("bbbbbbbb");
+ string const txt4(129, 'a');
+ string const txt5(128, 'b');
+
+ EXPECT_EQ(TypeParam(txt1).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam("").compare(TypeParam(txt1)), 0);
+ EXPECT_GT(TypeParam(txt1).compare(TypeParam("")), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt3)), 0);
+ EXPECT_GT(TypeParam(txt3).compare(TypeParam(txt1)), 0);
+
+ // we're comparing the data raw, starting at the length octet, so a shorter
+ // string sorts before a longer one regardless of lexicographical order
+ EXPECT_LT(TypeParam(txt3).compare(TypeParam(txt2)), 0);
+ EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt3)), 0);
+
+ // to make sure the length octet compares unsigned
+ EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt1)), 0);
+
+ EXPECT_LT(TypeParam(txt5).compare(TypeParam(txt4)), 0);
+ EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt5)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(TypeParam(txt1).compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_txt_unittest.cc b/src/lib/dns/tests/rdata_txt_unittest.cc
deleted file mode 100644
index e5f8ac9..0000000
--- a/src/lib/dns/tests/rdata_txt_unittest.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <util/buffer.h>
-#include <dns/exceptions.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-const generic::TXT rdata_txt("Test String");
-const generic::TXT rdata_txt_empty("");
-const generic::TXT rdata_txt_quoated("\"Test String\"");
-const uint8_t wiredata_txt[] = {
- sizeof("Test String") - 1,
- 'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
-};
-const uint8_t wiredata_nulltxt[] = { 0 };
-vector<uint8_t> wiredata_longesttxt(256, 'a');
-
-class Rdata_TXT_Test : public RdataTest {
-protected:
- Rdata_TXT_Test() {
- wiredata_longesttxt[0] = 255; // adjust length
- }
-};
-
-TEST_F(Rdata_TXT_Test, createFromText) {
- // normal case is covered in toWireBuffer.
-
- // surrounding double-quotes shouldn't change the result.
- EXPECT_EQ(0, rdata_txt.compare(rdata_txt_quoated));
-
- // Null character-string.
- obuffer.clear();
- generic::TXT(string("")).toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- wiredata_nulltxt, sizeof(wiredata_nulltxt));
-
- // Longest possible character-string.
- obuffer.clear();
- generic::TXT(string(255, 'a')).toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &wiredata_longesttxt[0], wiredata_longesttxt.size());
-
- // Too long text for a valid character-string.
- EXPECT_THROW(generic::TXT(string(256, 'a')), CharStringTooLong);
-
- // The escape character makes the double quote a part of character-string,
- // so this is invalid input and should be rejected.
- EXPECT_THROW(generic::TXT("\"Test String\\\""), InvalidRdataText);
-
- // Terminating double-quote is provided, so this is valid, but in this
- // version of implementation we reject escaped characters.
- EXPECT_THROW(generic::TXT("\"Test String\\\"\""), InvalidRdataText);
-}
-
-void
-makeLargest(vector<uint8_t>& data) {
- uint8_t ch = 0;
-
- // create 255 sets of character-strings, each of which has the longest
- // length (255bytes string + 1-byte length field)
- for (int i = 0; i < 255; ++i, ++ch) {
- data.push_back(255);
- data.insert(data.end(), 255, ch);
- }
- // the last character-string should be 255 bytes (including the one-byte
- // length field) in length so that the total length should be in the range
- // of 16-bit integers.
- data.push_back(254);
- data.insert(data.end(), 254, ch);
-
- assert(data.size() == 65535);
-}
-
-TEST_F(Rdata_TXT_Test, createFromWire) {
- EXPECT_EQ(0, rdata_txt.compare(
- *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire1")));
-
- // Empty character string
- EXPECT_EQ(0, rdata_txt_empty.compare(
- *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire2.wire")));
-
- // Multiple character strings
- obuffer.clear();
- rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire3.wire")->toWire(obuffer);
- // the result should be 'wiredata_txt' repeated twice
- vector<uint8_t> expected_data(wiredata_txt, wiredata_txt +
- sizeof(wiredata_txt));
- expected_data.insert(expected_data.end(), wiredata_txt,
- wiredata_txt + sizeof(wiredata_txt));
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &expected_data[0], expected_data.size());
-
- // Largest length of data. There's nothing special, but should be
- // constructed safely, and the content should be identical to the original
- // data.
- vector<uint8_t> largest_txt_data;
- makeLargest(largest_txt_data);
- InputBuffer ibuffer(&largest_txt_data[0], largest_txt_data.size());
- generic::TXT largest_txt(ibuffer, largest_txt_data.size());
- obuffer.clear();
- largest_txt.toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- &largest_txt_data[0], largest_txt_data.size());
-
- // rdlen parameter is out of range. This is a rare event because we'd
- // normally call the constructor via a polymorphic wrapper, where the
- // length is validated. But this should be checked explicitly.
- InputBuffer ibuffer2(&largest_txt_data[0], largest_txt_data.size());
- EXPECT_THROW(generic::TXT(ibuffer2, 65536), InvalidRdataLength);
-
- // RDATA is empty, which is invalid for TXT.
- EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire4.wire"),
- DNSMessageFORMERR);
-
- // character-string length is too large, which could cause overrun.
- EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
- "rdata_txt_fromWire5.wire"),
- DNSMessageFORMERR);
-}
-
-TEST_F(Rdata_TXT_Test, toWireBuffer) {
- rdata_txt.toWire(obuffer);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- obuffer.getData(), obuffer.getLength(),
- wiredata_txt, sizeof(wiredata_txt));
-}
-
-TEST_F(Rdata_TXT_Test, toText) {
- EXPECT_EQ("\"Test String\"", rdata_txt.toText());
-}
-}
diff --git a/src/lib/dns/tests/run_unittests.cc b/src/lib/dns/tests/run_unittests.cc
index 18eb0a5..7616202 100644
--- a/src/lib/dns/tests/run_unittests.cc
+++ b/src/lib/dns/tests/run_unittests.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
#include <util/unittests/testdata.h>
#include <dns/tests/unittest_util.h>
@@ -25,5 +26,5 @@ main(int argc, char* argv[]) {
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
isc::util::unittests::addTestDataPath(TEST_DATA_BUILDDIR);
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index cb1bb1c..27edf5f 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -5,8 +5,12 @@ BUILT_SOURCES += edns_toWire4.wire
BUILT_SOURCES += message_fromWire10.wire message_fromWire11.wire
BUILT_SOURCES += message_fromWire12.wire message_fromWire13.wire
BUILT_SOURCES += message_fromWire14.wire message_fromWire15.wire
-BUILT_SOURCES += message_fromWire16.wire
+BUILT_SOURCES += message_fromWire16.wire message_fromWire17.wire
+BUILT_SOURCES += message_fromWire18.wire message_fromWire19.wire
+BUILT_SOURCES += message_fromWire20.wire message_fromWire21.wire
+BUILT_SOURCES += message_fromWire22.wire
BUILT_SOURCES += message_toWire2.wire message_toWire3.wire
+BUILT_SOURCES += message_toWire4.wire message_toWire5.wire
BUILT_SOURCES += message_toText1.wire message_toText2.wire
BUILT_SOURCES += message_toText3.wire
BUILT_SOURCES += name_toWire5.wire name_toWire6.wire
@@ -24,10 +28,20 @@ BUILT_SOURCES += rdata_nsec3_fromWire10.wire rdata_nsec3_fromWire11.wire
BUILT_SOURCES += rdata_nsec3_fromWire12.wire rdata_nsec3_fromWire13.wire
BUILT_SOURCES += rdata_nsec3_fromWire14.wire rdata_nsec3_fromWire15.wire
BUILT_SOURCES += rdata_rrsig_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire1.wire rdata_minfo_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire3.wire rdata_minfo_fromWire4.wire
+BUILT_SOURCES += rdata_minfo_fromWire5.wire rdata_minfo_fromWire6.wire
+BUILT_SOURCES += rdata_minfo_toWire1.wire rdata_minfo_toWire2.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed1.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed2.wire
BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -47,8 +61,7 @@ BUILT_SOURCES += tsig_verify10.wire
# NOTE: keep this in sync with real file listing
# so is included in tarball
-EXTRA_DIST = gen-wiredata.py.in
-EXTRA_DIST += edns_toWire1.spec edns_toWire2.spec
+EXTRA_DIST = edns_toWire1.spec edns_toWire2.spec
EXTRA_DIST += edns_toWire3.spec edns_toWire4.spec
EXTRA_DIST += masterload.txt
EXTRA_DIST += message_fromWire1 message_fromWire2
@@ -59,7 +72,11 @@ EXTRA_DIST += message_fromWire9 message_fromWire10.spec
EXTRA_DIST += message_fromWire11.spec message_fromWire12.spec
EXTRA_DIST += message_fromWire13.spec message_fromWire14.spec
EXTRA_DIST += message_fromWire15.spec message_fromWire16.spec
+EXTRA_DIST += message_fromWire17.spec message_fromWire18.spec
+EXTRA_DIST += message_fromWire19.spec message_fromWire20.spec
+EXTRA_DIST += message_fromWire21.spec message_fromWire22.spec
EXTRA_DIST += message_toWire1 message_toWire2.spec message_toWire3.spec
+EXTRA_DIST += message_toWire4.spec message_toWire5.spec
EXTRA_DIST += message_toText1.txt message_toText1.spec
EXTRA_DIST += message_toText2.txt message_toText2.spec
EXTRA_DIST += message_toText3.txt message_toText3.spec
@@ -73,6 +90,7 @@ EXTRA_DIST += question_fromWire question_toWire1 question_toWire2
EXTRA_DIST += rdatafields1.spec rdatafields2.spec rdatafields3.spec
EXTRA_DIST += rdatafields4.spec rdatafields5.spec rdatafields6.spec
EXTRA_DIST += rdata_cname_fromWire rdata_dname_fromWire rdata_dnskey_fromWire
+EXTRA_DIST += rdata_dhcid_fromWire rdata_dhcid_toWire
EXTRA_DIST += rdata_ds_fromWire rdata_in_a_fromWire rdata_in_aaaa_fromWire
EXTRA_DIST += rdata_mx_fromWire rdata_mx_toWire1 rdata_mx_toWire2
EXTRA_DIST += rdata_ns_fromWire
@@ -96,7 +114,18 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
+EXTRA_DIST += rdata_srv_fromWire
+EXTRA_DIST += rdata_minfo_fromWire1.spec rdata_minfo_fromWire2.spec
+EXTRA_DIST += rdata_minfo_fromWire3.spec rdata_minfo_fromWire4.spec
+EXTRA_DIST += rdata_minfo_fromWire5.spec rdata_minfo_fromWire6.spec
+EXTRA_DIST += rdata_minfo_toWire1.spec rdata_minfo_toWire2.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed1.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed2.spec
EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
EXTRA_DIST += rdata_txt_fromWire3.spec rdata_txt_fromWire4.spec
EXTRA_DIST += rdata_txt_fromWire5.spec rdata_unknown_fromWire
@@ -118,4 +147,4 @@ EXTRA_DIST += tsig_verify7.spec tsig_verify8.spec tsig_verify9.spec
EXTRA_DIST += tsig_verify10.spec
.spec.wire:
- ./gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/dns/tests/testdata/gen-wiredata.py.in b/src/lib/dns/tests/testdata/gen-wiredata.py.in
deleted file mode 100755
index fd98c6e..0000000
--- a/src/lib/dns/tests/testdata/gen-wiredata.py.in
+++ /dev/null
@@ -1,612 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import configparser, re, time, socket, sys
-from datetime import datetime
-from optparse import OptionParser
-
-re_hex = re.compile(r'^0x[0-9a-fA-F]+')
-re_decimal = re.compile(r'^\d+$')
-re_string = re.compile(r"\'(.*)\'$")
-
-dnssec_timefmt = '%Y%m%d%H%M%S'
-
-dict_qr = { 'query' : 0, 'response' : 1 }
-dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
- 'update' : 5 }
-rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
-dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
- 'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
- 'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
-rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
-dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
- 'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
- 'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
- 'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
- 'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
- 'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
- 'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
- 'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
- 'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
- 'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
- 'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
- 'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
- 'maila' : 254, 'any' : 255 }
-rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
-dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
-rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
- dict_rrclass.keys()])
-dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
- 'rsasha1' : 5 }
-dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
-rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
- dict_algorithm.keys()])
-rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
- dict_nsec3_algorithm.keys()])
-
-header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
- 'rcode' : dict_rcode }
-question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
-rrsig_xtables = { 'algorithm' : dict_algorithm }
-
-def parse_value(value, xtable = {}):
- if re.search(re_hex, value):
- return int(value, 16)
- if re.search(re_decimal, value):
- return int(value)
- m = re.match(re_string, value)
- if m:
- return m.group(1)
- lovalue = value.lower()
- if lovalue in xtable:
- return xtable[lovalue]
- return value
-
-def code_totext(code, dict):
- if code in dict.keys():
- return dict[code] + '(' + str(code) + ')'
- return str(code)
-
-def encode_name(name, absolute=True):
- # make sure the name is dot-terminated. duplicate dots will be ignored
- # below.
- name += '.'
- labels = name.split('.')
- wire = ''
- for l in labels:
- if len(l) > 4 and l[0:4] == 'ptr=':
- # special meta-syntax for compression pointer
- wire += '%04x' % (0xc000 | int(l[4:]))
- break
- if absolute or len(l) > 0:
- wire += '%02x' % len(l)
- wire += ''.join(['%02x' % ord(ch) for ch in l])
- if len(l) == 0:
- break
- return wire
-
-def encode_string(name, len=None):
- if type(name) is int and len is not None:
- return '%0.*x' % (len * 2, name)
- return ''.join(['%02x' % ord(ch) for ch in name])
-
-def count_namelabels(name):
- if name == '.': # special case
- return 0
- m = re.match('^(.*)\.$', name)
- if m:
- name = m.group(1)
- return len(name.split('.'))
-
-def get_config(config, section, configobj, xtables = {}):
- try:
- for field in config.options(section):
- value = config.get(section, field)
- if field in xtables.keys():
- xtable = xtables[field]
- else:
- xtable = {}
- configobj.__dict__[field] = parse_value(value, xtable)
- except configparser.NoSectionError:
- return False
- return True
-
-def print_header(f, input_file):
- f.write('''###
-### This data file was auto-generated from ''' + input_file + '''
-###
-''')
-
-class Name:
- name = 'example.com'
- pointer = None # no compression by default
- def dump(self, f):
- name = self.name
- if self.pointer is not None:
- if len(name) > 0 and name[-1] != '.':
- name += '.'
- name += 'ptr=%d' % self.pointer
- name_wire = encode_name(name)
- f.write('\n# DNS Name: %s' % self.name)
- if self.pointer is not None:
- f.write(' + compression pointer: %d' % self.pointer)
- f.write('\n')
- f.write('%s' % name_wire)
- f.write('\n')
-
-class DNSHeader:
- id = 0x1035
- (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
- mbz = 0
- rcode = 0 # noerror
- opcode = 0 # query
- (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
- def dump(self, f):
- f.write('\n# Header Section\n')
- f.write('# ID=' + str(self.id))
- f.write(' QR=' + ('Response' if self.qr else 'Query'))
- f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
- f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
- f.write('%s' % (' AA' if self.aa else ''))
- f.write('%s' % (' TC' if self.tc else ''))
- f.write('%s' % (' RD' if self.rd else ''))
- f.write('%s' % (' AD' if self.ad else ''))
- f.write('%s' % (' CD' if self.cd else ''))
- f.write('\n')
- f.write('%04x ' % self.id)
- flag_and_code = 0
- flag_and_code |= (self.qr << 15 | self.opcode << 14 | self.aa << 10 |
- self.tc << 9 | self.rd << 8 | self.ra << 7 |
- self.mbz << 6 | self.ad << 5 | self.cd << 4 |
- self.rcode)
- f.write('%04x\n' % flag_and_code)
- f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
- (self.qdcount, self.ancount, self.nscount, self.arcount))
- f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
- self.nscount, self.arcount))
-
-class DNSQuestion:
- name = 'example.com.'
- rrtype = parse_value('A', dict_rrtype)
- rrclass = parse_value('IN', dict_rrclass)
- def dump(self, f):
- f.write('\n# Question Section\n')
- f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
- (self.name,
- code_totext(self.rrtype, rdict_rrtype),
- code_totext(self.rrclass, rdict_rrclass)))
- f.write(encode_name(self.name))
- f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
-
-class EDNS:
- name = '.'
- udpsize = 4096
- extrcode = 0
- version = 0
- do = 0
- mbz = 0
- rdlen = 0
- def dump(self, f):
- f.write('\n# EDNS OPT RR\n')
- f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
- (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
- self.udpsize, self.extrcode, self.version,
- 1 if self.do else 0))
-
- code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
- extflags = (self.do << 15) | (self.mbz & 0x8000)
- f.write('%s %04x %04x %04x %04x\n' %
- (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
- code_vers, extflags))
- f.write('# RDLEN=%d\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
-
-class RR:
- '''This is a base class for various types of RR test data.
- For each RR type (A, AAAA, NS, etc), we define a derived class of RR
- to dump type specific RDATA parameters. This class defines parameters
- common to all types of RDATA, namely the owner name, RR class and TTL.
- The dump() method of derived classes are expected to call dump_header(),
- whose default implementation is provided in this class. This method
- decides whether to dump the test data as an RR (with name, type, class)
- or only as RDATA (with its length), and dumps the corresponding data
- via the specified file object.
-
- By convention we assume derived classes are named after the common
- standard mnemonic of the corresponding RR types. For example, the
- derived class for the RR type SOA should be named "SOA".
-
- Configurable parameters are as follows:
- - as_rr (bool): Whether or not the data is to be dumped as an RR. False
- by default.
- - rr_class (string): The RR class of the data. Only meaningful when the
- data is dumped as an RR. Default is 'IN'.
- - rr_ttl (integer): The TTL value of the RR. Only meaningful when the
- data is dumped as an RR. Default is 86400 (1 day).
- '''
-
- def __init__(self):
- self.as_rr = False
- # only when as_rr is True, same for class/TTL:
- self.rr_name = 'example.com'
- self.rr_class = 'IN'
- self.rr_ttl = 86400
- def dump_header(self, f, rdlen):
- type_txt = self.__class__.__name__
- type_code = parse_value(type_txt, dict_rrtype)
- if self.as_rr:
- rrclass = parse_value(self.rr_class, dict_rrclass)
- f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d RDLEN=%d)\n' %
- (type_txt, self.rr_name,
- code_totext(rrclass, rdict_rrclass), self.rr_ttl, rdlen))
- f.write('%s %04x %04x %08x %04x\n' %
- (encode_name(self.rr_name), type_code, rrclass,
- self.rr_ttl, rdlen))
- else:
- f.write('\n# %s RDATA (RDLEN=%d)\n' % (type_txt, rdlen))
- f.write('%04x\n' % rdlen)
-
-class A(RR):
- rdlen = 4 # fixed by default
- address = '192.0.2.1'
-
- def dump(self, f):
- self.dump_header(f, self.rdlen)
- f.write('# Address=%s\n' % (self.address))
- bin_address = socket.inet_aton(self.address)
- f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
- bin_address[2], bin_address[3]))
-
-class NS(RR):
- rdlen = None # auto calculate
- nsname = 'ns.example.com'
-
- def dump(self, f):
- nsname_wire = encode_name(self.nsname)
- if self.rdlen is None:
- self.rdlen = len(nsname_wire) / 2
- self.dump_header(f, self.rdlen)
- f.write('# NS name=%s\n' % (self.nsname))
- f.write('%s\n' % nsname_wire)
-
-class SOA(RR):
- rdlen = None # auto-calculate
- mname = 'ns.example.com'
- rname = 'root.example.com'
- serial = 2010012601
- refresh = 3600
- retry = 300
- expire = 3600000
- minimum = 1200
- def dump(self, f):
- mname_wire = encode_name(self.mname)
- rname_wire = encode_name(self.rname)
- if self.rdlen is None:
- self.rdlen = int(20 + len(mname_wire) / 2 + len(str(rname_wire)) / 2)
- self.dump_header(f, self.rdlen)
- f.write('# NNAME=%s RNAME=%s\n' % (self.mname, self.rname))
- f.write('%s %s\n' % (mname_wire, rname_wire))
- f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
- (self.serial, self.refresh, self.retry, self.expire,
- self.minimum))
- f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
- self.retry, self.expire,
- self.minimum))
-
-class TXT:
- rdlen = -1 # auto-calculate
- nstring = 1 # number of character-strings
- stringlen = -1 # default string length, auto-calculate
- string = 'Test String' # default string
- def dump(self, f):
- stringlen_list = []
- string_list = []
- wirestring_list = []
- for i in range(0, self.nstring):
- key_string = 'string' + str(i)
- if key_string in self.__dict__:
- string_list.append(self.__dict__[key_string])
- else:
- string_list.append(self.string)
- wirestring_list.append(encode_string(string_list[-1]))
- key_stringlen = 'stringlen' + str(i)
- if key_stringlen in self.__dict__:
- stringlen_list.append(self.__dict__[key_stringlen])
- else:
- stringlen_list.append(self.stringlen)
- if stringlen_list[-1] < 0:
- stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
- rdlen = self.rdlen
- if rdlen < 0:
- rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
- f.write('\n# TXT RDATA (RDLEN=%d)\n' % rdlen)
- f.write('%04x\n' % rdlen);
- for i in range(0, self.nstring):
- f.write('# String Len=%d, String=\"%s\"\n' %
- (stringlen_list[i], string_list[i]))
- f.write('%02x%s%s\n' % (stringlen_list[i],
- ' ' if len(wirestring_list[i]) > 0 else '',
- wirestring_list[i]))
-
-class RP:
- '''Implements rendering RP RDATA in the wire format.
- Configurable parameters are as follows:
- - rdlen: 16-bit RDATA length. If omitted, the accurate value is auto
- calculated and used; if negative, the RDLEN field will be omitted from
- the output data.
- - mailbox: The mailbox field.
- - text: The text field.
- All of these parameters have the default values and can be omitted.
- '''
- rdlen = None # auto-calculate
- mailbox = 'root.example.com'
- text = 'rp-text.example.com'
- def dump(self, f):
- mailbox_wire = encode_name(self.mailbox)
- text_wire = encode_name(self.text)
- if self.rdlen is None:
- self.rdlen = (len(mailbox_wire) + len(text_wire)) / 2
- else:
- self.rdlen = int(self.rdlen)
- if self.rdlen >= 0:
- f.write('\n# RP RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
- else:
- f.write('\n# RP RDATA (RDLEN omitted)\n')
- f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
- f.write('%s %s\n' % (mailbox_wire, text_wire))
-
-class NSECBASE:
- '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
- these RRs. The NSEC and NSEC3 classes will be inherited from this
- class.'''
- nbitmap = 1 # number of bitmaps
- block = 0
- maplen = None # default bitmap length, auto-calculate
- bitmap = '040000000003' # an arbtrarily chosen bitmap sample
- def dump(self, f):
- # first, construct the bitmpa data
- block_list = []
- maplen_list = []
- bitmap_list = []
- for i in range(0, self.nbitmap):
- key_bitmap = 'bitmap' + str(i)
- if key_bitmap in self.__dict__:
- bitmap_list.append(self.__dict__[key_bitmap])
- else:
- bitmap_list.append(self.bitmap)
- key_maplen = 'maplen' + str(i)
- if key_maplen in self.__dict__:
- maplen_list.append(self.__dict__[key_maplen])
- else:
- maplen_list.append(self.maplen)
- if maplen_list[-1] is None: # calculate it if not specified
- maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
- key_block = 'block' + str(i)
- if key_block in self.__dict__:
- block_list.append(self.__dict__[key_block])
- else:
- block_list.append(self.block)
-
- # dump RR-type specific part (NSEC or NSEC3)
- self.dump_fixedpart(f, 2 * self.nbitmap + \
- int(len(''.join(bitmap_list)) / 2))
-
- # dump the bitmap
- for i in range(0, self.nbitmap):
- f.write('# Bitmap: Block=%d, Length=%d\n' %
- (block_list[i], maplen_list[i]))
- f.write('%02x %02x %s\n' %
- (block_list[i], maplen_list[i], bitmap_list[i]))
-
-class NSEC(NSECBASE):
- rdlen = None # auto-calculate
- nextname = 'next.example.com'
- def dump_fixedpart(self, f, bitmap_totallen):
- name_wire = encode_name(self.nextname)
- if self.rdlen is None:
- # if rdlen needs to be calculated, it must be based on the bitmap
- # length, because the configured maplen can be fake.
- self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
- f.write('\n# NSEC RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen);
- f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
- int(len(name_wire) / 2)))
- f.write('%s\n' % name_wire)
-
-class NSEC3(NSECBASE):
- rdlen = None # auto-calculate
- hashalg = 1 # SHA-1
- optout = False # opt-out flag
- mbz = 0 # other flag fields (none defined yet)
- iterations = 1
- saltlen = 5
- salt = 's' * saltlen
- hashlen = 20
- hash = 'h' * hashlen
- def dump_fixedpart(self, f, bitmap_totallen):
- if self.rdlen is None:
- # if rdlen needs to be calculated, it must be based on the bitmap
- # length, because the configured maplen can be fake.
- self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
- + bitmap_totallen
- f.write('\n# NSEC3 RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
- optout_val = 1 if self.optout else 0
- f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
- (code_totext(self.hashalg, rdict_nsec3_algorithm),
- optout_val, self.mbz, self.iterations))
- f.write('%02x %02x %04x\n' %
- (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
- f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
- f.write('%02x%s%s\n' % (self.saltlen,
- ' ' if len(self.salt) > 0 else '',
- encode_string(self.salt)))
- f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
- f.write('%02x%s%s\n' % (self.hashlen,
- ' ' if len(self.hash) > 0 else '',
- encode_string(self.hash)))
-
-class RRSIG:
- rdlen = -1 # auto-calculate
- covered = 1 # A
- algorithm = 5 # RSA-SHA1
- labels = -1 # auto-calculate (#labels of signer)
- originalttl = 3600
- expiration = int(time.mktime(datetime.strptime('20100131120000',
- dnssec_timefmt).timetuple()))
- inception = int(time.mktime(datetime.strptime('20100101120000',
- dnssec_timefmt).timetuple()))
- tag = 0x1035
- signer = 'example.com'
- signature = 0x123456789abcdef123456789abcdef
- def dump(self, f):
- name_wire = encode_name(self.signer)
- sig_wire = '%x' % self.signature
- rdlen = self.rdlen
- if rdlen < 0:
- rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
- labels = self.labels
- if labels < 0:
- labels = count_namelabels(self.signer)
- f.write('\n# RRSIG RDATA (RDLEN=%d)\n' % rdlen)
- f.write('%04x\n' % rdlen);
- f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
- (code_totext(self.covered, rdict_rrtype),
- code_totext(self.algorithm, rdict_algorithm), labels,
- self.originalttl))
- f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
- labels, self.originalttl))
- f.write('# Expiration=%s, Inception=%s\n' %
- (str(self.expiration), str(self.inception)))
- f.write('%08x %08x\n' % (self.expiration, self.inception))
- f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
- f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
-
-class TSIG(RR):
- rdlen = None # auto-calculate
- algorithm = 'hmac-sha256'
- time_signed = 1286978795 # arbitrarily chosen default
- fudge = 300
- mac_size = None # use a common value for the algorithm
- mac = None # use 'x' * mac_size
- original_id = 2845 # arbitrarily chosen default
- error = 0
- other_len = None # 6 if error is BADTIME; otherwise 0
- other_data = None # use time_signed + fudge + 1 for BADTIME
- dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
-
- # TSIG has some special defaults
- def __init__(self):
- super().__init__()
- self.rr_class = 'ANY'
- self.rr_ttl = 0
-
- def dump(self, f):
- if str(self.algorithm) == 'hmac-md5':
- name_wire = encode_name('hmac-md5.sig-alg.reg.int')
- else:
- name_wire = encode_name(self.algorithm)
- mac_size = self.mac_size
- if mac_size is None:
- if self.algorithm in self.dict_macsize.keys():
- mac_size = self.dict_macsize[self.algorithm]
- else:
- raise RuntimeError('TSIG Mac Size cannot be determined')
- mac = encode_string('x' * mac_size) if self.mac is None else \
- encode_string(self.mac, mac_size)
- other_len = self.other_len
- if other_len is None:
- # 18 = BADTIME
- other_len = 6 if self.error == 18 else 0
- other_data = self.other_data
- if other_data is None:
- other_data = '%012x' % (self.time_signed + self.fudge + 1) \
- if self.error == 18 else ''
- else:
- other_data = encode_string(self.other_data, other_len)
- if self.rdlen is None:
- self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
- len(other_data) / 2)
- self.dump_header(f, self.rdlen)
- f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
- (self.algorithm, self.time_signed, self.fudge))
- f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
- f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
- f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
- f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
- f.write('%04x %04x\n' % (self.original_id, self.error))
- f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
- f.write('%04x%s\n' % (other_len,
- ' ' + other_data if len(other_data) > 0 else ''))
-
-def get_config_param(section):
- config_param = {'name' : (Name, {}),
- 'header' : (DNSHeader, header_xtables),
- 'question' : (DNSQuestion, question_xtables),
- 'edns' : (EDNS, {}), 'a' : (A, {}), 'ns' : (NS, {}),
- 'soa' : (SOA, {}), 'txt' : (TXT, {}),
- 'rp' : (RP, {}), 'rrsig' : (RRSIG, {}),
- 'nsec' : (NSEC, {}), 'nsec3' : (NSEC3, {}),
- 'tsig' : (TSIG, {}) }
- s = section
- m = re.match('^([^:]+)/\d+$', section)
- if m:
- s = m.group(1)
- return config_param[s]
-
-usage = '''usage: %prog [options] input_file'''
-
-if __name__ == "__main__":
- parser = OptionParser(usage=usage)
- parser.add_option('-o', '--output', action='store', dest='output',
- default=None, metavar='FILE',
- help='output file name [default: prefix of input_file]')
- (options, args) = parser.parse_args()
-
- if len(args) == 0:
- parser.error('input file is missing')
- configfile = args[0]
-
- outputfile = options.output
- if not outputfile:
- m = re.match('(.*)\.[^.]+$', configfile)
- if m:
- outputfile = m.group(1)
- else:
- raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
-
- config = configparser.SafeConfigParser()
- config.read(configfile)
-
- output = open(outputfile, 'w')
-
- print_header(output, configfile)
-
- # First try the 'custom' mode; if it fails assume the standard mode.
- try:
- sections = config.get('custom', 'sections').split(':')
- except configparser.NoSectionError:
- sections = ['header', 'question', 'edns']
-
- for s in sections:
- section_param = get_config_param(s)
- (obj, xtables) = (section_param[0](), section_param[1])
- if get_config(config, s, obj, xtables):
- obj.dump(output)
-
- output.close()
diff --git a/src/lib/dns/tests/testdata/message_fromWire17.spec b/src/lib/dns/tests/testdata/message_fromWire17.spec
new file mode 100644
index 0000000..366cf05
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire17.spec
@@ -0,0 +1,22 @@
+#
+# A simple DNS query message signed with TSIG
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0x22c2
+rd: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e179212
+mac_size: 16
+mac: 0x8214b04634e32323d651ac60b08e6388
+original_id: 0x22c2
diff --git a/src/lib/dns/tests/testdata/message_fromWire18.spec b/src/lib/dns/tests/testdata/message_fromWire18.spec
new file mode 100644
index 0000000..0b2592a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire18.spec
@@ -0,0 +1,23 @@
+#
+# Another simple DNS query message signed with TSIG. Only the ID and the
+# time signed (and, as a result, the MAC) are different.
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0xd6e2
+rd: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e17b38d
+mac_size: 16
+mac: 0x903b5b194a799b03a37718820c2404f2
+original_id: 0xd6e2
diff --git a/src/lib/dns/tests/testdata/message_fromWire19.spec b/src/lib/dns/tests/testdata/message_fromWire19.spec
new file mode 100644
index 0000000..8212dbf
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire19.spec
@@ -0,0 +1,20 @@
+#
+# An unrealistic DNS response message containing mixed types of RRs in the
+# answer section, in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+ancount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire20.spec b/src/lib/dns/tests/testdata/message_fromWire20.spec
new file mode 100644
index 0000000..91986e4
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire20.spec
@@ -0,0 +1,20 @@
+#
+# An unrealistic DNS response message containing mixed types of RRs in the
+# authority section, in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+nscount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire21.spec b/src/lib/dns/tests/testdata/message_fromWire21.spec
new file mode 100644
index 0000000..cd6aac9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire21.spec
@@ -0,0 +1,20 @@
+#
+# An unrealistic DNS response message containing mixed types of RRs in the
+# additional section, in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+arcount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire22.spec b/src/lib/dns/tests/testdata/message_fromWire22.spec
new file mode 100644
index 0000000..a52523b
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire22.spec
@@ -0,0 +1,14 @@
+#
+# A simple DNS message containing one SOA RR in the answer section. This is
+# intended to be trimmed to emulate a bogus message.
+#
+
+[custom]
+sections: header:question:soa
+[header]
+qr: 1
+ancount: 1
+[question]
+rrtype: SOA
+[soa]
+as_rr: True
diff --git a/src/lib/dns/tests/testdata/message_toWire4.spec b/src/lib/dns/tests/testdata/message_toWire4.spec
new file mode 100644
index 0000000..aab7e10
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_toWire4.spec
@@ -0,0 +1,27 @@
+#
+# Truncated DNS response signed with TSIG
+# This is expected to be a response to "fromWire17"
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0x22c2
+rd: 1
+qr: 1
+aa: 1
+# It's "truncated":
+tc: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e179212
+mac_size: 16
+mac: 0x88adc3811d1d6bec7c684438906fc694
+original_id: 0x22c2
diff --git a/src/lib/dns/tests/testdata/message_toWire5.spec b/src/lib/dns/tests/testdata/message_toWire5.spec
new file mode 100644
index 0000000..e97fb43
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_toWire5.spec
@@ -0,0 +1,36 @@
+#
+# The longest possible (without EDNS) DNS response with TSIG, i.e. the total
+# length should be 512 bytes.
+#
+
+[custom]
+sections: header:question:txt/1:txt/2:tsig
+[header]
+id: 0xd6e2
+rd: 1
+qr: 1
+aa: 1
+ancount: 2
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[txt/1]
+as_rr: True
+# QNAME is fully compressed
+rr_name: ptr=12
+string: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde
+[txt/2]
+as_rr: True
+# QNAME is fully compressed
+rr_name: ptr=12
+string: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e17b38d
+mac_size: 16
+mac: 0xbe2ba477373d2496891e2fda240ee4ec
+original_id: 0xd6e2
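
For reference, the 512-byte target can be checked by adding up the sections this spec generates: 12 bytes of header, 21 bytes of question (the 17-byte wire encoding of www.example.com plus type and class), 268 bytes for the first TXT RR (a 2-byte compression pointer, 2+2+4+2 bytes of type/class/TTL/RDLENGTH, and a 1+255-byte character-string, the maximum), 126 bytes for the second TXT RR (the same fixed fields plus a 1+113-byte character-string), and 85 bytes for the TSIG RR (26 + 17 + 26 + 16, per the getTSIGLength() arithmetic elsewhere in this branch). That gives 12 + 21 + 268 + 126 + 85 = 512 bytes.
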
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_fromWire b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
new file mode 100644
index 0000000..0c8d56a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
@@ -0,0 +1,12 @@
+#
+# DHCID RDATA stored in an input buffer
+#
+# Valid RDATA for 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+#
+# RDLENGTH=41 bytes
+# 0 1
+ 00 29
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
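
As a quick consistency check: the base64 string in the comment is 56 characters including one '=' of padding, which decodes to 3 * (56/4) - 1 = 41 octets, matching both the RDLENGTH of 0x0029 (41) and the 16 + 16 + 9 hex bytes above.
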
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_toWire b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
new file mode 100644
index 0000000..99ec229
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
@@ -0,0 +1,7 @@
+#
+# DHCID RDATA stored in an output buffer
+#
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
new file mode 100644
index 0000000..2c43db0
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: minfo
+[minfo]
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
new file mode 100644
index 0000000..d781cac
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
@@ -0,0 +1,7 @@
+[custom]
+sections: name:minfo
+[name]
+name: a.example.com.
+[minfo]
+rmailbox: rmailbox.ptr=02
+emailbox: emailbox.ptr=02
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
new file mode 100644
index 0000000..a1d4b76
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too short
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
new file mode 100644
index 0000000..269a6ce
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too long
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
new file mode 100644
index 0000000..3a888e3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus rmailbox name
+[minfo]
+rmailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
new file mode 100644
index 0000000..c75ed8e
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus emailbox name
+[minfo]
+emailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
new file mode 100644
index 0000000..7b340a3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+[minfo]
+emailbox: emailbox.ptr=09
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
new file mode 100644
index 0000000..132f118
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+emailbox: emailbox.ptr=05
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
new file mode 100644
index 0000000..d99a381
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
@@ -0,0 +1,7 @@
+#
+# The simplest form of MINFO: all default parameters
+#
+[custom]
+sections: minfo
+[minfo]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
new file mode 100644
index 0000000..0f78fcc
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
@@ -0,0 +1,8 @@
+#
+# A simple form of MINFO: custom rmailbox and default emailbox
+#
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_srv_fromWire b/src/lib/dns/tests/testdata/rdata_srv_fromWire
new file mode 100644
index 0000000..dac87e9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_srv_fromWire
@@ -0,0 +1,36 @@
+#
+# various kinds of SRV RDATA stored in an input buffer
+#
+# RDLENGTH=21 bytes
+# 0 1
+ 00 15
+# 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 20 1 2(bytes)
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# short length
+# 3 4
+ 00 12
+# 5 6 7 8 9 30 1 2 3 4 5 6 7 8 9 40 1 2 3 4 5
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# length too long
+# 6 7
+ 00 19
+#
+# 8 9 50 1 2 3 4 5 6 7 8 9 60 1 2 3 4 5 6 7 8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+#
+# incomplete target name
+# 9 70
+ 00 12
+# 1 2 3 4 5 6 7 8 9 80 1 2 3 4 5 6 7 8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63
+#
+#
+# Valid compressed target name: 'a' + pointer
+# 9 90
+ 00 0a
+#
+# 1 2 3 4 5 6 7 8 9 100
+ 00 01 00 05 05 dc 01 61 c0 0a
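
The trailing "c0 0a" in the compressed case is a standard DNS name compression pointer: the top two bits (0xc0) mark a pointer and the remaining 14 bits give the offset, here 0x0a = 10, which in this buffer is where the "example.com" labels of the first target name begin. The compressed target therefore expands to a.example.com.
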
diff --git a/src/lib/dns/tests/tsig_unittest.cc b/src/lib/dns/tests/tsig_unittest.cc
index 55c3ac2..7944b29 100644
--- a/src/lib/dns/tests/tsig_unittest.cc
+++ b/src/lib/dns/tests/tsig_unittest.cc
@@ -425,6 +425,29 @@ TEST_F(TSIGTest, signUsingHMACSHA1) {
}
}
+TEST_F(TSIGTest, signUsingHMACSHA224) {
+ isc::util::detail::gettimeFunction = testGetTime<0x4dae7d5f>;
+
+ secret.clear();
+ decodeBase64("MA+QDhXbyqUak+qnMFyTyEirzng=", secret);
+ TSIGContext sha224_ctx(TSIGKey(test_name, TSIGKey::HMACSHA224_NAME(),
+ &secret[0], secret.size()));
+
+ const uint16_t sha224_qid = 0x0967;
+ const uint8_t expected_mac[] = {
+ 0x3b, 0x93, 0xd3, 0xc5, 0xf9, 0x64, 0xb9, 0xc5, 0x00, 0x35,
+ 0x02, 0x69, 0x9f, 0xfc, 0x44, 0xd6, 0xe2, 0x66, 0xf4, 0x08,
+ 0xef, 0x33, 0xa2, 0xda, 0xa1, 0x48, 0x71, 0xd3
+ };
+ {
+ SCOPED_TRACE("Sign test using HMAC-SHA224");
+ commonSignChecks(createMessageAndSign(sha224_qid, test_name, &sha224_ctx),
+ sha224_qid, 0x4dae7d5f, expected_mac,
+ sizeof(expected_mac), 0, 0, NULL,
+ TSIGKey::HMACSHA224_NAME());
+ }
+}
+
// The first part of this test checks verifying the signed query used for
// the "sign" test.
// The second part of this test generates a signed response to the signed
@@ -904,4 +927,76 @@ TEST_F(TSIGTest, tooShortMAC) {
}
}
+TEST_F(TSIGTest, getTSIGLength) {
+ // Check for the most common case with various algorithms
+ // See the comment in TSIGContext::getTSIGLength() for calculation and
+ // parameter notation.
+ // The key name (www.example.com) is the same for most cases, where n1=17
+
+ // hmac-md5.sig-alg.reg.int.: n2=26, x=16
+ EXPECT_EQ(85, tsig_ctx->getTSIGLength());
+
+ // hmac-sha1: n2=11, x=20
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACSHA1_NAME(),
+ &dummy_data[0], 20)));
+ EXPECT_EQ(74, tsig_ctx->getTSIGLength());
+
+ // hmac-sha256: n2=13, x=32
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA256_NAME(),
+ &dummy_data[0], 32)));
+ EXPECT_EQ(88, tsig_ctx->getTSIGLength());
+
+ // hmac-sha224: n2=13, x=28
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA224_NAME(),
+ &dummy_data[0], 28)));
+ EXPECT_EQ(84, tsig_ctx->getTSIGLength());
+
+ // hmac-sha384: n2=13, x=48
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA384_NAME(),
+ &dummy_data[0], 48)));
+ EXPECT_EQ(104, tsig_ctx->getTSIGLength());
+
+ // hmac-sha512: n2=13, x=64
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA512_NAME(),
+ &dummy_data[0], 64)));
+ EXPECT_EQ(120, tsig_ctx->getTSIGLength());
+
+ // bad key case: n1=len(badkey.example.com)=20, n2=26, x=0
+ tsig_ctx.reset(new TSIGContext(badkey_name, TSIGKey::HMACMD5_NAME(),
+ keyring));
+ EXPECT_EQ(72, tsig_ctx->getTSIGLength());
+
+ // bad sig case: n1=17, n2=26, x=0
+ isc::util::detail::gettimeFunction = testGetTime<0x4da8877a>;
+ createMessageFromFile("message_toWire2.wire");
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACMD5_NAME(),
+ &dummy_data[0],
+ dummy_data.size())));
+ {
+ SCOPED_TRACE("Verify resulting in BADSIG");
+ commonVerifyChecks(*tsig_ctx, message.getTSIGRecord(),
+ &received_data[0], received_data.size(),
+ TSIGError::BAD_SIG(), TSIGContext::RECEIVED_REQUEST);
+ }
+ EXPECT_EQ(69, tsig_ctx->getTSIGLength());
+
+ // bad time case: n1=17, n2=26, x=16, y=6
+ isc::util::detail::gettimeFunction = testGetTime<0x4da8877a - 1000>;
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACMD5_NAME(),
+ &dummy_data[0],
+ dummy_data.size())));
+ {
+ SCOPED_TRACE("Verify resulting in BADTIME");
+ commonVerifyChecks(*tsig_ctx, message.getTSIGRecord(),
+ &received_data[0], received_data.size(),
+ TSIGError::BAD_TIME(),
+ TSIGContext::RECEIVED_REQUEST);
+ }
+ EXPECT_EQ(91, tsig_ctx->getTSIGLength());
+}
+
} // end namespace
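
As a worked check of one of the expected values above: for the hmac-sha256 case the key name www.example.com takes n1 = 17 bytes on the wire (1+3 + 1+7 + 1+3 + 1), the algorithm name hmac-sha256 takes n2 = 13 bytes (a single 11-byte label plus its length octet and the root octet), and the SHA-256 MAC is x = 32 bytes, so 26 + 17 + 13 + 32 = 88, which is what the test expects.
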
diff --git a/src/lib/dns/tests/tsigkey_unittest.cc b/src/lib/dns/tests/tsigkey_unittest.cc
index dac3c49..20ee802 100644
--- a/src/lib/dns/tests/tsigkey_unittest.cc
+++ b/src/lib/dns/tests/tsigkey_unittest.cc
@@ -40,6 +40,9 @@ TEST_F(TSIGKeyTest, algorithmNames) {
EXPECT_EQ(Name("hmac-md5.sig-alg.reg.int"), TSIGKey::HMACMD5_NAME());
EXPECT_EQ(Name("hmac-sha1"), TSIGKey::HMACSHA1_NAME());
EXPECT_EQ(Name("hmac-sha256"), TSIGKey::HMACSHA256_NAME());
+ EXPECT_EQ(Name("hmac-sha224"), TSIGKey::HMACSHA224_NAME());
+ EXPECT_EQ(Name("hmac-sha384"), TSIGKey::HMACSHA384_NAME());
+ EXPECT_EQ(Name("hmac-sha512"), TSIGKey::HMACSHA512_NAME());
// Also check conversion to cryptolink definitions
EXPECT_EQ(isc::cryptolink::MD5, TSIGKey(key_name, TSIGKey::HMACMD5_NAME(),
@@ -49,6 +52,15 @@ TEST_F(TSIGKeyTest, algorithmNames) {
EXPECT_EQ(isc::cryptolink::SHA256, TSIGKey(key_name,
TSIGKey::HMACSHA256_NAME(),
NULL, 0).getAlgorithm());
+ EXPECT_EQ(isc::cryptolink::SHA224, TSIGKey(key_name,
+ TSIGKey::HMACSHA224_NAME(),
+ NULL, 0).getAlgorithm());
+ EXPECT_EQ(isc::cryptolink::SHA384, TSIGKey(key_name,
+ TSIGKey::HMACSHA384_NAME(),
+ NULL, 0).getAlgorithm());
+ EXPECT_EQ(isc::cryptolink::SHA512, TSIGKey(key_name,
+ TSIGKey::HMACSHA512_NAME(),
+ NULL, 0).getAlgorithm());
}
TEST_F(TSIGKeyTest, construct) {
diff --git a/src/lib/dns/tsig.cc b/src/lib/dns/tsig.cc
index 714b2a5..1bda021 100644
--- a/src/lib/dns/tsig.cc
+++ b/src/lib/dns/tsig.cc
@@ -58,10 +58,32 @@ getTSIGTime() {
}
struct TSIGContext::TSIGContextImpl {
- TSIGContextImpl(const TSIGKey& key) :
- state_(INIT), key_(key), error_(Rcode::NOERROR()),
- previous_timesigned_(0)
- {}
+ TSIGContextImpl(const TSIGKey& key,
+ TSIGError error = TSIGError::NOERROR()) :
+ state_(INIT), key_(key), error_(error),
+ previous_timesigned_(0), digest_len_(0)
+ {
+ if (error == TSIGError::NOERROR()) {
+ // In the normal (NOERROR) case the key should be valid, and we
+ // should be able to pre-create a corresponding HMAC object, which
+ // is likely to be used for sign or verify later. We do this in
+ // the constructor so that we know the expected digest length in
+ // advance. The creation should normally succeed, but the key
+ // information could still be broken, which could trigger an
+ // exception inside the cryptolink module. We ignore it at this
+ // point; a subsequent sign/verify operation will try to create
+ // the HMAC again and will fail in the same way.
+ try {
+ hmac_.reset(CryptoLink::getCryptoLink().createHMAC(
+ key_.getSecret(), key_.getSecretLength(),
+ key_.getAlgorithm()),
+ deleteHMAC);
+ } catch (const Exception&) {
+ return;
+ }
+ digest_len_ = hmac_->getOutputLength();
+ }
+ }
// This helper method is used from verify(). It's expected to be called
// just before verify() returns. It updates internal state based on
@@ -85,6 +107,23 @@ struct TSIGContext::TSIGContextImpl {
return (error);
}
+ // A shortcut method to create an HMAC object for sign/verify. If one
+ // has been successfully created in the constructor, return it; otherwise
+ // create a new one and return it. In the former case, the ownership is
+ // transferred to the caller; the stored HMAC will be reset after the
+ // call.
+ HMACPtr createHMAC() {
+ if (hmac_) {
+ HMACPtr ret = HMACPtr();
+ ret.swap(hmac_);
+ return (ret);
+ }
+ return (HMACPtr(CryptoLink::getCryptoLink().createHMAC(
+ key_.getSecret(), key_.getSecretLength(),
+ key_.getAlgorithm()),
+ deleteHMAC));
+ }
+
// The following three are helper methods to compute the digest for
// TSIG sign/verify in order to unify the common code logic for sign()
// and verify() and to keep these callers concise.
@@ -111,6 +150,8 @@ struct TSIGContext::TSIGContextImpl {
vector<uint8_t> previous_digest_;
TSIGError error_;
uint64_t previous_timesigned_; // only meaningful for response with BADTIME
+ size_t digest_len_;
+ HMACPtr hmac_;
};
void
@@ -221,8 +262,7 @@ TSIGContext::TSIGContext(const Name& key_name, const Name& algorithm_name,
// be used in subsequent response with a TSIG indicating a BADKEY
// error.
impl_ = new TSIGContextImpl(TSIGKey(key_name, algorithm_name,
- NULL, 0));
- impl_->error_ = TSIGError::BAD_KEY();
+ NULL, 0), TSIGError::BAD_KEY());
} else {
impl_ = new TSIGContextImpl(*result.key);
}
@@ -232,6 +272,45 @@ TSIGContext::~TSIGContext() {
delete impl_;
}
+size_t
+TSIGContext::getTSIGLength() const {
+ //
+ // The space required for a TSIG record is:
+ //
+ // n1 bytes for the (key) name
+ // 2 bytes for the type
+ // 2 bytes for the class
+ // 4 bytes for the ttl
+ // 2 bytes for the rdlength
+ // n2 bytes for the algorithm name
+ // 6 bytes for the time signed
+ // 2 bytes for the fudge
+ // 2 bytes for the MAC size
+ // x bytes for the MAC
+ // 2 bytes for the original id
+ // 2 bytes for the error
+ // 2 bytes for the other data length
+ // y bytes for the other data (at most)
+ // ---------------------------------
+ // 26 + n1 + n2 + x + y bytes
+ //
+
+ // Normally the digest length ("x") is the length of the underlying
+ // hash output. If a key-related error occurred, however, the
+ // corresponding TSIG will be "unsigned", and the digest length will be 0.
+ const size_t digest_len =
+ (impl_->error_ == TSIGError::BAD_KEY() ||
+ impl_->error_ == TSIGError::BAD_SIG()) ? 0 : impl_->digest_len_;
+
+ // Other Len ("y") is normally 0; if a BAD_TIME error occurred, the
+ // subsequent TSIG will contain 48 bits of the server's current time.
+ const size_t other_len = (impl_->error_ == TSIGError::BAD_TIME()) ? 6 : 0;
+
+ return (26 + impl_->key_.getKeyName().getLength() +
+ impl_->key_.getAlgorithmName().getLength() +
+ digest_len + other_len);
+}
+
TSIGContext::State
TSIGContext::getState() const {
return (impl_->state_);
@@ -276,11 +355,7 @@ TSIGContext::sign(const uint16_t qid, const void* const data,
return (tsig);
}
- HMACPtr hmac(CryptoLink::getCryptoLink().createHMAC(
- impl_->key_.getSecret(),
- impl_->key_.getSecretLength(),
- impl_->key_.getAlgorithm()),
- deleteHMAC);
+ HMACPtr hmac(impl_->createHMAC());
// If the context has previous MAC (either the Request MAC or its own
// previous MAC), digest it.
@@ -406,11 +481,7 @@ TSIGContext::verify(const TSIGRecord* const record, const void* const data,
return (impl_->postVerifyUpdate(error, NULL, 0));
}
- HMACPtr hmac(CryptoLink::getCryptoLink().createHMAC(
- impl_->key_.getSecret(),
- impl_->key_.getSecretLength(),
- impl_->key_.getAlgorithm()),
- deleteHMAC);
+ HMACPtr hmac(impl_->createHMAC());
// If the context has previous MAC (either the Request MAC or its own
// previous MAC), digest it.
diff --git a/src/lib/dns/tsig.h b/src/lib/dns/tsig.h
index bceec25..028d295 100644
--- a/src/lib/dns/tsig.h
+++ b/src/lib/dns/tsig.h
@@ -353,6 +353,27 @@ public:
TSIGError verify(const TSIGRecord* const record, const void* const data,
const size_t data_len);
+ /// Return the expected length of TSIG RR after \c sign()
+ ///
+ /// This method returns the length of the TSIG RR that would be
+ /// produced as a result of \c sign() with the state of the context
+ /// at the time of the call. The expected length can be computed
+ /// from the key and the algorithm (which determines the MAC size if
+ /// included) and the recorded TSIG error. Specifically, if a key
+ /// related error has been identified, the MAC will be excluded; if
+ /// a time error has occurred, the TSIG will include "other data".
+ ///
+ /// This method is provided mainly for the convenience of the Message
+ /// class, which needs to know the expected TSIG length in rendering a
+ /// signed DNS message so that it can handle truncated messages with TSIG
+ /// correctly. Normal applications wouldn't need this method. The Python
+ /// binding for this method won't be provided for the same reason.
+ ///
+ /// \exception None
+ ///
+ /// \return The expected TSIG RR length in bytes
+ size_t getTSIGLength() const;
+
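// (Editor's sketch with hypothetical names: how a caller such as the Message
// class might use getTSIGLength() to leave room for the TSIG RR when
// rendering, assuming a renderer object with a setLengthLimit()-style
// method.)
//
//   const size_t udp_limit = 512;  // assumed transport size limit
//   renderer.setLengthLimit(udp_limit - tsig_context.getTSIGLength());
//   // ... render the message, then sign the rendered data with the context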
/// Return the current state of the context
///
/// \note
diff --git a/src/lib/dns/tsigerror.h b/src/lib/dns/tsigerror.h
index 9794c41..8efd3ae 100644
--- a/src/lib/dns/tsigerror.h
+++ b/src/lib/dns/tsigerror.h
@@ -22,17 +22,11 @@
namespace isc {
namespace dns {
-
-class RRClass;
-
/// TSIG errors
///
/// The \c TSIGError class objects represent standard errors related to
/// TSIG protocol operations as defined in related specifications, mainly
/// in RFC2845.
-///
-/// (RCODEs) of the header section of DNS messages, and extended response
-/// codes as defined in the EDNS specification.
class TSIGError {
public:
/// Constants for pre-defined TSIG error values.
@@ -58,7 +52,7 @@ public:
///
/// \exception None
///
- /// \param code The underlying 16-bit error code value of the \c TSIGError.
+ /// \param error_code The underlying 16-bit error code value of the \c TSIGError.
explicit TSIGError(uint16_t error_code) : code_(error_code) {}
/// Constructor from \c Rcode.
diff --git a/src/lib/dns/tsigkey.cc b/src/lib/dns/tsigkey.cc
index 4082fbe..d7d60eb 100644
--- a/src/lib/dns/tsigkey.cc
+++ b/src/lib/dns/tsigkey.cc
@@ -42,6 +42,16 @@ namespace {
if (name == TSIGKey::HMACSHA256_NAME()) {
return (isc::cryptolink::SHA256);
}
+ if (name == TSIGKey::HMACSHA224_NAME()) {
+ return (isc::cryptolink::SHA224);
+ }
+ if (name == TSIGKey::HMACSHA384_NAME()) {
+ return (isc::cryptolink::SHA384);
+ }
+ if (name == TSIGKey::HMACSHA512_NAME()) {
+ return (isc::cryptolink::SHA512);
+ }
+
return (isc::cryptolink::UNKNOWN_HASH);
}
}
@@ -207,6 +217,24 @@ Name& TSIGKey::HMACSHA256_NAME() {
return (alg_name);
}
+const
+Name& TSIGKey::HMACSHA224_NAME() {
+ static Name alg_name("hmac-sha224");
+ return (alg_name);
+}
+
+const
+Name& TSIGKey::HMACSHA384_NAME() {
+ static Name alg_name("hmac-sha384");
+ return (alg_name);
+}
+
+const
+Name& TSIGKey::HMACSHA512_NAME() {
+ static Name alg_name("hmac-sha512");
+ return (alg_name);
+}
+
struct TSIGKeyRing::TSIGKeyRingImpl {
typedef map<Name, TSIGKey> TSIGKeyMap;
typedef pair<Name, TSIGKey> NameAndKey;
diff --git a/src/lib/dns/tsigkey.h b/src/lib/dns/tsigkey.h
index f0df709..6081dd3 100644
--- a/src/lib/dns/tsigkey.h
+++ b/src/lib/dns/tsigkey.h
@@ -113,10 +113,10 @@ public:
/// \brief Constructor from an input string
///
/// The string must be of the form:
- /// <name>:<secret>[:<algorithm>]
- /// Where <name> is a domain name for the key, <secret> is a
+ /// name:secret[:algorithm]
+ /// Where "name" is a domain name for the key, "secret" is a
/// base64 representation of the key secret, and the optional
- /// algorithm is an algorithm identifier as specified in RFC4635.
+ /// "algorithm" is an algorithm identifier as specified in RFC 4635.
/// The default algorithm is hmac-md5.sig-alg.reg.int.
///
/// The same restriction about the algorithm name (and secret) as that
@@ -188,11 +188,10 @@ public:
///
/// The resulting string will be of the form
/// name:secret:algorithm
- /// Where <name> is a domain name for the key, <secret> is a
- /// base64 representation of the key secret, and algorithm is
- /// an algorithm identifier as specified in RFC4635
+ /// Where "name" is a domain name for the key, "secret" is a
+ /// base64 representation of the key secret, and "algorithm" is
+ /// an algorithm identifier as specified in RFC 4635.
///
- /// \param key the TSIG key to convert
/// \return The string representation of the given TSIGKey.
std::string toText() const;
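// (Editor's sketch: constructing a key from the string form documented above
// and converting it back; the key name and base64 secret are made up, and
// the exact output formatting is approximate.)
//
//   const isc::dns::TSIGKey key("example.key.:MTIzNDU2Nzg=:hmac-sha256.");
//   std::cout << key.toText() << std::endl;
//   // => example.key.:MTIzNDU2Nzg=:hmac-sha256.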
@@ -206,6 +205,9 @@ public:
static const Name& HMACMD5_NAME(); ///< HMAC-MD5 (RFC2845)
static const Name& HMACSHA1_NAME(); ///< HMAC-SHA1 (RFC4635)
static const Name& HMACSHA256_NAME(); ///< HMAC-SHA256 (RFC4635)
+ static const Name& HMACSHA224_NAME(); ///< HMAC-SHA256 (RFC4635)
+ static const Name& HMACSHA384_NAME(); ///< HMAC-SHA256 (RFC4635)
+ static const Name& HMACSHA512_NAME(); ///< HMAC-SHA256 (RFC4635)
//@}
private:
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index a42037b..433bb7d 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -137,6 +137,18 @@ public:
};
///
+/// \brief A generic exception that is thrown when a function is
+/// not implemented.
+///
+/// This may be because the implementation is unfinished, or because the
+/// function is not intended to be provided in that situation.
+class NotImplemented : public Exception {
+public:
+ NotImplemented(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
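// (Editor's sketch: a hypothetical stub using the isc_throw macro defined
// later in this header to report an unimplemented function.)
//
//   void SomeInterface::futureFeature() {
//       isc_throw(isc::NotImplemented, "futureFeature() is not implemented");
//   }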
+
+///
/// A shortcut macro to insert known values into exception arguments.
///
/// It allows the \c stream argument to be part of a statement using an
@@ -163,6 +175,17 @@ public:
oss__ << stream; \
throw type(__FILE__, __LINE__, oss__.str().c_str()); \
} while (1)
+
+///
+/// Similar to isc_throw, but allows the exception to have one additional
+/// parameter (the stream/text goes first).
+#define isc_throw_1(type, stream, param1) \
+ do { \
+ std::ostringstream oss__; \
+ oss__ << stream; \
+ throw type(__FILE__, __LINE__, oss__.str().c_str(), param1); \
+ } while (1)
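// (Editor's sketch: throwing a hypothetical exception type SomeParamError
// whose constructor takes one extra argument after the usual
// (file, line, what) triple.)
//
//   isc_throw_1(SomeParamError, "bad value " << value, param_id);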
+
}
#endif // __EXCEPTIONS_H
diff --git a/src/lib/exceptions/tests/run_unittests.cc b/src/lib/exceptions/tests/run_unittests.cc
index 0908071..6a0de4f 100644
--- a/src/lib/exceptions/tests/run_unittests.cc
+++ b/src/lib/exceptions/tests/run_unittests.cc
@@ -17,5 +17,8 @@
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
+
+ // Unlike other tests we cannot use our wrapper for RUN_ALL_TESTS()
+ // due to dependency.
return (RUN_ALL_TESTS());
}
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index c27b3e4..957d350 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -2,32 +2,38 @@ SUBDIRS = . compiler tests
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
CLEANFILES = *.gcno *.gcda
lib_LTLIBRARIES = liblog.la
liblog_la_SOURCES =
-liblog_la_SOURCES += debug_levels.h logger_levels.h
liblog_la_SOURCES += dummylog.h dummylog.cc
+liblog_la_SOURCES += logimpl_messages.cc logimpl_messages.h
+liblog_la_SOURCES += log_dbglevels.h
+liblog_la_SOURCES += log_formatter.h log_formatter.cc
liblog_la_SOURCES += logger.cc logger.h
liblog_la_SOURCES += logger_impl.cc logger_impl.h
+liblog_la_SOURCES += logger_level.h
+liblog_la_SOURCES += logger_level.cc logger_level.h
+liblog_la_SOURCES += logger_level_impl.cc logger_level_impl.h
+liblog_la_SOURCES += logger_manager.cc logger_manager.h
+liblog_la_SOURCES += logger_manager_impl.cc logger_manager_impl.h
+liblog_la_SOURCES += logger_name.cc logger_name.h
+liblog_la_SOURCES += logger_specification.h
liblog_la_SOURCES += logger_support.cc logger_support.h
-liblog_la_SOURCES += messagedef.cc messagedef.h
+liblog_la_SOURCES += logger_unittest_support.cc logger_unittest_support.h
+liblog_la_SOURCES += log_messages.cc log_messages.h
+liblog_la_SOURCES += macros.h
liblog_la_SOURCES += message_dictionary.cc message_dictionary.h
liblog_la_SOURCES += message_exception.h
liblog_la_SOURCES += message_initializer.cc message_initializer.h
liblog_la_SOURCES += message_reader.cc message_reader.h
liblog_la_SOURCES += message_types.h
-liblog_la_SOURCES += root_logger_name.cc root_logger_name.h
-liblog_la_SOURCES += log_formatter.h log_formatter.cc
-liblog_la_SOURCES += macros.h
+liblog_la_SOURCES += output_option.cc output_option.h
EXTRA_DIST = README
-EXTRA_DIST += messagedef.mes
-EXTRA_DIST += logger_impl_log4cxx.cc logger_impl_log4cxx.h
-EXTRA_DIST += xdebuglevel.cc xdebuglevel.h
+EXTRA_DIST += logimpl_messages.mes
+EXTRA_DIST += log_messages.mes
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
@@ -39,5 +45,6 @@ if USE_CLANGPP
# Same for clang++, but we need to turn off -Werror completely.
liblog_la_CXXFLAGS += -Wno-error
endif
-liblog_la_CPPFLAGS = $(AM_CPPFLAGS)
-liblog_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+liblog_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
+liblog_la_LDFLAGS = $(LOG4CPLUS_LDFLAGS)
+liblog_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/README b/src/lib/log/README
index 529eefc..3693abb 100644
--- a/src/lib/log/README
+++ b/src/lib/log/README
@@ -1,11 +1,12 @@
This directory holds the first release of the logging system.
+
Basic Ideas
===========
The BIND-10 logging system merges two ideas:
* A hierarchical logging system similar to that used in Java (i.e. log4j)
-* Separation of message definitions and text
+* Separation of message use from message text
Hierarchical Logging System
@@ -28,23 +29,26 @@ above, the INFO/Syslog attributes could be associated with the root logger
while the DEBUG/file attributes are associated with the "cache" logger.
-Separation of Messages Definitions And Text
-===========================================
-The reason for this is to allow the message text to be overridden by versions
-in a local language. To do this, each message is identified by an identifier
-e.g. "OPENIN". Within the program, this is the symbol passed to the logging
-system. The logger system uses the symbol as an index into a dictionary to
-retrieve the message associated with it (e.g. "unable to open %s for input").
-substitutes any message parameters (in this example, the string that is an
-invalid filename) and logs it to the destination.
+Separation of Messages Use from Message Text
+============================================
+By separating the use of the message from the text associated with it -
+in essence, defining message text in an external file - it is possible to
+replace the supplied text of the messages with a local language version.
-In the BIND-10 system, a set of default messages are linked into the
-program. At run-time. each program reads a message file, updating the
-stored definitions; this updated text is logged. However, to aid support,
-the message identifier so in the example above, the message finally logged
-would be something like:
+Each message is identified by an identifier e.g. "LOG_WRITE_ERROR".
+Within the program, this is the symbol passed to the logging system.
+The logger system uses the symbol as an index into a dictionary to
+retrieve the message associated with it (e.g. "unable to open %s for
+input"). It then substitutes any message parameters (in this example,
+the name of the file where the write operation failed) and logs it to
+the destination.
- FAC_OPENIN, unable to open a.txt for input
+In BIND-10, the default text for each message is linked into the
+program. Each program is able to read a locally-defined message file
+when it starts, updating the stored definitions with site-specific text.
+When the message is logged, the updated text is output. However, the
+message identifier is always included in the output so that the origin
+of the message can be identified even if the text has been changed.
Using The System
@@ -52,8 +56,9 @@ Using The System
The steps in using the system are:
1. Create a message file. This defines messages by an identification - a
- mnemonic for the message, typically 6-12 characters long - and a message.
- The file is described in more detail below.
+ mnemonic for the message, the convention being that these are a few
+ words separated by underscores - and text that explains the message in
+ more detail. The file is described in more detail below.
Ideally the file should have a file type of ".mes".
@@ -73,9 +78,7 @@ The steps in using the system are:
described in more detail below.
5. To set the debug level and run-time message file, call initLogger (declared
- in logger_support.h) in the main program unit. This is a temporary solution
- for Year 2, and will be replaced at a later date, the information coming
- from the configuration database.
+ in logger_support.h) in the main program unit.
Message Files
@@ -90,16 +93,16 @@ An example file could be:
-- BEGIN --
# Example message file
-# $ID:$
-$PREFIX TEST_
$NAMESPACE isc::log
-% TEST1 message %1 is much too large
-This message is a test for the general message code
+% LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'
+A line starting with a dollar symbol was found, but the first word on the line
+(shown in the message) was not a recognised message compiler directive.
-% UNKNOWN unknown message
-Issued when the message is unknown.
+% LOG_WRITE_ERROR error writing to %1: %2
+The specified error was encountered by the message compiler when writing to
+the named output file.
-- END --
@@ -114,10 +117,8 @@ Points to note:
a line by themselves - inline comments will be interpreted as part of the
text of the line.
-* Lines starting $ are directives. At present, two directives are recognised:
-
- * $PREFIX, which has one optional argument: the string used to prefix symbols.
- If absent, there is no prefix to the symbols. (Prefixes are explained below.)
+* Lines starting $ are directives. At present, just one directive is
+ recognised:
* $NAMESPACE, which has one argument: the namespace in which the symbols are
created. In the absence of a $NAMESPACE directive, symbols will be put in
@@ -127,18 +128,33 @@ Points to note:
identification and the message text, the latter including zero or more
replacement tokens, e.g.
- % TEST message %1 is larger than the permitted length of %2
+ % LOG_WRITE_ERROR error writing to %1: %2
* There may be zero or more spaces between the leading "%" and the message
- identification (which, in the example above, is the word "TEST").
-
- * The replacement tokens are the strings "%1", "%2" etc. When a message
- is logged, these are replaced with the arguments passed to the logging
- call: %1 refers to the first argument, %2 to the second etc. Within the
- message text, the placeholders can appear in any order, and placeholders
- can be repeated.
-
-* Remaining lines indicate an explanation for the preceding message. These
+ identification (which, in the example above, is the string
+ "LOG_WRITE_ERROR").
+
+ * The message identification can be any string of letters, digits and
+ underscores, but should not start with a digit. The convention adopted
+ in BIND 10 is for the first component (before the first underscore) to be
+ a string indicating the origin of the message, and the remainder to
+ describe the message. So in the example above, the LOG_ indicates that
+ the error originated from the logging library and the "WRITE_ERROR"
+ indicates that there was a problem in a write operation.
+
+ * The rest of the line - from the first non-space character to the
+ last non-space character - is taken exactly as the text
+ of the message. There are no restrictions on what characters may
+ be in this text, other than they be printable. (This means that
+ both single-quote (') and double-quote (") characters are allowed.)
+ The message text may include replacement tokens (the strings "%1",
+ "%2" etc.). When a message is logged, these are replaced with the
+ arguments passed to the logging call: %1 refers to the first argument,
+ %2 to the second etc. Within the message text, the placeholders
+ can appear in any order and placeholders can be repeated. Otherwise,
+ the message is printed unmodified.
+
+* Remaining lines indicate an explanation for the preceding message. These
are intended to be processed by a separate program and used to generate
an error messages manual. They are ignored by the message compiler.
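As an editor's illustration (not part of the original README): the
LOG_WRITE_ERROR message shown above has two placeholders, so a C++ call
using it might look as follows, assuming a logger object and the generated
log_messages.h header.
    LOG_ERROR(logger, LOG_WRITE_ERROR).arg("output.txt").arg(strerror(errno));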
@@ -147,42 +163,36 @@ Message Compiler
The message compiler is a program built in the src/log/compiler directory.
It is invoked by the command:
- message [-h] [-v] <message-file>
+ message [-h] [-v] [-p] <message-file>
+
+("-v" prints the version number and exits; "-h" prints brief help text.) The
+compiler produces source files for C++ and Python.
-("-v" prints the version number and exits; "-h" prints brief help text.)
-The message compiler processes the message file to produce two files:
+C++ Files
+---------
+Without the "-p" option, the message compiler processes the message file
+to produce two files:
1) A C++ header file (called <message-file-name>.h) that holds lines of
the form:
namespace <namespace> {
- extern const isc::log::MessageID PREFIX_IDENTIFIER;
+ extern const isc::log::MessageID LOG_WRITE_ERROR;
:
}
The symbols define the keys in the global message dictionary, with the
namespace enclosing the symbols set by the $NAMESPACE directive.
-The "PREFIX_" part of the symbol name is the string defined in the $PREFIX
-the argument to the directive. So "$PREFIX MSG_" would prefix the identifier
-ABC with "MSG_" to give the symbol MSG_ABC. Similarly "$PREFIX E" would
-prefix it with "E" to give the symbol EABC. If no $PREFIX is given, no
-prefix appears (so the symbol in this example would be ABC).
-
-The prefix is "syntactic sugar". Generally all symbols in a given message file
-will be prefixed with the same set of letters. By extracting these into
-a separate prefix, it becomes easier to disambiguate the different symbols.
-
-There may be multiple $PREFIX directives in a file. A $PREFIX directive applies
-to all message definitions between it an the next $PREFIX directive. A $PREFIX
-directive with no arguments clears the current prefix.
+(This is the reason for the restriction on message identifiers - they
+have to be valid C++ symbol names.)
2) A C++ source file (called <message-file-name>.cc) that holds the definitions
of the global symbols and code to insert the symbols and messages into the map.
Symbols are defined to be equal to strings holding the identifier, e.g.
- extern const isc::log::MessageID MSG_DUPLNS = "MSG_DUPLNS";
+ extern const isc::log::MessageID LOG_WRITE_ERROR = "LOG_WRITE_ERROR";
(The implementation allows symbols to be compared. However, use of strings
should not be assumed - a future implementation may change this.)
@@ -208,145 +218,252 @@ A check is made as each is added; if the identifier already exists, it is
added to "overflow" vector; the vector is printed to the main logging output
when logging is finally enabled (to indicate a programming error).
+Python Files
+------------
+If the "-p" option is given, the compiler produces a Python module defining
+the messages. The format of this is:
+
+import isc.log
+ :
+LOG_WRITE_ERROR = isc.log.create_message("LOG_WRITE_ERROR",
+ "error writing to %1 : %2")
-Using the Logging
-=================
-To use the current version of the logging:
+(The definition is output on one line - it is split across two lines in this
+document for readability.)
+The module can be imported into other Python code, and messages logged
+in a similar way to C++ using the Python logging library.
+
+Using the Logging - C++
+=======================
1. Build message header file and source file as described above.
-2. In the main module of the program, declare an instance of the
- RootLoggerName class to define the name of the program's root logger, e.g.
+2. The main program unit must include a call to isc::log::initLogger()
+ (described in more detail below) to set the logging severity, debug log
+ level, and external message file:
+
+ a) The logging severity is one of the enum defined in logger.h, i.e.
+
+ isc::log::DEBUG
+ isc::log::INFO
+ isc::log::WARN
+ isc::log::ERROR
+ isc::log::FATAL
+ isc::log::NONE
+
+ b) The debug log level is only interpreted when the severity is
+ DEBUG and is an integer ranging from 0 to 99. 0 should be used
+ for the highest-level debug messages and 99 for the lowest-level
+ (and typically more verbose) messages.
- #include <log/root_logger_name.h>
+ c) The external message file. If present, this is the same as a
+ standard message file, although it should not include any
+ directives. (A single directive of a particular type will be
+ ignored; multiple directives will cause the read of the file to
+ fail with an error.)
- isc::log::RootLoggerName("b10-auth");
+ The settings remain in effect until the logging configuration is read,
+ and so provide the default logging during program initialization.
- This can be declared inside or outside an execution unit.
+3. Declare a logger through which the message will be logged.
-2. In the code that needs to do logging, declare a logger with a given name,
- e.g.
+ isc::log::Logger logger("name");
- #include <log/logger.h>
- :
- isc::log::Logger logger("myname"); // "myname" can be anything
+ The string passed to the constructor is the name of the logger (it
+ can be any string) and is used when configuring it. Loggers with
+ the same name share the same configuration.
- The above example assumes declaration outside a function. If declaring
- non-statically within a function, declare it as:
+4. Issue logging calls using supplied macros in "log/macros.h", e.g.
- isc::log::Logger logger("myname", true);
+ LOG_ERROR(logger, LOG_WRITE_ERROR).arg("output.txt");
- (The argument is required to support a possible future implementation of
- logging. Currently it has no effect.)
+ (The macros are more efficient than calls to the methods on the logger
+ class: they avoid the overhead of evaluating the parameters to arg()
+ if the settings are such that the message is not going to be output.)
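   A minimal end-to-end sketch (editor's addition with assumed names;
   LOG_WRITE_ERROR is borrowed from the logging library's own message file):
       #include <log/logger.h>
       #include <log/macros.h>
       #include <log/logger_support.h>
       #include <log/log_messages.h>   // assumed to define LOG_WRITE_ERROR
       isc::log::Logger logger("example");
       int main() {
           // Default severity until the real configuration is read.
           isc::log::initLogger("b10-example", isc::log::INFO);
           LOG_ERROR(logger, LOG_WRITE_ERROR).arg("output.txt").arg("disk full");
           return (0);
       }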
-3. The main program unit should include a call to isc::log::initLogger()
- (defined in logger_support.h) to set the logging severity, debug log level,
- and external message file.
+Using the Logging - Python
+==========================
+1. Build message module as described above.
- a) The logging severity is one of the enum defined in logger.h, i.e.
+2. The main program unit must include a call to isc.log.init()
+ (described in more detail below) to set the logging
+ severity, debug log level, and external message file:
- isc::log::DEBUG
- isc::log::INFO
- isc::log::WARN
- isc::log::ERROR
- isc::log::FATAL
- isc::log::NONE
+ a) The logging severity is one of the strings:
- b) The debug log level is only interpreted when the severity is DEBUG and
- is an integer ranging from 0 to 99. 0 should be used for the
- highest-level debug messages and 99 for the lowest-level (and typically
- more verbose) messages.
+ DEBUG
+ INFO
+ WARN
+ ERROR
+ FATAL
+ NONE
- c) The external message file. If present, this is the same as a standard
- message file, although it should not include any directives. (A single
- directive of a particular type will be ignored; multiple directives will
- cause the read of the file to fail with an error.)
+ b) The debug log level is only interpreted when the severity is
+ DEBUG and is an integer ranging from 0 to 99. 0 should be used
+ for the highest-level debug messages and 99 for the lowest-level
+ (and typically more verbose) messages.
-4. Issue logging calls using methods on logger, e.g.
+ c) The external message file. If present, this is the same as a
+ standard message file, although it should not include any
+ directives. (Any that are there will be ignored.)
- logger.error(DPS_NSTIMEOUT).arg("isc.org");
+ The settings remain in effect until the logging configuration is read,
+ and so provide the default logging during program initialization.
- (where, in the example above we might have defined the symbol in the message
- file with something along the lines of:
+3. Declare a logger through which the message will be logged.
- $PREFIX DPS_
- :
- NSTIMEOUT queries to all nameservers for %1 have timed out
+ logger = isc.log.Logger("name")
- At present, the only logging is to the console.
+ The string passed to the constructor is the name of the logger (it
+ can be any string) and is used when configuring it. Loggers with
+ the same name share the same configuration.
+4. Issue calls to the logging methods:
-Efficiency Considerations
--------------------------
-A common pattern in logging is a debug call of the form:
+ logger.error(LOG_WRITE_ERROR, "output.txt")
- logger.debug(dbglevel, MSGID).arg(expensive_call()).arg(...
+Logging Initialization
+======================
+In all cases, if an attempt is made to use a logging method before the logging
+has been initialized, the program will terminate with a LoggingNotInitialized
+exception.
-... where "expensive_call()" is a function call to obtain logging information
-that may be computationally intensive. Although the cost may be justified
-when debugging is enabled, the cost is still incurred even if debugging is
-disabled and the debug() method returns without outputting anything. (The
-same may be true of other logging levels, although there are likely to be
-fewer calls to logger.info(), logger.error() etc. throughout the code and
-they are less likely to be disabled.)
+C++
+---
+Logging Initialization is carried out by calling initLogger(). There are two
+variants to the call, one for use by production programs and one for use by
+unit tests.
-For this reason, a set of macros is provided and are called using the
-construct:
+Variant #1, Used by Production Programs
+---------------------------------------
+void isc::log::initLogger(const std::string& root,
+ isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0, const char* file = NULL);
- LOG_DEBUG(logger, dbglevel, MSGID).arg(expensive_call()).arg(...
- LOG_INFO(logger, MSGID).arg(expensive_call()...)
+This is the call that should be used by production programs:
-If these are used, the arguments passed to the arg() method are not evaluated
-if the relevant logging level is disabled.
+root
+Name of the program (e.g. "b10-auth"). This is also the name of the root
+logger and is used when configuring logging.
+
+severity
+Default severity that the program will start logging with. Although this may
+be overridden when the program obtains its configuration from the configuration
+database, this is the severity that is used until then. (This may be set by
+a command-line parameter.)
+
+dbglevel
+The debug level used if "severity" is set to isc::log::DEBUG.
+
+file
+The name of a local message file. This will be read and its definitions used
+to replace the compiled-in text of the messages.
+
+
+Variant #2, Used by Unit Tests
+------------------------------
+ void isc::log::initLogger()
+
+This is the call that should be used by unit tests. In this variant, all the
+options are supplied by environment variables. (It should not be used for
+production programs to avoid the chance that the program operation is affected
+by inadvertently-defined environment variables.)
+
+The environment variables are:
+
+B10_LOGGER_ROOT
+Sets the "root" for the unit test. If not defined, the name "bind10" is used.
+
+B10_LOGGER_SEVERITY
+The severity to set for the root logger in the unit test. Valid values are
+"DEBUG", "INFO", "WARN", "ERROR", "FATAL" and "NONE". If not defined, "INFO"
+is used.
+
+B10_LOGGER_DBGLEVEL
+If B10_LOGGER_SEVERITY is set to "DEBUG", the debug level. This can be a
+number between 0 and 99, and defaults to 0.
+
+B10_LOGGER_LOCALMSG
+If defined, points to a local message file. The default is not to use a local
+message file.
+
+B10_LOGGER_DESTINATION
+The location to which log messages are written. This can be one of:
+
+ stdout Messages are written to stdout
+ stderr Messages are written to stderr
+ syslog[:facility] Messages are written to syslog. If the optional
+ "facility" is used, the messages are written using
+ that facility. (This defaults to "local0" if not
+ specified.)
+ Anything else Interpreted as the name of a file to which output
+ is appended. If the file does not exist, a new one
+ is opened.
+
+In the case of "stdout", "stderr" and "syslog", they must be written exactly
+as is - no leading or trailing spaces, and in lower-case.
+
+Python
+------
+To be supplied
Severity Guidelines
===================
-When using logging, the question arises, what severity should a message be
-logged at? The following is a suggestion - as always, the decision must be
-made in the context of which the message is logged.
+When using logging, the question arises, what severity should a message
+be logged at? The following is a suggestion - as always, the decision
+must be made in the context in which the message is logged.
+
+One thing that should always be borne in mind is whether the logging
+could be used as a vector for a DOS attack. For example, if a warning
+message is logged every time an invalid packet is received, an attacker
+could simply send large numbers of invalid packets. (Of course, warnings
+could be disabled (or just warnings for that particular logger),
+but nevertheless the message is an attack vector.)
FATAL
-----
The program has encountered an error that is so severe that it cannot
-continue (or there is no point in continuing). When a fatal error has been
-logged, the program will usually exit immediately (via a call to abort()) or
-shortly afterwards, after dumping some diagnostic information.
+continue (or there is no point in continuing). When a fatal error
+has been logged, the program will usually exit immediately (or shortly
+afterwards) after dumping some diagnostic information.
ERROR
-----
-Something has happened such that the program can continue but the results
-for the current (or future) operations cannot be guaranteed to be correct,
-or the results will be correct but the service is impaired. For example,
-the program started but attempts to open one or more network interfaces failed.
+Something has happened such that the program can continue but the
+results for the current (or future) operations cannot be guaranteed to
+be correct, or the results will be correct but the service is impaired.
+For example, the program started but attempts to open one or more network
+interfaces failed.
WARN
----
An unusual event happened. Although the program will continue working
-normally, the event was sufficiently out of the ordinary to warrant drawing
-attention to it. For example, at program start-up a zone was loaded that
-contained no resource records,
+normally, the event was sufficiently out of the ordinary to warrant
+drawing attention to it. For example, at program start-up a zone was
+loaded that contained no resource records.
INFO
----
A normal but significant event has occurred that should be recorded,
-e.g. the program has started or is just about to terminate, a new zone has
-been created, etc.
+e.g. the program has started or is just about to terminate, a new zone
+has been created, etc.
DEBUG
-----
This severity is only enabled for debugging purposes. A debug level is
associated with debug messages, level 0 (the default) being for high-level
-messages and level 99 (the maximum) for the lowest level. How the messages
-are distributed between the levels is up to the developer. So if debugging
-the NSAS (for example), a level 0 message might record the creation of a new
-zone, a level 10 recording a timeout when trying to get a nameserver address,
-but a level 50 would record every query for an address. (And we might add
-level 51 to record every update of the RTT.)
-
-Note that like severities, levels are cumulative; so if level 25 is set as the
-debug level, all debug levels from 0 to 25 will be output. In fact, it is
-probably easier to visualise the debug levels as part of the severity system:
+messages and level 99 (the maximum) for the lowest level. How the
+messages are distributed between the levels is up to the developer.
+So if debugging the NSAS (for example), a level 0 message might record
+the creation of a new zone, a level 10 recording a timeout when trying
+to get a nameserver address, but a level 50 would record every query for
+an address. (And we might add level 70 to record every update of the RTT.)
+
+Note that like severities, levels are cumulative; so if level 25 is
+set as the debug level, all debug levels from 0 to 25 will be output.
+In fact, it is probably easier to visualise the debug levels as part of
+the severity system:
FATAL High
ERROR
@@ -360,50 +477,41 @@ probably easier to visualise the debug levels as part of the severity system:
When a particular severity is set, it - and all severities and/or debug
levels above it - will be logged.
+To try to ensure that the information from different modules is roughly
+comparable for the same debug level, a set of standard debug levels has
+been defined for common types of debug output. However, modules are free
+to set their own debug levels or define additional ones.
+
Logging Sources v Logging Severities
------------------------------------
-When logging events, make a distinction between events related to the server
-and events related to DNS messages received. Caution needs to be exercised
-with the latter as, if the logging is enabled in the normal course of events,
-such logging could be a denial of service vector. For example, suppose that
-the main authoritative service logger were to log both zone loading and
-unloading as INFO and a warning message if it received an invalid packet. An
-attacker could make the INFO messages unusable by flooding the server with
-malformed packets.
+When logging events, make a distinction between events related to the
+server and events related to DNS messages received. Caution needs to
+be exercised with the latter as, if the logging is enabled in the normal
+course of events, such logging could be a denial of service vector. For
+example, suppose that the main authoritative service logger were to
+log both zone loading and unloading as INFO and a warning message if
+it received an invalid packet. An attacker could make the INFO messages
+unusable by flooding the server with malformed packets.
There are two approaches to get round this:
a) Make the logging of packet-dependent events a DEBUG-severity message.
-DEBUG is not enabled by default, so these events will not be recorded unless
-DEBUG is specifically chosen.
+DEBUG is not enabled by default, so these events will not be recorded
+unless DEBUG is specifically chosen.
b) Record system-related and packet-related messages via different loggers
-(e.g. in the example given, server events could be logged using the logger
-"auth" and packet-related events at that level logged using the logger
-"pkt-auth".) As the loggers are independent and the severity levels
-independent, fine-tuning of what and what is not recorded can be achieved.
+(e.g. in the example given, server events could be logged using the
+logger "auth" and packet-related events at that level logged using the
+logger "pkt-auth".) As the loggers are independent and the severity
+levels independent, fine-tuning of what is and what is not recorded can
+be achieved.
Notes
=====
The message compiler is written in C++ (instead of Python) because it
contains a component that reads the message file. This component is used
-in both the message compiler and the server; in the server it is used when
-the server starts up (or when triggered by a command) to read in a message
-file to overwrite the internal dictionary. Writing it in C++ means there
-is only one piece of code that does this functionality.
-
-
-Outstanding Issues
-==================
-* Ability to configure system according to configuration database.
-
-
-log4cxx Issues
-==============
-Some experimental code to utilise log4cxx as an underlying implementation
-is present in the source code directory although it is not currently used.
-The files are:
-
- logger_impl_log4cxx.{cc,h}
- xdebuglevel.{cc,h}
+in both the message compiler and the server; in the server it is used
+when the server starts up (or when triggered by a command) to read in
+a message file to overwrite the internal dictionary. Writing it in C++
+means there is only one piece of code that does this functionality.
diff --git a/src/lib/log/compiler/Makefile.am b/src/lib/log/compiler/Makefile.am
index d51ba05..1f47ba9 100644
--- a/src/lib/log/compiler/Makefile.am
+++ b/src/lib/log/compiler/Makefile.am
@@ -1,8 +1,6 @@
SUBDIRS = .
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
@@ -14,7 +12,7 @@ endif
CLEANFILES = *.gcno *.gcda
noinst_PROGRAMS = message
-message_SOURCES = message.cc
-message_LDADD = $(top_builddir)/src/lib/log/liblog.la
-message_LDADD += $(top_builddir)/src/lib/util/libutil.la
+message_SOURCES = message.cc
+message_LDADD = $(top_builddir)/src/lib/log/liblog.la
+message_LDADD += $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/compiler/message.cc b/src/lib/log/compiler/message.cc
index 457a62e..f74020a 100644
--- a/src/lib/log/compiler/message.cc
+++ b/src/lib/log/compiler/message.cc
@@ -28,19 +28,22 @@
#include <util/filename.h>
#include <util/strutil.h>
+#include <log/log_messages.h>
#include <log/message_dictionary.h>
#include <log/message_exception.h>
#include <log/message_reader.h>
-#include <log/messagedef.h>
#include <log/logger.h>
+#include <boost/foreach.hpp>
+
using namespace std;
using namespace isc::log;
using namespace isc::util;
static const char* VERSION = "1.0-0";
+/// \file log/compiler/message.cc
/// \brief Message Compiler
///
/// \b Overview<BR>
@@ -53,13 +56,16 @@ static const char* VERSION = "1.0-0";
/// \b Invocation<BR>
/// The program is invoked with the command:
///
-/// <tt>message [-v | -h | \<message-file\>]</tt>
+/// <tt>message [-v | -h | -p | -d <dir> | <message-file>]</tt>
///
-/// It reads the message file and writes out two files of the same name in the
-/// default directory but with extensions of .h and .cc.
+/// It reads the message file and writes out two files of the same
+/// name in the current working directory (unless -d is used) but
+/// with extensions of .h and .cc, or .py if -p is used.
///
-/// \-v causes it to print the version number and exit. \-h prints a help
-/// message (and exits).
+/// -v causes it to print the version number and exit. -h prints a help
+/// message (and exits). -p sets the output to Python. -d <dir> makes it
+/// write the output file(s) to the given directory instead of the current
+/// working directory.
/// \brief Print Version
@@ -78,10 +84,12 @@ version() {
void
usage() {
cout <<
- "Usage: message [-h] [-v] <message-file>\n" <<
+ "Usage: message [-h] [-v] [-p] [-d dir] <message-file>\n" <<
"\n" <<
"-h Print this message and exit\n" <<
"-v Print the program version and exit\n" <<
+ "-p Output python source instead of C++ ones\n" <<
+ "-d <dir> Place output files in given directory\n" <<
"\n" <<
"<message-file> is the name of the input message file.\n";
}
@@ -103,7 +111,7 @@ currentTime() {
// Convert to string and strip out the trailing newline
string current_time = buffer;
- return isc::util::str::trim(current_time);
+ return (isc::util::str::trim(current_time));
}
@@ -124,7 +132,7 @@ sentinel(Filename& file) {
string ext = file.extension();
string sentinel_text = "__" + name + "_" + ext.substr(1);
isc::util::str::uppercase(sentinel_text);
- return sentinel_text;
+ return (sentinel_text);
}
@@ -151,7 +159,7 @@ quoteString(const string& instring) {
outstring += instring[i];
}
- return outstring;
+ return (outstring);
}
@@ -174,7 +182,7 @@ sortedIdentifiers(MessageDictionary& dictionary) {
}
sort(ident.begin(), ident.end());
- return ident;
+ return (ident);
}
@@ -204,7 +212,7 @@ splitNamespace(string ns) {
// ... and return the vector of namespace components split on the single
// colon.
- return isc::util::str::tokens(ns, ":");
+ return (isc::util::str::tokens(ns, ":"));
}
@@ -237,6 +245,52 @@ writeClosingNamespace(ostream& output, const vector<string>& ns) {
}
}
+/// \brief Write Python file
+///
+/// Writes the python file containing the symbol definitions as module-level
+/// constants. These are objects which register themselves at creation time,
+/// so that their text can be replaced by a dictionary later.
+///
+/// \param file Name of the message file. The source code is written to a file
+/// file of the same name but with a .py suffix.
+/// \param dictionary The dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
+///
+/// \note We don't use the namespace as in C++. We don't need it, because
+/// Python file/module works as an implicit namespace as well.
+
+void
+writePythonFile(const string& file, MessageDictionary& dictionary,
+ const char* output_directory)
+{
+ Filename message_file(file);
+ Filename python_file(Filename(message_file.name()).useAsDefault(".py"));
+ if (output_directory != NULL) {
+ python_file.setDirectory(output_directory);
+ }
+
+ // Open the file for writing
+ ofstream pyfile(python_file.fullName().c_str());
+
+ // Write the comment and imports
+ pyfile <<
+ "# File created from " << message_file.fullName() << " on " <<
+ currentTime() << "\n" <<
+ "\n" <<
+ "import isc.log\n" <<
+ "\n";
+
+ vector<string> idents(sortedIdentifiers(dictionary));
+ BOOST_FOREACH(const string& ident, idents) {
+ pyfile << ident << " = isc.log.create_message(\"" <<
+ ident << "\", \"" << quoteString(dictionary.getText(ident)) <<
+ "\")\n";
+ }
+
+ pyfile.close();
+}
/// \brief Write Header File
///
@@ -250,13 +304,19 @@ writeClosingNamespace(ostream& output, const vector<string>& ns) {
/// \param ns Namespace in which the definitions are to be placed. An empty
/// string indicates no namespace.
/// \param dictionary Dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
void
writeHeaderFile(const string& file, const vector<string>& ns_components,
- MessageDictionary& dictionary)
+ MessageDictionary& dictionary, const char* output_directory)
{
Filename message_file(file);
Filename header_file(Filename(message_file.name()).useAsDefault(".h"));
+ if (output_directory != NULL) {
+ header_file.setDirectory(output_directory);
+ }
// Text to use as the sentinels.
string sentinel_text = sentinel(header_file);
@@ -264,52 +324,46 @@ writeHeaderFile(const string& file, const vector<string>& ns_components,
// Open the output file for writing
ofstream hfile(header_file.fullName().c_str());
- try {
- if (hfile.fail()) {
- throw MessageException(MSG_OPENOUT, header_file.fullName(),
- strerror(errno));
- }
-
- // Write the header preamble. If there is an error, we'll pick it up
- // after the last write.
-
- hfile <<
- "// File created from " << message_file.fullName() << " on " <<
- currentTime() << "\n" <<
- "\n" <<
- "#ifndef " << sentinel_text << "\n" <<
- "#define " << sentinel_text << "\n" <<
- "\n" <<
- "#include <log/message_types.h>\n" <<
- "\n";
-
- // Write the message identifiers, bounded by a namespace declaration
- writeOpeningNamespace(hfile, ns_components);
-
- vector<string> idents = sortedIdentifiers(dictionary);
- for (vector<string>::const_iterator j = idents.begin();
- j != idents.end(); ++j) {
- hfile << "extern const isc::log::MessageID " << *j << ";\n";
- }
- hfile << "\n";
+ if (hfile.fail()) {
+ throw MessageException(LOG_OPEN_OUTPUT_FAIL, header_file.fullName(),
+ strerror(errno));
+ }
- writeClosingNamespace(hfile, ns_components);
+ // Write the header preamble. If there is an error, we'll pick it up
+ // after the last write.
+
+ hfile <<
+ "// File created from " << message_file.fullName() << " on " <<
+ currentTime() << "\n" <<
+ "\n" <<
+ "#ifndef " << sentinel_text << "\n" <<
+ "#define " << sentinel_text << "\n" <<
+ "\n" <<
+ "#include <log/message_types.h>\n" <<
+ "\n";
+
+ // Write the message identifiers, bounded by a namespace declaration
+ writeOpeningNamespace(hfile, ns_components);
+
+ vector<string> idents = sortedIdentifiers(dictionary);
+ for (vector<string>::const_iterator j = idents.begin();
+ j != idents.end(); ++j) {
+ hfile << "extern const isc::log::MessageID " << *j << ";\n";
+ }
+ hfile << "\n";
- // ... and finally the postamble
- hfile << "#endif // " << sentinel_text << "\n";
+ writeClosingNamespace(hfile, ns_components);
- // Report errors (if any) and exit
- if (hfile.fail()) {
- throw MessageException(MSG_WRITERR, header_file.fullName(),
- strerror(errno));
- }
+ // ... and finally the postamble
+ hfile << "#endif // " << sentinel_text << "\n";
- hfile.close();
- }
- catch (MessageException&) {
- hfile.close();
- throw;
+ // Report errors (if any) and exit
+ if (hfile.fail()) {
+ throw MessageException(LOG_WRITE_ERROR, header_file.fullName(),
+ strerror(errno));
}
+
+ hfile.close();
}
@@ -347,86 +401,93 @@ replaceNonAlphaNum(char c) {
/// optimisation is done at link-time, not compiler-time. In this it _may_
/// decide to remove the initializer object because of a lack of references
/// to it. But until BIND-10 is ported to Windows, we won't know.
-
+///
+/// \param file Name of the message file. The program file is written to a
+/// file of the same name but with a .cc suffix.
+/// \param ns Namespace in which the definitions are to be placed. An empty
+/// string indicates no namespace.
+/// \param dictionary Dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
void
writeProgramFile(const string& file, const vector<string>& ns_components,
- MessageDictionary& dictionary)
+ MessageDictionary& dictionary,
+ const char* output_directory)
{
Filename message_file(file);
Filename program_file(Filename(message_file.name()).useAsDefault(".cc"));
+ if (output_directory) {
+ program_file.setDirectory(output_directory);
+ }
// Open the output file for writing
ofstream ccfile(program_file.fullName().c_str());
- try {
- if (ccfile.fail()) {
- throw MessageException(MSG_OPENOUT, program_file.fullName(),
- strerror(errno));
- }
- // Write the preamble. If there is an error, we'll pick it up after
- // the last write.
+ if (ccfile.fail()) {
+ throw MessageException(LOG_OPEN_OUTPUT_FAIL, program_file.fullName(),
+ strerror(errno));
+ }
- ccfile <<
- "// File created from " << message_file.fullName() << " on " <<
- currentTime() << "\n" <<
- "\n" <<
- "#include <cstddef>\n" <<
- "#include <log/message_types.h>\n" <<
- "#include <log/message_initializer.h>\n" <<
- "\n";
+ // Write the preamble. If there is an error, we'll pick it up after
+ // the last write.
- // Declare the message symbols themselves.
+ ccfile <<
+ "// File created from " << message_file.fullName() << " on " <<
+ currentTime() << "\n" <<
+ "\n" <<
+ "#include <cstddef>\n" <<
+ "#include <log/message_types.h>\n" <<
+ "#include <log/message_initializer.h>\n" <<
+ "\n";
- writeOpeningNamespace(ccfile, ns_components);
+ // Declare the message symbols themselves.
- vector<string> idents = sortedIdentifiers(dictionary);
- for (vector<string>::const_iterator j = idents.begin();
- j != idents.end(); ++j) {
- ccfile << "extern const isc::log::MessageID " << *j <<
- " = \"" << *j << "\";\n";
- }
- ccfile << "\n";
+ writeOpeningNamespace(ccfile, ns_components);
- writeClosingNamespace(ccfile, ns_components);
+ vector<string> idents = sortedIdentifiers(dictionary);
+ for (vector<string>::const_iterator j = idents.begin();
+ j != idents.end(); ++j) {
+ ccfile << "extern const isc::log::MessageID " << *j <<
+ " = \"" << *j << "\";\n";
+ }
+ ccfile << "\n";
- // Now the code for the message initialization.
+ writeClosingNamespace(ccfile, ns_components);
- ccfile <<
- "namespace {\n" <<
- "\n" <<
- "const char* values[] = {\n";
+ // Now the code for the message initialization.
- // Output the identifiers and the associated text.
- idents = sortedIdentifiers(dictionary);
- for (vector<string>::const_iterator i = idents.begin();
- i != idents.end(); ++i) {
- ccfile << " \"" << *i << "\", \"" <<
- quoteString(dictionary.getText(*i)) << "\",\n";
- }
+ ccfile <<
+ "namespace {\n" <<
+ "\n" <<
+ "const char* values[] = {\n";
+ // Output the identifiers and the associated text.
+ idents = sortedIdentifiers(dictionary);
+ for (vector<string>::const_iterator i = idents.begin();
+ i != idents.end(); ++i) {
+ ccfile << " \"" << *i << "\", \"" <<
+ quoteString(dictionary.getText(*i)) << "\",\n";
+ }
- // ... and the postamble
- ccfile <<
- " NULL\n" <<
- "};\n" <<
- "\n" <<
- "const isc::log::MessageInitializer initializer(values);\n" <<
- "\n" <<
- "} // Anonymous namespace\n" <<
- "\n";
-
- // Report errors (if any) and exit
- if (ccfile.fail()) {
- throw MessageException(MSG_WRITERR, program_file.fullName(),
- strerror(errno));
- }
- ccfile.close();
- }
- catch (MessageException&) {
- ccfile.close();
- throw;
+ // ... and the postamble
+ ccfile <<
+ " NULL\n" <<
+ "};\n" <<
+ "\n" <<
+ "const isc::log::MessageInitializer initializer(values);\n" <<
+ "\n" <<
+ "} // Anonymous namespace\n" <<
+ "\n";
+
+ // Report errors (if any) and exit
+ if (ccfile.fail()) {
+ throw MessageException(LOG_WRITE_ERROR, program_file.fullName(),
+ strerror(errno));
}
+
+ ccfile.close();
}
@@ -466,24 +527,35 @@ warnDuplicates(MessageReader& reader) {
int
main(int argc, char* argv[]) {
- const char* soptions = "hv"; // Short options
+ const char* soptions = "hvpd:"; // Short options
optind = 1; // Ensure we start a new scan
int opt; // Value of the option
+ bool doPython = false;
+ const char *output_directory = NULL;
+
while ((opt = getopt(argc, argv, soptions)) != -1) {
switch (opt) {
+ case 'd':
+ output_directory = optarg;
+ break;
+
+ case 'p':
+ doPython = true;
+ break;
+
case 'h':
usage();
- return 0;
+ return (0);
case 'v':
version();
- return 0;
+ return (0);
default:
// A message will have already been output about the error.
- return 1;
+ return (1);
}
}
@@ -491,11 +563,11 @@ main(int argc, char* argv[]) {
if (optind < (argc - 1)) {
cout << "Error: excess arguments in command line\n";
usage();
- return 1;
+ return (1);
} else if (optind >= argc) {
cout << "Error: missing message file\n";
usage();
- return 1;
+ return (1);
}
string message_file = argv[optind];
@@ -508,20 +580,34 @@ main(int argc, char* argv[]) {
MessageReader reader(&dictionary);
reader.readFile(message_file);
- // Get the namespace into which the message definitions will be put and
- // split it into components.
- vector<string> ns_components = splitNamespace(reader.getNamespace());
-
- // Write the header file.
- writeHeaderFile(message_file, ns_components, dictionary);
-
- // Write the file that defines the message symbols and text
- writeProgramFile(message_file, ns_components, dictionary);
+ if (doPython) {
+ // Warn in case of ignored directives
+ if (!reader.getNamespace().empty()) {
+ cerr << "Python mode, ignoring the $NAMESPACE directive" <<
+ endl;
+ }
+
+ // Write the whole python file
+ writePythonFile(message_file, dictionary, output_directory);
+ } else {
+ // Get the namespace into which the message definitions will be put and
+ // split it into components.
+ vector<string> ns_components =
+ splitNamespace(reader.getNamespace());
+
+ // Write the header file.
+ writeHeaderFile(message_file, ns_components, dictionary,
+ output_directory);
+
+ // Write the file that defines the message symbols and text
+ writeProgramFile(message_file, ns_components, dictionary,
+ output_directory);
+ }
// Finally, warn of any duplicates encountered.
warnDuplicates(reader);
}
- catch (MessageException& e) {
+ catch (const MessageException& e) {
// Create an error message from the ID and the text
MessageDictionary& global = MessageDictionary::globalDictionary();
string text = e.id();
@@ -535,9 +621,9 @@ main(int argc, char* argv[]) {
cerr << text << "\n";
- return 1;
+ return (1);
}
- return 0;
+ return (0);
}
diff --git a/src/lib/log/debug_levels.h b/src/lib/log/debug_levels.h
deleted file mode 100644
index bb2b524..0000000
--- a/src/lib/log/debug_levels.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __DEBUG_LEVELS_H
-#define __DEBUG_LEVELS_H
-
-/// \brief Defines Debug Levels
-///
-/// Defines the maximum and minimum debug levels and the number of levels.
-/// These are defined using #define as they are referenced in the construction
-/// of variables declared outside execution units. (In this way we avoid the
-/// "static initialization fiasco" problem.)
-
-#define MIN_DEBUG_LEVEL (0)
-#define MAX_DEBUG_LEVEL (99)
-#define NUM_DEBUG_LEVEL (MAX_DEBUG_LEVEL - MIN_DEBUG_LEVEL + 1)
-
-#endif // __DEBUG_LEVELS_H
diff --git a/src/lib/log/log_dbglevels.h b/src/lib/log/log_dbglevels.h
new file mode 100644
index 0000000..d713714
--- /dev/null
+++ b/src/lib/log/log_dbglevels.h
@@ -0,0 +1,93 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOG_DBGLVLS_H
+#define __LOG_DBGLVLS_H
+
+/// \file
+///
+/// When a message is logged with DEBUG severity, the debug level associated
+/// with the message is also specified. This debug level is a number
+/// ranging from 0 to 99; the idea is that the higher the debug level, the
+/// more detailed the message.
+///
+/// If debug messages are being logged, the logging system allows them to be
+/// filtered by debug level - only messages logged with a level equal to or
+/// less than the set debug level will be output. (For example, if the
+/// filter is set to 30, only debug messages logged with levels in the range
+/// 0 to 30 will be output; messages logged with levels 31 to 99 will be
+/// suppressed.)
+///
+/// Levels of 30 or below are reserved for debug messages that are most
+/// likely to be useful for an administrator. Levels 31 to 99 are for use by
+/// someone familiar with the code. "Useful for an administrator" is,
+/// admittedly, a subjective term: it is loosely defined as messages helping
+/// someone diagnose a problem that they could solve without needing to dive
+/// into the code. So it covers things like start-up steps and configuration
+/// messages.
+///
+/// In practice, this means that levels of 30 and below are most likely to
+/// be used by the top-level programs, and 31 and above by the various
+/// libraries.
+///
+/// This file defines a set of standard debug levels for use across all loggers.
+/// In this way users can have some expectation of what will be output when
+/// enabling debugging. Symbols are prefixed DBGLVL so as not to clash with
+/// DBG_ symbols in the various modules.
+///
+/// \note If the names of debug constants are changed, or if ones are added or
+/// removed, edit the file src/lib/python/isc/log/log.cc to update the log
+/// level definitions available to Python. The change does not need to be
+/// made if only the numeric values of constants are updated.
+
+namespace {
+
+/// Process startup/shutdown debug messages. Note that these are _debug_
+/// messages, as other messages related to startup and shutdown may be output
+/// with another severity. For example, when the authoritative server starts
+/// up, the "server started" message could be output at a severity of INFO.
+/// "Server starting" and messages indicating the stages in startup should be
+/// debug messages output at this debug level.
+///
+/// This is given a value of 0 as that is the level selected if debugging is
+/// enabled without giving a level.
+const int DBGLVL_START_SHUT = 0;
+
+/// This debug level is reserved for logging the exchange of messages/commands
+/// between processes, including configuration messages.
+const int DBGLVL_COMMAND = 10;
+
+/// If the commands have associated data, this is the level at which that
+/// data is printed. This includes configuration messages.
+const int DBGLVL_COMMAND_DATA = 20;
+
+// The following constants are suggested values for common operations.
+// Depending on the exact nature of the code, modules may or may not use these
+// levels.
+
+/// Trace basic operations.
+const int DBGLVL_TRACE_BASIC = 40;
+
+/// Trace data associated with the basic operations.
+const int DBGLVL_TRACE_BASIC_DATA = 45;
+
+/// Trace detailed operations.
+const int DBGLVL_TRACE_DETAIL = 50;
+
+/// Trace data associated with detailed operations.
+const int DBGLVL_TRACE_DETAIL_DATA = 55;
+
+} // Anonymous namespace
+
+#endif // __LOG_DBGLVLS_H
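
For illustration, a minimal usage sketch of these standard levels (not part of the
patch): the logger name "example" is arbitrary, LOG_READING_LOCAL_FILE is one of the
message IDs added in log_messages.h further below, and logging is assumed to have
already been initialized.

    #include <string>

    #include <log/logger.h>
    #include <log/log_dbglevels.h>
    #include <log/log_messages.h>

    // Emit a trace-level debug message using one of the standard levels.
    // The message is output only if this logger's severity is DEBUG and
    // its debug level is at least DBGLVL_TRACE_BASIC (40).
    void traceLocalFile(const std::string& file) {
        isc::log::Logger logger("example");
        logger.debug(DBGLVL_TRACE_BASIC,
                     isc::log::LOG_READING_LOCAL_FILE).arg(file);
    }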
diff --git a/src/lib/log/log_formatter.h b/src/lib/log/log_formatter.h
index cda1d96..7a9e5fa 100644
--- a/src/lib/log/log_formatter.h
+++ b/src/lib/log/log_formatter.h
@@ -15,12 +15,31 @@
#ifndef __LOG_FORMATTER_H
#define __LOG_FORMMATER_H
+#include <cstddef>
#include <string>
+#include <iostream>
+
+#include <exceptions/exceptions.h>
#include <boost/lexical_cast.hpp>
+#include <log/logger_level.h>
namespace isc {
namespace log {
+/// \brief Format Failure
+///
+/// This exception is used to wrap a bad_lexical_cast exception thrown during
+/// formatting an argument.
+
+class FormatFailure : public isc::Exception {
+public:
+ FormatFailure(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what)
+ {}
+};
+
+
+///
/// \brief The internal replacement routine
///
/// This is used internally by the Formatter. Replaces a placeholder
@@ -73,13 +92,17 @@ private:
///
/// If NULL, we are not active and should not produce anything.
mutable Logger* logger_;
- /// \brief Prefix (eg. "ERROR", "DEBUG" or like that)
- const char* prefix_;
+
+ /// \brief Message severity
+ Severity severity_;
+
/// \brief The messages with %1, %2... placeholders
std::string* message_;
+
/// \brief Which will be the next placeholder to replace
unsigned nextPlaceholder_;
- Formatter& operator =(const Formatter& other);
+
+
public:
/// \brief Constructor of "active" formatter
///
@@ -89,56 +112,102 @@ public:
///
/// It is not expected to be called by user of logging system directly.
///
- /// \param prefix The severity prefix, like "ERROR" or "DEBUG"
+ /// \param severity The severity of the message (DEBUG, ERROR etc.)
/// \param message The message with placeholders. We take ownership of
/// it and we will modify the string. Must not be NULL unless
/// logger is also NULL, but it's not checked.
/// \param logger The logger where the final output will go, or NULL
/// if no output is wanted.
- Formatter(const char* prefix = NULL, std::string* message = NULL,
+ Formatter(const Severity& severity = NONE, std::string* message = NULL,
Logger* logger = NULL) :
- logger_(logger), prefix_(prefix), message_(message),
- nextPlaceholder_(1)
+ logger_(logger), severity_(severity), message_(message),
+ nextPlaceholder_(0)
{
}
+ /// \brief Copy constructor
+ ///
+    /// "Control" is passed to the created object: the newly-created object
+    /// takes responsibility for outputting the formatted message, and the
+    /// object being copied relinquishes that responsibility.
Formatter(const Formatter& other) :
- logger_(other.logger_), prefix_(other.prefix_),
+ logger_(other.logger_), severity_(other.severity_),
message_(other.message_), nextPlaceholder_(other.nextPlaceholder_)
{
- other.logger_ = false;
+ other.logger_ = NULL;
}
+
/// \brief Destructor.
//
/// This is the place where output happens if the formatter is active.
~ Formatter() {
if (logger_) {
- logger_->output(prefix_, *message_);
+ logger_->output(severity_, *message_);
delete message_;
}
}
+
+ /// \brief Assignment operator
+ ///
+    /// Essentially the same function as the copy constructor - the object being
+ /// assigned to takes responsibility for outputting the message.
+ Formatter& operator =(const Formatter& other) {
+ if (&other != this) {
+ logger_ = other.logger_;
+ severity_ = other.severity_;
+ message_ = other.message_;
+ nextPlaceholder_ = other.nextPlaceholder_;
+ other.logger_ = NULL;
+ }
+
+ return *this;
+ }
+
/// \brief Replaces another placeholder
///
/// Replaces another placeholder and returns a new formatter with it.
/// Deactivates the current formatter. In case the formatter is not active,
/// only produces another inactive formatter.
///
- /// \param arg The argument to place into the placeholder.
+ /// \param value The argument to place into the placeholder.
template<class Arg> Formatter& arg(const Arg& value) {
if (logger_) {
- return (arg(boost::lexical_cast<std::string>(value)));
+ try {
+ return (arg(boost::lexical_cast<std::string>(value)));
+ } catch (const boost::bad_lexical_cast& ex) {
+
+            // A conversion to a string is *extremely* unlikely to fail
+            // with a bad_lexical_cast. However, there is nothing in the
+            // documentation that rules it out, so we need to handle
+ // it. As it is a potentially very serious problem, throw the
+ // exception detailing the problem with as much information as
+ // we can. (Note that this does not include 'value' -
+ // boost::lexical_cast failed to convert it to a string, so an
+ // attempt to do so here would probably fail as well.)
+ isc_throw(FormatFailure, "bad_lexical_cast in call to "
+ "Formatter::arg(): " << ex.what());
+ }
} else {
return (*this);
}
}
+
/// \brief String version of arg.
+ ///
+ /// \param arg The text to place into the placeholder.
Formatter& arg(const std::string& arg) {
if (logger_) {
- // FIXME: This logic has a problem. If we had a message like
- // "%1 %2" and called .arg("%2").arg(42), we would get "42 %2".
- // But we consider this to be rare enough not to complicate
- // matters.
- replacePlaceholder(message_, arg, nextPlaceholder_ ++);
+ // Note that this method does a replacement and returns the
+ // modified string. If there are multiple invocations of arg() (e.g.
+ // logger.info(msgid).arg(xxx).arg(yyy)...), each invocation
+ // operates on the string returned by the previous one. This
+ // sequential operation means that if we had a message like "%1 %2",
+ // and called .arg("%2").arg(42), we would get "42 42"; the first
+            // call replaces the "%1" with "%2" and the second replaces all
+ // occurrences of "%2" with 42. (Conversely, the sequence
+ // .arg(42).arg("%1") would return "42 %1" - there are no recursive
+ // replacements).
+ replacePlaceholder(message_, arg, ++nextPlaceholder_ );
}
return (*this);
}
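
To make the chained arg() behaviour described in the comments concrete, a small
sketch (illustrative only): the logger name is arbitrary, and LOG_INPUT_OPEN_FAIL is
defined in log_messages.h added below and carries two placeholders, %1 and %2.

    #include <cerrno>
    #include <cstring>
    #include <string>

    #include <log/logger.h>
    #include <log/log_messages.h>

    // "unable to open message file %1 for input: %2" - the first arg()
    // call fills %1, the second fills %2. If 'filename' itself contained
    // the text "%2", it too would be replaced by the second argument, as
    // noted in the comment in arg() above.
    void reportOpenFailure(const std::string& filename) {
        isc::log::Logger logger("example");
        logger.error(isc::log::LOG_INPUT_OPEN_FAIL)
            .arg(filename)
            .arg(std::strerror(errno));
    }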
diff --git a/src/lib/log/log_messages.cc b/src/lib/log/log_messages.cc
new file mode 100644
index 0000000..f60898c
--- /dev/null
+++ b/src/lib/log/log_messages.cc
@@ -0,0 +1,63 @@
+// File created from log_messages.mes on Thu Jul 7 15:32:06 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOG_BAD_DESTINATION = "LOG_BAD_DESTINATION";
+extern const isc::log::MessageID LOG_BAD_SEVERITY = "LOG_BAD_SEVERITY";
+extern const isc::log::MessageID LOG_BAD_STREAM = "LOG_BAD_STREAM";
+extern const isc::log::MessageID LOG_DUPLICATE_MESSAGE_ID = "LOG_DUPLICATE_MESSAGE_ID";
+extern const isc::log::MessageID LOG_DUPLICATE_NAMESPACE = "LOG_DUPLICATE_NAMESPACE";
+extern const isc::log::MessageID LOG_INPUT_OPEN_FAIL = "LOG_INPUT_OPEN_FAIL";
+extern const isc::log::MessageID LOG_INVALID_MESSAGE_ID = "LOG_INVALID_MESSAGE_ID";
+extern const isc::log::MessageID LOG_NAMESPACE_EXTRA_ARGS = "LOG_NAMESPACE_EXTRA_ARGS";
+extern const isc::log::MessageID LOG_NAMESPACE_INVALID_ARG = "LOG_NAMESPACE_INVALID_ARG";
+extern const isc::log::MessageID LOG_NAMESPACE_NO_ARGS = "LOG_NAMESPACE_NO_ARGS";
+extern const isc::log::MessageID LOG_NO_MESSAGE_ID = "LOG_NO_MESSAGE_ID";
+extern const isc::log::MessageID LOG_NO_MESSAGE_TEXT = "LOG_NO_MESSAGE_TEXT";
+extern const isc::log::MessageID LOG_NO_SUCH_MESSAGE = "LOG_NO_SUCH_MESSAGE";
+extern const isc::log::MessageID LOG_OPEN_OUTPUT_FAIL = "LOG_OPEN_OUTPUT_FAIL";
+extern const isc::log::MessageID LOG_PREFIX_EXTRA_ARGS = "LOG_PREFIX_EXTRA_ARGS";
+extern const isc::log::MessageID LOG_PREFIX_INVALID_ARG = "LOG_PREFIX_INVALID_ARG";
+extern const isc::log::MessageID LOG_READING_LOCAL_FILE = "LOG_READING_LOCAL_FILE";
+extern const isc::log::MessageID LOG_READ_ERROR = "LOG_READ_ERROR";
+extern const isc::log::MessageID LOG_UNRECOGNISED_DIRECTIVE = "LOG_UNRECOGNISED_DIRECTIVE";
+extern const isc::log::MessageID LOG_WRITE_ERROR = "LOG_WRITE_ERROR";
+
+} // namespace log
+} // namespace isc
+
+namespace {
+
+const char* values[] = {
+ "LOG_BAD_DESTINATION", "unrecognized log destination: %1",
+ "LOG_BAD_SEVERITY", "unrecognized log severity: %1",
+ "LOG_BAD_STREAM", "bad log console output stream: %1",
+ "LOG_DUPLICATE_MESSAGE_ID", "duplicate message ID (%1) in compiled code",
+ "LOG_DUPLICATE_NAMESPACE", "line %1: duplicate $NAMESPACE directive found",
+ "LOG_INPUT_OPEN_FAIL", "unable to open message file %1 for input: %2",
+ "LOG_INVALID_MESSAGE_ID", "line %1: invalid message identification '%2'",
+ "LOG_NAMESPACE_EXTRA_ARGS", "line %1: $NAMESPACE directive has too many arguments",
+ "LOG_NAMESPACE_INVALID_ARG", "line %1: $NAMESPACE directive has an invalid argument ('%2')",
+ "LOG_NAMESPACE_NO_ARGS", "line %1: no arguments were given to the $NAMESPACE directive",
+ "LOG_NO_MESSAGE_ID", "line %1: message definition line found without a message ID",
+ "LOG_NO_MESSAGE_TEXT", "line %1: line found containing a message ID ('%2') and no text",
+ "LOG_NO_SUCH_MESSAGE", "could not replace message text for '%1': no such message",
+ "LOG_OPEN_OUTPUT_FAIL", "unable to open %1 for output: %2",
+ "LOG_PREFIX_EXTRA_ARGS", "line %1: $PREFIX directive has too many arguments",
+ "LOG_PREFIX_INVALID_ARG", "line %1: $PREFIX directive has an invalid argument ('%2')",
+ "LOG_READING_LOCAL_FILE", "reading local message file %1",
+ "LOG_READ_ERROR", "error reading from message file %1: %2",
+ "LOG_UNRECOGNISED_DIRECTIVE", "line %1: unrecognised directive '%2'",
+ "LOG_WRITE_ERROR", "error writing to %1: %2",
+ NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+
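
The MessageInitializer above registers each ID/text pair with the global message
dictionary; the raw text (placeholders included) can then be retrieved in the same
way that LoggerImpl::lookupMessage() does. A minimal sketch, assuming the dictionary
has been populated during program start-up:

    #include <iostream>
    #include <string>

    #include <log/message_dictionary.h>
    #include <log/log_messages.h>

    int main() {
        // Look up the compiled-in text for one of the IDs defined above.
        // Placeholders are still present at this point; they are only
        // replaced when a message is actually logged.
        const std::string text =
            isc::log::MessageDictionary::globalDictionary().getText(
                isc::log::LOG_WRITE_ERROR);
        std::cout << text << std::endl;   // "error writing to %1: %2"
        return (0);
    }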
diff --git a/src/lib/log/log_messages.h b/src/lib/log/log_messages.h
new file mode 100644
index 0000000..10e1501
--- /dev/null
+++ b/src/lib/log/log_messages.h
@@ -0,0 +1,35 @@
+// File created from log_messages.mes on Thu Jul 7 15:32:06 2011
+
+#ifndef __LOG_MESSAGES_H
+#define __LOG_MESSAGES_H
+
+#include <log/message_types.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOG_BAD_DESTINATION;
+extern const isc::log::MessageID LOG_BAD_SEVERITY;
+extern const isc::log::MessageID LOG_BAD_STREAM;
+extern const isc::log::MessageID LOG_DUPLICATE_MESSAGE_ID;
+extern const isc::log::MessageID LOG_DUPLICATE_NAMESPACE;
+extern const isc::log::MessageID LOG_INPUT_OPEN_FAIL;
+extern const isc::log::MessageID LOG_INVALID_MESSAGE_ID;
+extern const isc::log::MessageID LOG_NAMESPACE_EXTRA_ARGS;
+extern const isc::log::MessageID LOG_NAMESPACE_INVALID_ARG;
+extern const isc::log::MessageID LOG_NAMESPACE_NO_ARGS;
+extern const isc::log::MessageID LOG_NO_MESSAGE_ID;
+extern const isc::log::MessageID LOG_NO_MESSAGE_TEXT;
+extern const isc::log::MessageID LOG_NO_SUCH_MESSAGE;
+extern const isc::log::MessageID LOG_OPEN_OUTPUT_FAIL;
+extern const isc::log::MessageID LOG_PREFIX_EXTRA_ARGS;
+extern const isc::log::MessageID LOG_PREFIX_INVALID_ARG;
+extern const isc::log::MessageID LOG_READING_LOCAL_FILE;
+extern const isc::log::MessageID LOG_READ_ERROR;
+extern const isc::log::MessageID LOG_UNRECOGNISED_DIRECTIVE;
+extern const isc::log::MessageID LOG_WRITE_ERROR;
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOG_MESSAGES_H
diff --git a/src/lib/log/log_messages.mes b/src/lib/log/log_messages.mes
new file mode 100644
index 0000000..f150f39
--- /dev/null
+++ b/src/lib/log/log_messages.mes
@@ -0,0 +1,146 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \brief Message Utility Message File
+#
+# This is the source of the set of messages generated by the message and
+# logging components. The associated .h and .cc files are, however, created by
+# hand from this file and are not built during the build process; this is to avoid
+# the chicken-and-egg situation where we need the files to build the message
+# compiler, yet we need the compiler to build the files.
+
+$NAMESPACE isc::log
+
+% LOG_BAD_DESTINATION unrecognized log destination: %1
+A logger destination value was given that was not recognized. The
+destination should be one of "console", "file", or "syslog".
+
+% LOG_BAD_SEVERITY unrecognized log severity: %1
+A logger severity value was given that was not recognized. The severity
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+
+% LOG_BAD_STREAM bad log console output stream: %1
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+
+% LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+
+% LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+
+% LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2
+The program was not able to open the specified input message file for
+the reason given.
+
+% LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+
+% LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+
+% LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+
+% LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+
+% LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+
+% LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+
+% LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message
+During start-up a local message file was read. A line with the listed
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+
+There may be several reasons why this message may appear:
+
+- The message ID has been mis-spelled in the local message file.
+
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+
+Whatever the reason, there is no impact on the operation of BIND 10.
+
+% LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+
+% LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+
+% LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+
+% LOG_READING_LOCAL_FILE reading local message file %1
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
+
+% LOG_READ_ERROR error reading from message file %1: %2
+The specified error was encountered reading from the named message file.
+
+% LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+
+% LOG_WRITE_ERROR error writing to %1: %2
+The specified error was encountered by the message compiler when writing
+to the named output file.
diff --git a/src/lib/log/logger.cc b/src/lib/log/logger.cc
index c340315..d10e979 100644
--- a/src/lib/log/logger.cc
+++ b/src/lib/log/logger.cc
@@ -17,9 +17,10 @@
#include <log/logger.h>
#include <log/logger_impl.h>
+#include <log/logger_name.h>
+#include <log/logger_support.h>
#include <log/message_dictionary.h>
#include <log/message_types.h>
-#include <log/root_logger_name.h>
#include <util/strutil.h>
@@ -28,10 +29,14 @@ using namespace std;
namespace isc {
namespace log {
-// Initialize Logger implementation. Does not check whether the implementation
-// has already been initialized - that was done by the caller (getLoggerPtr()).
+// Initialize underlying logger, but only if logging has been initialized.
void Logger::initLoggerImpl() {
- loggerptr_ = new LoggerImpl(name_, infunc_);
+ if (isLoggingInitialized()) {
+ loggerptr_ = new LoggerImpl(name_);
+ } else {
+ isc_throw(LoggingNotInitialized, "attempt to access logging function "
+ "before logging has been initialized");
+ }
}
// Destructor.
@@ -75,6 +80,14 @@ Logger::getDebugLevel() {
return (getLoggerPtr()->getDebugLevel());
}
+// Effective debug level (only relevant if messages of severity DEBUG are being
+// logged).
+
+int
+Logger::getEffectiveDebugLevel() {
+ return (getLoggerPtr()->getEffectiveDebugLevel());
+}
+
// Check on the current severity settings
bool
@@ -112,14 +125,14 @@ Logger::isFatalEnabled() {
// Output methods
void
-Logger::output(const char* sevText, const string& message) {
- getLoggerPtr()->outputRaw(sevText, message);
+Logger::output(const Severity& severity, const std::string& message) {
+ getLoggerPtr()->outputRaw(severity, message);
}
Logger::Formatter
Logger::debug(int dbglevel, const isc::log::MessageID& ident) {
if (isDebugEnabled(dbglevel)) {
- return (Formatter("DEBUG", getLoggerPtr()->lookupMessage(ident),
+ return (Formatter(DEBUG, getLoggerPtr()->lookupMessage(ident),
this));
} else {
return (Formatter());
@@ -129,7 +142,7 @@ Logger::debug(int dbglevel, const isc::log::MessageID& ident) {
Logger::Formatter
Logger::info(const isc::log::MessageID& ident) {
if (isInfoEnabled()) {
- return (Formatter("INFO ", getLoggerPtr()->lookupMessage(ident),
+ return (Formatter(INFO, getLoggerPtr()->lookupMessage(ident),
this));
} else {
return (Formatter());
@@ -139,7 +152,7 @@ Logger::info(const isc::log::MessageID& ident) {
Logger::Formatter
Logger::warn(const isc::log::MessageID& ident) {
if (isWarnEnabled()) {
- return (Formatter("WARN ", getLoggerPtr()->lookupMessage(ident),
+ return (Formatter(WARN, getLoggerPtr()->lookupMessage(ident),
this));
} else {
return (Formatter());
@@ -149,7 +162,7 @@ Logger::warn(const isc::log::MessageID& ident) {
Logger::Formatter
Logger::error(const isc::log::MessageID& ident) {
if (isErrorEnabled()) {
- return (Formatter("ERROR", getLoggerPtr()->lookupMessage(ident),
+ return (Formatter(ERROR, getLoggerPtr()->lookupMessage(ident),
this));
} else {
return (Formatter());
@@ -159,22 +172,18 @@ Logger::error(const isc::log::MessageID& ident) {
Logger::Formatter
Logger::fatal(const isc::log::MessageID& ident) {
if (isFatalEnabled()) {
- return (Formatter("FATAL", getLoggerPtr()->lookupMessage(ident),
+ return (Formatter(FATAL, getLoggerPtr()->lookupMessage(ident),
this));
} else {
return (Formatter());
}
}
-bool Logger::operator==(Logger& other) {
- return (*getLoggerPtr() == *other.getLoggerPtr());
-}
-
-// Protected methods (used for testing)
+// Comparison (testing only)
-void
-Logger::reset() {
- LoggerImpl::reset();
+bool
+Logger::operator==(Logger& other) {
+ return (*getLoggerPtr() == *other.getLoggerPtr());
}
} // namespace log
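
The initLoggerImpl() change means a logger can safely be constructed before the
logging system is set up, but the first attempt to log through it will throw. A
sketch of what that looks like to a caller (names illustrative; not part of the
patch):

    #include <iostream>

    #include <log/logger.h>
    #include <log/log_messages.h>

    // Declaring the logger is cheap and safe - the underlying LoggerImpl
    // is only created on first use.
    isc::log::Logger logger("example");

    int main() {
        try {
            logger.info(isc::log::LOG_READING_LOCAL_FILE).arg("local.mes");
        } catch (const isc::log::LoggingNotInitialized& ex) {
            std::cerr << "logging call before initialization: "
                      << ex.what() << std::endl;
        }
        return (0);
    }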
diff --git a/src/lib/log/logger.h b/src/lib/log/logger.h
index 6bd8924..96168c0 100644
--- a/src/lib/log/logger.h
+++ b/src/lib/log/logger.h
@@ -18,34 +18,85 @@
#include <cstdlib>
#include <string>
-#include <log/debug_levels.h>
-#include <log/logger_levels.h>
+#include <exceptions/exceptions.h>
+#include <log/logger_level.h>
#include <log/message_types.h>
#include <log/log_formatter.h>
namespace isc {
namespace log {
-/// \brief Logging API
-///
-/// This module forms the interface into the logging subsystem. Features of the
-/// system and its implementation are:
-///
-/// # Multiple logging objects can be created, each given a name; those with the
-/// same name share characteristics (like destination, level being logged
-/// etc.)
-/// # Messages can be logged at severity levels of FATAL, ERROR, WARN, INFO or
-/// DEBUG. The DEBUG level has further sub-levels numbered 0 (least
-/// informative) to 99 (most informative).
-/// # Each logger has a severity level set associated with it. When a message
-/// is logged, it is output only if it is logged at a level equal to the
-/// logger severity level or greater, e.g. if the logger's severity is WARN,
-/// only messages logged at WARN, ERROR or FATAL will be output.
-/// # Messages are identified by message identifiers, which are keys into a
-/// message dictionary.
+/// \page LoggingApi Logging API
+/// \section LoggingApiOverview Overview
+/// BIND 10 logging uses the concepts of the widely-used Java logging
+/// package log4j (http://logging.apache.org/log4j), albeit implemented
+/// in C++ using an open-source port. Features of the system are:
+///
+/// - Within the code, objects - known as loggers - can be created and
+/// used to log messages. These loggers have names; those with the
+/// same name share characteristics (such as output destination).
+/// - Loggers have a hierarchical relationship, with each logger being
+/// the child of another logger, except for the top of the hierarchy, the
+/// root logger. If a logger does not log a message, it is passed to the
+/// parent logger.
+/// - Messages can be logged at severity levels of FATAL, ERROR, WARN, INFO
+/// or DEBUG. The DEBUG level has further sub-levels numbered 0 (least
+/// informative) to 99 (most informative).
+/// - Each logger has a severity level set associated with it. When a
+/// message is logged, it is output only if it is logged at a level equal
+/// to the logger severity level or greater, e.g. if the logger's severity
+/// is WARN, only messages logged at WARN, ERROR or FATAL will be output.
+///
+/// \section LoggingApiLoggerNames BIND 10 Logger Names
+/// Within BIND 10, the root logger is given the name of the
+/// program (via the stand-alone function setRootLoggerName()). Other loggers
+/// are children of the root logger and are named "<program>.<sublogger>".
+/// This name appears in logging output, allowing users to identify both
+/// the BIND 10 program and the component within the program that generated
+/// the message.
+///
+/// When creating a logger, the abbreviated name "<sublogger>" can be used;
+/// the program name will be prepended to it when the logger is created.
+/// In this way, individual libraries can have their own loggers without
+/// worrying about the program in which they are used, yet:
+/// - The origin of the message will be clearly identified.
+/// - The same component can have different options (e.g. logging severity)
+/// in different programs at the same time.
+///
+/// \section LoggingApiLoggingMessages Logging Messages
+/// Instead of embedding the text of messages within the code, each message
+/// is referred to using a symbolic name. The logging code uses this name as
+/// a key in a dictionary from which the message text is obtained. Such a
+/// system allows for the optional replacement of message text at run time.
+/// More details about the message dictionary (and the compiler used to create
+/// the symbol definitions) can be found in other modules in the src/lib/log
+/// directory.
class LoggerImpl; // Forward declaration of the implementation class
+/// \brief Logging Not Initialized
+///
+/// Exception thrown if an attempt is made to access a logging function
+/// if the logging system has not been initialized.
+class LoggingNotInitialized : public isc::Exception {
+public:
+ LoggingNotInitialized(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what)
+ {}
+};
+
+/// \brief Logger Class
+///
+/// This class is the main class used for logging. Use of the class comprises:
+///
+/// 1. Constructing a logger by instantiating it with a specific name. (If the
+/// same logger is in multiple functions within a file, overhead can be
+/// minimised by declaring it as a file-wide static variable.)
+/// 2. Using the error(), info() etc. methods to log a message. (However, it is
+/// recommended to use the LOG_ERROR, LOG_INFO etc. macros defined in macros.h.
+/// These will avoid the potentially-expensive evaluation of arguments if the
+/// severity is such that the message will be suppressed.)
+
class Logger {
public:
@@ -56,32 +107,7 @@ public:
/// \param name Name of the logger. If the name is that of the root name,
/// this creates an instance of the root logger; otherwise it creates a
/// child of the root logger.
- ///
- /// \param infunc This argument is present to get round a bug in some
- /// implementations of the logging system. If the logger is declared in
- /// a function (such that it will be deleted when the function exits,
- /// before the program ends), set this true. If declared outside a
- /// function (such that it gets deleted during program rundown), set false
- /// (the default).\n
- /// \n
- /// The problems encountered was that during program rundown, one logging
- /// implementation (log4cxx) threw a MutexException (this is described in
- /// https://issues.apache.org/jira/browse/LOGCXX-322). As this only occurs
- /// during program rundown, the issue is not serious - it just looks bad to
- /// have the program crash instead of shut down cleanly.\n
- /// \n
- /// If log4cxx is chosen as the implementation, this flag controls the
- /// deletion of the underlying log4cxx data structures when the logger is
- /// deleted. Setting it false for externally-declared loggers inhibits
- /// their deletion; so at program exit the memory is not reclaimed during
- /// program rundown, only when the process is selected. Setting it true
- /// for loggers that will be deleted in the normal running of the program
- /// enables their deletion - which causes no issues as the problem only
- /// manifests itself during program rundown.
- /// \n
- /// The flag has no effect on non-log4cxx implementations.
- Logger(const std::string& name, bool infunc = false) :
- loggerptr_(NULL), name_(name), infunc_(infunc)
+ Logger(const std::string& name) : loggerptr_(NULL), name_(name)
{}
/// \brief Destructor
@@ -95,7 +121,6 @@ public:
/// \return The full name of the logger (including the root name)
virtual std::string getName();
-
/// \brief Set Severity Level for Logger
///
/// Sets the level at which this logger will log messages. If none is set,
@@ -107,14 +132,12 @@ public:
/// outside these limits is silently coerced to the nearest boundary.
virtual void setSeverity(isc::log::Severity severity, int dbglevel = 1);
-
/// \brief Get Severity Level for Logger
///
/// \return The current logging level of this logger. In most cases though,
/// the effective logging level is what is required.
virtual isc::log::Severity getSeverity();
-
/// \brief Get Effective Severity Level for Logger
///
/// \return The effective severity level of the logger. This is the same
@@ -122,13 +145,18 @@ public:
/// is the severity of the parent.
virtual isc::log::Severity getEffectiveSeverity();
-
/// \brief Return DEBUG Level
///
/// \return Current setting of debug level. This is returned regardless of
/// whether the severity is set to debug.
virtual int getDebugLevel();
+ /// \brief Get Effective Debug Level for Logger
+ ///
+ /// \return The effective debug level of the logger. This is the same
+ /// as getDebugLevel() if the logger has a debug level set, but otherwise
+ /// is the debug level of the parent.
+ virtual int getEffectiveDebugLevel();
/// \brief Returns if Debug Message Should Be Output
///
@@ -137,23 +165,18 @@ public:
/// checked is less than or equal to the debug level set for the logger.
virtual bool isDebugEnabled(int dbglevel = MIN_DEBUG_LEVEL);
-
/// \brief Is INFO Enabled?
virtual bool isInfoEnabled();
-
/// \brief Is WARNING Enabled?
virtual bool isWarnEnabled();
-
/// \brief Is ERROR Enabled?
virtual bool isErrorEnabled();
-
/// \brief Is FATAL Enabled?
virtual bool isFatalEnabled();
-
/// \brief Output Debug Message
///
/// \param dbglevel Debug level, ranging between 0 and 99. Higher numbers
@@ -161,25 +184,21 @@ public:
/// \param ident Message identification.
Formatter debug(int dbglevel, const MessageID& ident);
-
/// \brief Output Informational Message
///
/// \param ident Message identification.
Formatter info(const MessageID& ident);
-
/// \brief Output Warning Message
///
/// \param ident Message identification.
Formatter warn(const MessageID& ident);
-
/// \brief Output Error Message
///
/// \param ident Message identification.
Formatter error(const MessageID& ident);
-
/// \brief Output Fatal Message
///
/// \param ident Message identification.
@@ -188,25 +207,20 @@ public:
/// \brief Equality
///
/// Check if two instances of this logger refer to the same stream.
- /// (This method is principally for testing.)
///
/// \return true if the logger objects are instances of the same logger.
bool operator==(Logger& other);
-protected:
-
- /// \brief Reset Global Data
- ///
- /// Used for testing, this calls upon the underlying logger implementation
- /// to clear any global data.
- static void reset();
-
private:
friend class isc::log::Formatter<Logger>;
+
/// \brief Raw output function
///
/// This is used by the formatter to output formatted output.
- void output(const char* sevText, const std::string& message);
+ ///
+ /// \param severity Severity of the message being output.
+ /// \param message Text of the message to be output.
+ void output(const Severity& severity, const std::string& message);
/// \brief Copy Constructor
///
@@ -222,15 +236,14 @@ private:
/// \brief Initialize Implementation
///
- /// Returns the logger pointer. If not yet set, the underlying
- /// implementation class is initialized.\n
- /// \n
- /// The reason for this indirection is to avoid the "static initialization
- /// fiacso", whereby we cannot rely on the order of static initializations.
- /// The main problem is the root logger name - declared statically - which
- /// is referenced by various loggers. By deferring a reference to it until
- /// after the program starts executing - by which time the root name object
- /// will be initialized - we avoid this problem.
+ /// Returns the logger pointer. If not yet set, the implementation class is
+ /// initialized.
+ ///
+ /// The main reason for this is to allow loggers to be declared statically
+ /// before the underlying logging system is initialized. However, any
+ /// attempt to access a logging method on any logger before initialization -
+    /// regardless of whether it is statically or automatically declared - will
+ /// cause a "LoggingNotInitialized" exception to be thrown.
///
/// \return Returns pointer to implementation
LoggerImpl* getLoggerPtr() {
@@ -245,7 +258,6 @@ private:
LoggerImpl* loggerptr_; ///< Pointer to the underlying logger
std::string name_; ///< Copy of the logger name
- bool infunc_; ///< Copy of the infunc argument
};
} // namespace log
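
A sketch of the naming and per-logger severity behaviour described in the class
documentation (it assumes the root logger name has been set to the program name,
e.g. "b10-example", and that logging has been initialized; the names are
illustrative):

    #include <log/logger.h>

    // Created with the abbreviated name "cache"; externally it appears
    // as "<program>.cache" (e.g. "b10-example.cache").
    isc::log::Logger cache_logger("cache");

    void quietenCache() {
        // Suppress everything below WARN for this component only; other
        // loggers in the program keep their own (or inherited) settings.
        cache_logger.setSeverity(isc::log::WARN);
    }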
diff --git a/src/lib/log/logger_impl.cc b/src/lib/log/logger_impl.cc
index b30f835..046da13 100644
--- a/src/lib/log/logger_impl.cc
+++ b/src/lib/log/logger_impl.cc
@@ -19,38 +19,37 @@
#include <stdarg.h>
#include <stdio.h>
#include <boost/lexical_cast.hpp>
+#include <boost/static_assert.hpp>
+
+#include <log4cplus/configurator.h>
-#include <log/debug_levels.h>
-#include <log/root_logger_name.h>
#include <log/logger.h>
#include <log/logger_impl.h>
+#include <log/logger_level.h>
+#include <log/logger_level_impl.h>
+#include <log/logger_name.h>
#include <log/message_dictionary.h>
#include <log/message_types.h>
-#include <log/root_logger_name.h>
#include <util/strutil.h>
+// Note: as log4cplus and the BIND 10 logger have many concepts in common, and
+// thus many similar names, to disambiguate types we don't "use" the log4cplus
+// namespace: instead, all log4cplus types are explicitly qualified.
+
using namespace std;
namespace isc {
namespace log {
-// Static initializations
-
-LoggerImpl::LoggerInfoMap LoggerImpl::logger_info_;
-LoggerImpl::LoggerInfo LoggerImpl::root_logger_info_(isc::log::INFO, 0);
-
-// Constructor
-LoggerImpl::LoggerImpl(const std::string& name, bool)
+// Constructor. The setting of logger_ must be done when the variable is
+// constructed (instead of being left to the body of the function); at least
+// one compiler requires that all member variables be constructed before the
+// body of the constructor runs, but log4cplus::Logger (the type of logger_) has no
+// default constructor.
+LoggerImpl::LoggerImpl(const string& name) : name_(expandLoggerName(name)),
+ logger_(log4cplus::Logger::getInstance(name_))
{
- // Are we the root logger?
- if (name == getRootLoggerName()) {
- is_root_ = true;
- name_ = name;
- } else {
- is_root_ = false;
- name_ = getRootLoggerName() + "." + name;
- }
}
// Destructor. (Here because of virtual declaration.)
@@ -59,161 +58,72 @@ LoggerImpl::~LoggerImpl() {
}
// Set the severity for logging.
-
void
LoggerImpl::setSeverity(isc::log::Severity severity, int dbglevel) {
-
- // Silently coerce the debug level into the valid range of 0 to 99
-
- int debug_level = max(MIN_DEBUG_LEVEL, min(MAX_DEBUG_LEVEL, dbglevel));
- if (is_root_) {
-
- // Can only set severity for the root logger, you can't disable it.
- // Any attempt to do so is silently ignored.
- if (severity != isc::log::DEFAULT) {
- root_logger_info_ = LoggerInfo(severity, debug_level);
- }
-
- } else if (severity == isc::log::DEFAULT) {
-
- // Want to set to default; this means removing the information
- // about this logger from the logger_info_ if it is set.
- LoggerInfoMap::iterator i = logger_info_.find(name_);
- if (i != logger_info_.end()) {
- logger_info_.erase(i);
- }
-
- } else {
-
- // Want to set this information
- logger_info_[name_] = LoggerInfo(severity, debug_level);
- }
+ Level level(severity, dbglevel);
+ logger_.setLogLevel(LoggerLevelImpl::convertFromBindLevel(level));
}
// Return severity level
-
isc::log::Severity
LoggerImpl::getSeverity() {
+ Level level = LoggerLevelImpl::convertToBindLevel(logger_.getLogLevel());
+ return level.severity;
+}
- if (is_root_) {
- return (root_logger_info_.severity);
- }
- else {
- LoggerInfoMap::iterator i = logger_info_.find(name_);
- if (i != logger_info_.end()) {
- return ((i->second).severity);
- }
- else {
- return (isc::log::DEFAULT);
- }
- }
+// Return current debug level (only valid if current severity level is DEBUG).
+int
+LoggerImpl::getDebugLevel() {
+ Level level = LoggerLevelImpl::convertToBindLevel(logger_.getLogLevel());
+ return level.dbglevel;
}
// Get effective severity. Either the current severity or, if not set, the
// severity of the root level.
-
isc::log::Severity
LoggerImpl::getEffectiveSeverity() {
-
- if (!is_root_ && !logger_info_.empty()) {
-
- // Not root logger and there is at least one item in the info map for a
- // logger.
- LoggerInfoMap::iterator i = logger_info_.find(name_);
- if (i != logger_info_.end()) {
-
- // Found, so return the severity.
- return ((i->second).severity);
- }
- }
-
- // Must be the root logger, or this logger is defaulting to the root logger
- // settings.
- return (root_logger_info_.severity);
+ Level level = LoggerLevelImpl::convertToBindLevel(logger_.getChainedLogLevel());
+ return level.severity;
}
-// Get the debug level. This returns 0 unless the severity is DEBUG.
-
+// Return effective debug level (only valid if current effective severity level
+// is DEBUG).
int
-LoggerImpl::getDebugLevel() {
-
- if (!is_root_ && !logger_info_.empty()) {
-
- // Not root logger and there is something in the map, check if there
- // is a setting for this one.
- LoggerInfoMap::iterator i = logger_info_.find(name_);
- if (i != logger_info_.end()) {
-
- // Found, so return the debug level.
- if ((i->second).severity == isc::log::DEBUG) {
- return ((i->second).dbglevel);
- } else {
- return (0);
- }
- }
- }
-
- // Must be the root logger, or this logger is defaulting to the root logger
- // settings.
- if (root_logger_info_.severity == isc::log::DEBUG) {
- return (root_logger_info_.dbglevel);
- } else {
- return (0);
- }
+LoggerImpl::getEffectiveDebugLevel() {
+ Level level = LoggerLevelImpl::convertToBindLevel(logger_.getChainedLogLevel());
+ return level.dbglevel;
}
-// The code for isXxxEnabled is quite simple and is in the header. The only
-// exception is isDebugEnabled() where we have the complication of the debug
-// levels.
-
-bool
-LoggerImpl::isDebugEnabled(int dbglevel) {
-
- if (!is_root_ && !logger_info_.empty()) {
-
- // Not root logger and there is something in the map, check if there
- // is a setting for this one.
- LoggerInfoMap::iterator i = logger_info_.find(name_);
- if (i != logger_info_.end()) {
-
- // Found, so return the debug level.
- if ((i->second).severity <= isc::log::DEBUG) {
- return ((i->second).dbglevel >= dbglevel);
- } else {
- return (false); // Nothing lower than debug
- }
- }
- }
-
- // Must be the root logger, or this logger is defaulting to the root logger
- // settings.
- if (root_logger_info_.severity <= isc::log::DEBUG) {
- return (root_logger_info_.dbglevel >= dbglevel);
- } else {
- return (false);
- }
-}
// Output a general message
string*
LoggerImpl::lookupMessage(const MessageID& ident) {
- return (new string(string(ident) + ", " +
+ return (new string(string(ident) + " " +
MessageDictionary::globalDictionary().getText(ident)));
}
void
-LoggerImpl::outputRaw(const char* sevText, const string& message) {
- // Get the time in a struct tm format, and convert to text
- time_t t_time;
- time(&t_time);
- struct tm* tm_time = localtime(&t_time);
-
- char chr_time[32];
- (void) strftime(chr_time, sizeof(chr_time), "%Y-%m-%d %H:%M:%S", tm_time);
-
- // Now output.
- cout << chr_time << " " << sevText << " [" << getName() << "] " <<
- message << endl;
+LoggerImpl::outputRaw(const Severity& severity, const string& message) {
+ switch (severity) {
+ case DEBUG:
+ LOG4CPLUS_DEBUG(logger_, message);
+ break;
+
+ case INFO:
+ LOG4CPLUS_INFO(logger_, message);
+ break;
+
+ case WARN:
+ LOG4CPLUS_WARN(logger_, message);
+ break;
+
+ case ERROR:
+ LOG4CPLUS_ERROR(logger_, message);
+ break;
+
+ case FATAL:
+ LOG4CPLUS_FATAL(logger_, message);
+ }
}
} // namespace log
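
The debug-level handling above gives the filtering semantics documented in
log_dbglevels.h; through the public Logger interface it behaves as in this sketch
(illustrative only; assumes logging has been initialized):

    #include <cassert>

    #include <log/logger.h>

    void checkDebugFiltering() {
        isc::log::Logger logger("example");

        // Log debug messages up to and including level 25.
        logger.setSeverity(isc::log::DEBUG, 25);

        assert(logger.isDebugEnabled(25));    // 25 <= 25: would be output
        assert(!logger.isDebugEnabled(26));   // 26 >  25: suppressed
    }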
diff --git a/src/lib/log/logger_impl.h b/src/lib/log/logger_impl.h
index 187e478..90bd41a 100644
--- a/src/lib/log/logger_impl.h
+++ b/src/lib/log/logger_impl.h
@@ -18,15 +18,19 @@
#include <stdarg.h>
#include <time.h>
+#include <iostream>
#include <cstdlib>
#include <string>
#include <map>
#include <utility>
-#include <log/debug_levels.h>
-#include <log/logger.h>
+
+// log4cplus logger header file
+#include <log4cplus/logger.h>
+
+// BIND-10 logger files
+#include <log/logger_level_impl.h>
#include <log/message_types.h>
-#include <log/root_logger_name.h>
namespace isc {
namespace log {
@@ -35,46 +39,36 @@ namespace log {
///
/// The logger uses a "pimpl" idiom for implementation, where the base logger
/// class contains little more than a pointer to the implementation class, and
-/// all actions are carried out by the latter. This class is an implementation
-/// class that just outputs to stdout.
+/// all actions are carried out by the latter.
+///
+/// This particular implementation is based on log4cplus (from sourceforge:
+/// http://log4cplus.sourceforge.net). Particular items of note:
+///
+/// a) BIND 10 loggers have names of the form "program.sublogger". In other
+/// words, each of the loggers is a sub-logger of the main program logger.
+/// In log4cplus, there is a root logger (called "root" according to the
+/// documentation, but actually unnamed) and all loggers created are subloggers
+/// of it.
+///
+/// In this implementation, the log4cplus root logger is unused. Instead, the
+/// BIND 10 root logger is created as a child of the log4cplus root logger,
+/// and all other loggers used in the program are created as sub-loggers of
+/// that. In this way, the logging system can just include the name of the
+/// logger in each message without the need to specially consider whether
+/// the logger concerned is the root logger or not.
+///
+/// b) The idea of debug levels is implemented. See logger_level.h and
+/// logger_level_impl.h for more details on this.
class LoggerImpl {
public:
- /// \brief Information About Logger
- ///
- /// Holds a information about a logger, namely its severity and its debug
- /// level. This could be a std::pair, except that it gets confusing when
- /// accessing the LoggerInfoMap: that returns a pair, so we to reference
- /// elements we would use constructs like ((i->first).second);
- struct LoggerInfo {
- isc::log::Severity severity;
- int dbglevel;
-
- LoggerInfo(isc::log::Severity sev = isc::log::INFO,
- int dbg = MIN_DEBUG_LEVEL) : severity(sev), dbglevel(dbg)
- {}
- };
-
-
- /// \brief Information About All Loggers
- ///
- /// Information about all loggers in the system - except the root logger -
- /// is held in a map, linking name of the logger (excluding the root
- /// name component) and its set severity and debug levels. The root
- /// logger information is held separately.
- typedef std::map<std::string, LoggerInfo> LoggerInfoMap;
-
-
/// \brief Constructor
///
/// Creates a logger of the specific name.
///
/// \param name Name of the logger.
- ///
- /// \param exit_delete This argument is present to get round a bug in
- /// the log4cxx implementation. It is unused here.
- LoggerImpl(const std::string& name, bool);
+ LoggerImpl(const std::string& name);
/// \brief Destructor
@@ -94,16 +88,16 @@ public:
///
/// \param severity Severity level to log
/// \param dbglevel If the severity is DEBUG, this is the debug level.
- /// This can be in the range 1 to 100 and controls the verbosity. A value
+ /// This can be in the range 0 to 99 and controls the verbosity. A value
/// outside these limits is silently coerced to the nearest boundary.
- virtual void setSeverity(isc::log::Severity severity, int dbglevel = 1);
+ virtual void setSeverity(Severity severity, int dbglevel = 1);
/// \brief Get Severity Level for Logger
///
/// \return The current logging level of this logger. In most cases though,
/// the effective logging level is what is required.
- virtual isc::log::Severity getSeverity();
+ virtual Severity getSeverity();
/// \brief Get Effective Severity Level for Logger
@@ -111,67 +105,62 @@ public:
/// \return The effective severity level of the logger. This is the same
/// as getSeverity() if the logger has a severity level set, but otherwise
/// is the severity of the parent.
- virtual isc::log::Severity getEffectiveSeverity();
+ virtual Severity getEffectiveSeverity();
- /// \brief Return DEBUG Level
+ /// \brief Return debug level
///
- /// \return Current setting of debug level. This is returned regardless of
- /// whether the
+ /// \return Current setting of debug level. This will be zero if the
+    /// current severity level is not DEBUG.
virtual int getDebugLevel();
+ /// \brief Return effective debug level
+ ///
+ /// \return Current setting of effective debug level. This will be zero if
+ /// the current effective severity level is not DEBUG.
+ virtual int getEffectiveDebugLevel();
+
+
/// \brief Returns if Debug Message Should Be Output
///
/// \param dbglevel Level for which debugging is checked. Debugging is
/// enabled only if the logger has DEBUG enabled and if the dbglevel
/// checked is less than or equal to the debug level set for the logger.
- virtual bool
- isDebugEnabled(int dbglevel = MIN_DEBUG_LEVEL);
+ virtual bool isDebugEnabled(int dbglevel = MIN_DEBUG_LEVEL) {
+ Level level(DEBUG, dbglevel);
+ return logger_.isEnabledFor(LoggerLevelImpl::convertFromBindLevel(level));
+ }
/// \brief Is INFO Enabled?
virtual bool isInfoEnabled() {
- return (isEnabled(isc::log::INFO));
+ return (logger_.isEnabledFor(log4cplus::INFO_LOG_LEVEL));
}
/// \brief Is WARNING Enabled?
virtual bool isWarnEnabled() {
- return (isEnabled(isc::log::WARN));
+ return (logger_.isEnabledFor(log4cplus::WARN_LOG_LEVEL));
}
/// \brief Is ERROR Enabled?
virtual bool isErrorEnabled() {
- return (isEnabled(isc::log::ERROR));
+ return (logger_.isEnabledFor(log4cplus::ERROR_LOG_LEVEL));
}
/// \brief Is FATAL Enabled?
virtual bool isFatalEnabled() {
- return (isEnabled(isc::log::FATAL));
- }
-
-
- /// \brief Common Severity check
- ///
- /// Implements the common severity check. As an optimisation, this checks
- /// to see if any logger-specific levels have been set (a quick check as it
- /// just involves seeing if the collection of logger information is empty).
- /// if not, it returns the information for the root level; if so, it has
- /// to take longer and look up the information in the map holding the
- /// logging details.
- virtual bool isEnabled(isc::log::Severity severity) {
- if (logger_info_.empty()) {
- return (root_logger_info_.severity <= severity);
- }
- else {
- return (getSeverity() <= severity);
- }
+ return (logger_.isEnabledFor(log4cplus::FATAL_LOG_LEVEL));
}
/// \brief Raw output
///
/// Writes the message with time into the log. Used by the Formatter
/// to produce output.
- void outputRaw(const char* sev_text, const std::string& message);
+ ///
+ /// \param severity Severity of the message. (This controls the prefix
+ /// label output with the message text.)
+ /// \param message Text of the message.
+ void outputRaw(const Severity& severity, const std::string& message);
/// \brief Look up message text in dictionary
///
@@ -188,28 +177,9 @@ public:
return (name_ == other.name_);
}
-
- /// \brief Reset Global Data
- ///
- /// Only used for testing, this clears all the logger information and
- /// resets it back to default values.
- static void reset() {
- root_logger_info_ = LoggerInfo(isc::log::INFO, MIN_DEBUG_LEVEL);
- logger_info_.clear();
- }
-
-
private:
- bool is_root_; ///< true if a root logger
- std::string name_; ///< Name of this logger
-
- // Split the status of the root logger from this logger. If - is will
- // probably be the usual case - no per-logger setting is enabled, a
- // quick check of logger_info_.empty() will return true and we can quickly
- // return the root logger status without a length lookup in the map.
-
- static LoggerInfo root_logger_info_; ///< Status of root logger
- static LoggerInfoMap logger_info_; ///< Store of debug levels etc.
+ std::string name_; ///< Full name of this logger
+ log4cplus::Logger logger_; ///< Underlying log4cplus logger
};
} // namespace log
diff --git a/src/lib/log/logger_impl_log4cxx.cc b/src/lib/log/logger_impl_log4cxx.cc
deleted file mode 100644
index afe2d56..0000000
--- a/src/lib/log/logger_impl_log4cxx.cc
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE
-
-#include <iostream>
-
-#include <stdarg.h>
-#include <stdio.h>
-
-#include <log4cxx/appender.h>
-#include <log4cxx/basicconfigurator.h>
-#include <log4cxx/patternlayout.h>
-#include <log4cxx/consoleappender.h>
-
-#include <log/root_logger_name.h>
-#include <log/logger.h>
-#include <log/logger_impl.h>
-#include <log/message_dictionary.h>
-#include <log/message_types.h>
-#include <log/xdebuglevel.h>
-
-#include <util/strutil.h>
-
-using namespace std;
-
-namespace isc {
-namespace log {
-
-// Static initializations
-
-bool LoggerImpl::init_ = false;
-
-// Destructor. Delete log4cxx stuff if "don't delete" is clear.
-
-LoggerImpl::~LoggerImpl() {
- if (exit_delete_) {
- delete loggerptr_;
- }
-}
-
-// Initialize logger - create a logger as a child of the root logger. With
-// log4cxx this is assured by naming the logger <parent>.<child>.
-
-void
-LoggerImpl::initLogger() {
-
- // Initialize basic logging if not already done. This is a one-off for
- // all loggers.
- if (!init_) {
-
- // TEMPORARY
- // Add a suitable console logger to the log4cxx root logger. (This
- // is the logger at the root of the log4cxx tree, not the BIND-10 root
- // logger, which is one level down.) The chosen format is:
- //
- // YYYY-MM-DD hh:mm:ss.sss [logger] SEVERITY: text
- //
- // As noted, this is a temporary hack: it is done here to ensure that
- // a suitable output and output pattern is set. Future versions of the
- // software will set this based on configuration data.
-
- log4cxx::LayoutPtr layout(
- new log4cxx::PatternLayout(
- "%d{yyyy-MM-DD HH:mm:ss.SSS} %-5p [%c] %m\n"));
- log4cxx::AppenderPtr console(
- new log4cxx::ConsoleAppender(layout));
- log4cxx::LoggerPtr sys_root_logger = log4cxx::Logger::getRootLogger();
- sys_root_logger->addAppender(console);
-
- // Set the default logging to INFO
- sys_root_logger->setLevel(log4cxx::Level::getInfo());
-
- // All static stuff initialized
- init_ = true;
- }
-
- // Initialize this logger. Name this as to whether the BIND-10 root logger
- // name has been set. (If not, this mucks up the hierarchy :-( ).
- string root_name = RootLoggerName::getName();
- if (root_name.empty() || (name_ == root_name)) {
- loggerptr_ = new log4cxx::LoggerPtr(log4cxx::Logger::getLogger(name_));
- }
- else {
- loggerptr_ = new log4cxx::LoggerPtr(
- log4cxx::Logger::getLogger(root_name + "." + name_)
- );
- }
-}
-
-
-// Set the severity for logging. There is a 1:1 mapping between the logging
-// severity and the log4cxx logging levels, apart from DEBUG.
-//
-// In log4cxx, each of the logging levels (DEBUG, INFO, WARN etc.) has a numeric
-// value. The level is set to one of these and any numeric level equal to or
-// above it that is reported. For example INFO has a value of 20000 and ERROR
-// a value of 40000. So if a message of WARN severity (= 30000) is logged, it is
-// not logged when the logger's severity level is ERROR (as 30000 !>= 40000).
-// It is reported if the logger's severity level is set to WARN (as 30000 >=
-/// 30000) or INFO (30000 >= 20000).
-//
-// This gives a simple system for handling different debug levels. The debug
-// level is a number between 0 and 99, with 0 being least verbose and 99 the
-// most. To implement this seamlessly, when DEBUG is set, the numeric value
-// of the logging level is actually set to (DEBUG - debug-level). Similarly
-// messages of level "n" are logged at a logging level of (DEBUG - n). Thus if
-// the logging level is set to DEBUG and the debug level set to 25, the actual
-// level set is 10000 - 25 = 99975.
-//
-// Attempting to log a debug message of level 26 is an attempt to log a message
-// of level 10000 - 26 = 9974. As 9974 !>= 9975, it is not logged. A
-// message of level 25 is, because 9975 >= 9975.
-//
-// The extended set of logging levels is implemented by the XDebugLevel class.
-
-void
-LoggerImpl::setSeverity(isc::log::Severity severity, int dbglevel) {
- switch (severity) {
- case NONE:
- getLogger()->setLevel(log4cxx::Level::getOff());
- break;
-
- case FATAL:
- getLogger()->setLevel(log4cxx::Level::getFatal());
- break;
-
- case ERROR:
- getLogger()->setLevel(log4cxx::Level::getError());
- break;
-
- case WARN:
- getLogger()->setLevel(log4cxx::Level::getWarn());
- break;
-
- case INFO:
- getLogger()->setLevel(log4cxx::Level::getInfo());
- break;
-
- case DEBUG:
- getLogger()->setLevel(
- log4cxx::XDebugLevel::getExtendedDebug(dbglevel));
- break;
-
- // Will get here for DEFAULT or any other value. This disables the
- // logger's own severity and it defaults to the severity of the parent
- // logger.
- default:
- getLogger()->setLevel(0);
- }
-}
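The arithmetic in the comment above setSeverity() is easy to check directly. The following is a minimal, hedged sketch (not part of the diff); the only assumption is the value 10000 for log4cxx's DEBUG_INT, which is the figure quoted in the comment itself.

    #include <cassert>

    int main() {
        const int DEBUG_INT = 10000;            // log4cxx::Level::DEBUG_INT, per the comment above
        const int dbglevel = 25;                // debug level passed to setSeverity(DEBUG, 25)
        const int logger_level = DEBUG_INT - dbglevel;   // 9975: level stored in the logger

        // A message at debug level 26 maps to 9974, which is below 9975,
        // so it is suppressed; a message at level 25 maps to 9975 and is logged.
        assert((DEBUG_INT - 26) <  logger_level);
        assert((DEBUG_INT - 25) >= logger_level);
        return (0);
    }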
-
-// Convert between numeric log4cxx logging level and BIND-10 logging severity.
-
-isc::log::Severity
-LoggerImpl::convertLevel(int value) {
-
- // The order is optimised. This is only likely to be called when testing
- // for writing debug messages, so the check for DEBUG_INT is first.
- if (value <= log4cxx::Level::DEBUG_INT) {
- return (DEBUG);
- } else if (value <= log4cxx::Level::INFO_INT) {
- return (INFO);
- } else if (value <= log4cxx::Level::WARN_INT) {
- return (WARN);
- } else if (value <= log4cxx::Level::ERROR_INT) {
- return (ERROR);
- } else if (value <= log4cxx::Level::FATAL_INT) {
- return (FATAL);
- } else {
- return (NONE);
- }
-}
-
-
-// Return the logging severity associated with this logger.
-
-isc::log::Severity
-LoggerImpl::getSeverityCommon(const log4cxx::LoggerPtr& ptrlogger,
- bool check_parent) {
-
- log4cxx::LevelPtr level = ptrlogger->getLevel();
- if (level == log4cxx::LevelPtr()) {
-
- // Null level returned, logging should be that of the parent.
-
- if (check_parent) {
- log4cxx::LoggerPtr parent = ptrlogger->getParent();
- if (parent == log4cxx::LoggerPtr()) {
-
- // No parent, so reached the end of the chain. Return INFO
- // severity.
- return (INFO);
- }
- else {
- return (getSeverityCommon(parent, check_parent));
- }
- }
- else {
- return (DEFAULT);
- }
- } else {
- return (convertLevel(level->toInt()));
- }
-}
-
-
-// Get the debug level. This returns 0 unless the severity is DEBUG.
-
-int
-LoggerImpl::getDebugLevel() {
-
- log4cxx::LevelPtr level = getLogger()->getLevel();
- if (level == log4cxx::LevelPtr()) {
-
- // Null pointer returned, logging should be that of the parent.
- return (0);
-
- } else {
- int severity = level->toInt();
- if (severity <= log4cxx::Level::DEBUG_INT) {
- return (log4cxx::Level::DEBUG_INT - severity);
- }
- else {
- return (0);
- }
- }
-}
-
-
-
-} // namespace log
-} // namespace isc
diff --git a/src/lib/log/logger_impl_log4cxx.h b/src/lib/log/logger_impl_log4cxx.h
deleted file mode 100644
index 3101347..0000000
--- a/src/lib/log/logger_impl_log4cxx.h
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __LOGGER_IMPL_LOG4CXX_H
-#define __LOGGER_IMPL_LOG4CXX_H
-
-#include <cstdlib>
-#include <string>
-#include <boost/lexical_cast.hpp>
-#include <log4cxx/logger.h>
-#include <log4cxx/logger.h>
-
-#include <log/debug_levels.h>
-#include <log/logger.h>
-#include <log/message_types.h>
-
-namespace isc {
-namespace log {
-
-/// \brief Log4cxx Logger Implementation
-///
-/// The logger uses a "pimpl" idiom for implementation, where the base logger
-/// class contains little more than a pointer to the implementation class, and
-/// all actions are carried out by the latter. This class is an implementation
-/// class interfacing to the log4cxx logging system.
-
-class LoggerImpl {
-public:
-
- /// \brief Constructor
- ///
- /// Creates/attaches to a logger of a specific name.
- ///
- /// \param name Name of the logger. If the name is that of the root name,
- /// this creates an instance of the root logger; otherwise it creates a
- /// child of the root logger.
- ///
- /// \param exit_delete This argument is present to get round a bug in
- /// log4cxx. If a log4cxx logger is declared outside an execution unit, it
- /// is not deleted until the program runs down. At that point all such
- /// objects - including internal log4cxx objects - are deleted. However,
- /// there seems to be a bug in log4cxx where the way that such objects are
- /// destroyed causes a MutexException to be thrown (this is described in
- /// https://issues.apache.org/jira/browse/LOGCXX-322). As this only occurs
- /// during program rundown, the issue is not serious - it just looks bad to
- /// have the program crash instead of shut down cleanly.\n
- /// \n
- /// The original implementation of the isc::log::Logger had as a member a
- /// log4cxx logger (actually a LoggerPtr). If the isc:: Logger was declared
- /// statically, when it was destroyed at the end of the program the internal
- /// LoggerPtr was destroyed, which triggered the problem. The problem did
- /// not occur if the isc::log::Logger was created on the stack. To get
- /// round this, the internal LoggerPtr is now created dynamically. The
- /// exit_delete argument controls its destruction: if true, it is destroyed
- /// in the ISC Logger destructor. If false, it is not.\n
- /// \n
- /// When creating an isc::log::Logger on the stack, the argument should be
- /// false (the default); when the Logger is destroyed, all the internal
- /// log4cxx objects are destroyed. As only the logger (and not the internal
- /// log4cxx data structures are being destroyed), all is well. However,
- /// when creating the logger statically, the argument should be false. This
- /// means that the log4cxx objects are not destroyed at program rundown;
- /// instead memory is reclaimed and files are closed when the process is
- /// destroyed, something that does not trigger the bug.
- LoggerImpl(const std::string& name, bool exit_delete = false) :
- loggerptr_(NULL), name_(name), exit_delete_(exit_delete)
- {}
-
-
- /// \brief Destructor
- virtual ~LoggerImpl();
-
-
- /// \brief Get the full name of the logger (including the root name)
- virtual std::string getName() {
- return (getLogger()->getName());
- }
-
-
- /// \brief Set Severity Level for Logger
- ///
- /// Sets the level at which this logger will log messages. If none is set,
- /// the level is inherited from the parent.
- ///
- /// \param severity Severity level to log
- /// \param dbglevel If the severity is DEBUG, this is the debug level.
- /// This can be in the range 1 to 100 and controls the verbosity. A value
- /// outside these limits is silently coerced to the nearest boundary.
- virtual void setSeverity(isc::log::Severity severity, int dbglevel = 1);
-
-
- /// \brief Get Severity Level for Logger
- ///
- /// \return The current logging level of this logger. In most cases though,
- /// the effective logging level is what is required.
- virtual isc::log::Severity getSeverity() {
- return (getSeverityCommon(getLogger(), false));
- }
-
-
- /// \brief Get Effective Severity Level for Logger
- ///
- /// \return The effective severity level of the logger. This is the same
- /// as getSeverity() if the logger has a severity level set, but otherwise
- /// is the severity of the parent.
- virtual isc::log::Severity getEffectiveSeverity() {
- return (getSeverityCommon(getLogger(), true));
- }
-
-
- /// \brief Return DEBUG Level
- ///
- /// \return Current setting of debug level. This is returned regardless of
- /// whether the
- virtual int getDebugLevel();
-
-
- /// \brief Returns if Debug Message Should Be Output
- ///
- /// \param dbglevel Level for which debugging is checked. Debugging is
- /// enabled only if the logger has DEBUG enabled and if the dbglevel
- /// checked is less than or equal to the debug level set for the logger.
- virtual bool
- isDebugEnabled(int dbglevel = MIN_DEBUG_LEVEL) {
- return (getLogger()->getEffectiveLevel()->toInt() <=
- (log4cxx::Level::DEBUG_INT - dbglevel));
- }
-
-
- /// \brief Is INFO Enabled?
- virtual bool isInfoEnabled() {
- return (getLogger()->isInfoEnabled());
- }
-
-
- /// \brief Is WARNING Enabled?
- virtual bool isWarnEnabled() {
- return (getLogger()->isWarnEnabled());
- }
-
-
- /// \brief Is ERROR Enabled?
- virtual bool isErrorEnabled() {
- return (getLogger()->isErrorEnabled());
- }
-
-
- /// \brief Is FATAL Enabled?
- virtual bool isFatalEnabled() {
- return (getLogger()->isFatalEnabled());
- }
-
-
- /// \brief Output Debug Message
- ///
- /// \param ident Message identification.
- /// \param text Text to log
- void debug(const MessageID& ident, const char* text) {
- LOG4CXX_DEBUG(getLogger(), ident << ", " << text);
- }
-
-
- /// \brief Output Informational Message
- ///
- /// \param ident Message identification.
- /// \param text Text to log
- void info(const MessageID& ident, const char* text) {
- LOG4CXX_INFO(getLogger(), ident << ", " << text);
- }
-
-
- /// \brief Output Warning Message
- ///
- /// \param ident Message identification.
- /// \param text Text to log
- void warn(const MessageID& ident, const char* text) {
- LOG4CXX_WARN(getLogger(), ident << ", " << text);
- }
-
-
- /// \brief Output Error Message
- ///
- /// \param ident Message identification.
- /// \param text Text to log
- void error(const MessageID& ident, const char* text) {
- LOG4CXX_ERROR(getLogger(), ident << ", " << text);
- }
-
-
- /// \brief Output Fatal Message
- ///
- /// \param ident Message identification.
- /// \param text Text to log
- void fatal(const MessageID& ident, const char* text) {
- LOG4CXX_FATAL(getLogger(), ident << ", " << text);
- }
-
- //@{
- /// \brief Testing Methods
- ///
- /// The next set of methods are used in testing. As they are accessed from
- /// the main logger class, they must be public.
-
- /// \brief Equality
- ///
- /// Check if two instances of this logger refer to the same stream.
- /// (This method is principally for testing.)
- ///
- /// \return true if the logger objects are instances of the same logger.
- bool operator==(LoggerImpl& other) {
- return (*loggerptr_ == *other.loggerptr_);
- }
-
-
- /// \brief Logger Initialized
- ///
- /// Check that the logger has been properly initialized. (This method
- /// is principally for testing.)
- ///
- /// \return true if this logger object has been initialized.
- bool isInitialized() {
- return (loggerptr_ != NULL);
- }
-
- /// \brief Reset Global Data
- ///
- /// Only used for testing, this clears all the logger information and
- /// resets it back to default values. This is a no-op for log4cxx.
- static void reset() {
- }
-
- //@}
-
-protected:
-
- /// \brief Convert Between BIND-10 and log4cxx Logging Levels
- ///
- /// This method is marked protected to allow for unit testing.
- ///
- /// \param value log4cxx numeric logging level
- ///
- /// \return BIND-10 logging severity
- isc::log::Severity convertLevel(int value);
-
-private:
-
- /// \brief Get Severity Level for Logger
- ///
- /// This is common code for getSeverity() and getEffectiveSeverity() -
- /// it returns the severity of the logger; if not set (and the check_parent
- /// flag is set), it searches up the parent-child tree until a severity
- /// level is found and uses that.
- ///
- /// \param ptrlogger Pointer to the log4cxx logger to check.
- /// \param check_parent true to search up the tree, false to return the
- /// current level.
- ///
- /// \return The effective severity level of the logger. This is the same
- /// as getSeverity() if the logger has a severity level set, but otherwise
- /// is the severity of the parent.
- isc::log::Severity getSeverityCommon(const log4cxx::LoggerPtr& ptrlogger,
- bool check_parent);
-
-
-
- /// \brief Initialize log4cxx Logger
- ///
- /// Creates the log4cxx logger used internally. A function is provided for
- /// this so that the creation does not take place when this Logger object
- /// is created but when it is used. As the latter occurs in executable
- /// code but the former can occur during initialization, this order
- /// guarantees that anything that is statically initialized has completed
- /// its initialization by the time the logger is used.
- void initLogger();
-
-
- /// \brief Return underlying log4cxx logger, initializing it if necessary
- ///
- /// \return Loggerptr object
- log4cxx::LoggerPtr& getLogger() {
- if (loggerptr_ == NULL) {
- initLogger();
- }
- return (*loggerptr_);
- }
-
- // Members. Note that loggerptr_ is a pointer to a LoggerPtr, which is
- // itself a pointer to the underlying log4cxx logger. This is due to the
- // problems with memory deletion on program exit, explained in the comments
- // for the "exit_delete" parameter in this class's constructor.
-
- log4cxx::LoggerPtr* loggerptr_; ///< Pointer to the underlying logger
- std::string name_; ///< Name of this logger
- bool exit_delete_; ///< Delete loggerptr_ on exit?
-
- // NOTE - THIS IS A PLACE HOLDER
- static bool init_; ///< Set true when initialized
-};
-
-} // namespace log
-} // namespace isc
-
-
-#endif // __LOGGER_IMPL_LOG4CXX_H
diff --git a/src/lib/log/logger_level.cc b/src/lib/log/logger_level.cc
new file mode 100644
index 0000000..abac5be
--- /dev/null
+++ b/src/lib/log/logger_level.cc
@@ -0,0 +1,48 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <log/logger_level.h>
+#include <log/macros.h>
+#include <log/log_messages.h>
+
+#include <boost/algorithm/string.hpp>
+
+
+namespace isc {
+namespace log {
+
+isc::log::Severity
+getSeverity(const std::string& sev_str) {
+ if (boost::iequals(sev_str, "DEBUG")) {
+ return isc::log::DEBUG;
+ } else if (boost::iequals(sev_str, "INFO")) {
+ return isc::log::INFO;
+ } else if (boost::iequals(sev_str, "WARN")) {
+ return isc::log::WARN;
+ } else if (boost::iequals(sev_str, "ERROR")) {
+ return isc::log::ERROR;
+ } else if (boost::iequals(sev_str, "FATAL")) {
+ return isc::log::FATAL;
+ } else if (boost::iequals(sev_str, "NONE")) {
+ return isc::log::NONE;
+ } else {
+ Logger logger("log");
+ LOG_ERROR(logger, LOG_BAD_SEVERITY).arg(sev_str);
+ return isc::log::INFO;
+ }
+}
+
+
+} // namespace log
+} // namespace isc
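A short usage sketch of the getSeverity() helper added above (not part of the diff). The example strings are arbitrary, and the sketch assumes logging has already been initialized so the error message in the fall-back branch can be output.

    #include <cassert>
    #include <log/logger_level.h>

    void checkSeverityParsing() {
        // Matching is case-insensitive.
        assert(isc::log::getSeverity("warn")  == isc::log::WARN);
        assert(isc::log::getSeverity("FATAL") == isc::log::FATAL);

        // An unrecognised string logs an error and falls back to INFO.
        assert(isc::log::getSeverity("verbose") == isc::log::INFO);
    }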
diff --git a/src/lib/log/logger_level.h b/src/lib/log/logger_level.h
new file mode 100644
index 0000000..ea60c3c
--- /dev/null
+++ b/src/lib/log/logger_level.h
@@ -0,0 +1,76 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_LEVEL_H
+#define __LOGGER_LEVEL_H
+
+#include <string>
+
+namespace isc {
+namespace log {
+
+/// \brief Severity Levels
+///
+/// Defines the severity levels for logging. This is shared between the logger
+/// and the implementations classes.
+///
+/// N.B. The order of the levels - DEBUG less than INFO less than WARN etc. - is
+/// implicitly assumed in several implementations. They must not be changed.
+
+typedef enum {
+ DEFAULT = 0, // Default to logging level of the parent
+ DEBUG = 1,
+ INFO = 2,
+ WARN = 3,
+ ERROR = 4,
+ FATAL = 5,
+ NONE = 6 // Disable logging
+} Severity;
+
+/// Minimum/maximum debug levels.
+
+const int MIN_DEBUG_LEVEL = 0;
+const int MAX_DEBUG_LEVEL = 99;
+
+/// \brief Log level structure
+///
+/// A simple pair structure that provides suitable names for the members. It
+/// holds a combination of logging severity and debug level.
+struct Level {
+ Severity severity; ///< Logging severity
+ int dbglevel; ///< Debug level
+
+ Level(Severity sev = DEFAULT, int dbg = MIN_DEBUG_LEVEL) :
+ severity(sev), dbglevel(dbg)
+ {}
+
+ // Default assignment and copy constructor is appropriate
+};
+
+/// \brief Returns the isc::log::Severity value represented by the given string
+///
+/// This must be one of the strings "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or
+/// "NONE". (Case is not important, but the string most not contain leading or
+/// trailing spaces.)
+///
+/// \param sev_str The string representing severity value
+///
+/// \return The severity. If the string is not recognized, an error will be
+/// logged and the function will return isc::log::INFO.
+isc::log::Severity getSeverity(const std::string& sev_str);
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGGER_LEVEL_H
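For illustration (not part of the diff), a few lines showing how the Severity enum and Level pair declared above are typically constructed; the values chosen are arbitrary.

    #include <log/logger_level.h>

    using namespace isc::log;

    // Defaults come from the Level constructor above.
    Level unset;                    // severity = DEFAULT, dbglevel = MIN_DEBUG_LEVEL (0)
    Level info_level(INFO);         // dbglevel defaults to 0 and is ignored for non-DEBUG
    Level debug_level(DEBUG, 40);   // DEBUG with a mid-range debug level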
diff --git a/src/lib/log/logger_level_impl.cc b/src/lib/log/logger_level_impl.cc
new file mode 100644
index 0000000..397f6d4
--- /dev/null
+++ b/src/lib/log/logger_level_impl.cc
@@ -0,0 +1,217 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string.h>
+#include <iostream>
+#include <boost/lexical_cast.hpp>
+
+#include <log4cplus/logger.h>
+
+#include <log/logger_level.h>
+#include <log/logger_level_impl.h>
+#include <log/logimpl_messages.h>
+#include <log/macros.h>
+
+using namespace log4cplus;
+using namespace std;
+
+namespace {
+isc::log::Logger logger("log");
+}
+
+namespace isc {
+namespace log {
+
+// Convert BIND 10 level to a log4cplus logging level.
+log4cplus::LogLevel
+LoggerLevelImpl::convertFromBindLevel(const Level& level) {
+
+ // BIND 10 logging levels are small integers so we can do a table lookup
+ static const log4cplus::LogLevel log4cplus_levels[] = {
+ log4cplus::NOT_SET_LOG_LEVEL,
+ log4cplus::DEBUG_LOG_LEVEL,
+ log4cplus::INFO_LOG_LEVEL,
+ log4cplus::WARN_LOG_LEVEL,
+ log4cplus::ERROR_LOG_LEVEL,
+ log4cplus::FATAL_LOG_LEVEL,
+ log4cplus::OFF_LOG_LEVEL
+ };
+
+ // ... with compile-time checks to ensure that table indexes are correct.
+ BOOST_STATIC_ASSERT(static_cast<int>(DEFAULT) == 0);
+ BOOST_STATIC_ASSERT(static_cast<int>(DEBUG) == 1);
+ BOOST_STATIC_ASSERT(static_cast<int>(INFO) == 2);
+ BOOST_STATIC_ASSERT(static_cast<int>(WARN) == 3);
+ BOOST_STATIC_ASSERT(static_cast<int>(ERROR) == 4);
+ BOOST_STATIC_ASSERT(static_cast<int>(FATAL) == 5);
+ BOOST_STATIC_ASSERT(static_cast<int>(NONE) == 6);
+
+ // Do the conversion
+ if (level.severity == DEBUG) {
+
+ // Debug severity, so the log4cplus level returned depends on the
+ // debug level. Silently limit the debug level to the range
+ // MIN_DEBUG_LEVEL to MAX_DEBUG_LEVEL (avoids the hassle of throwing
+ // and catching exceptions and besides, this is for debug information).
+ int limited = std::max(MIN_DEBUG_LEVEL,
+ std::min(level.dbglevel, MAX_DEBUG_LEVEL));
+ LogLevel newlevel = static_cast<int>(DEBUG_LOG_LEVEL -
+ (limited - MIN_DEBUG_LEVEL));
+ return (static_cast<log4cplus::LogLevel>(newlevel));
+
+ } else {
+
+ // Can do a table lookup to speed things up. There is no need to check
+ // that the index is within range: as the variable is of type
+ // isc::log::Severity, it must be one of the Severity enum members -
+ // conversion of a numeric value to an enum is not permitted.
+ return (log4cplus_levels[level.severity]);
+ }
+}
+
+// Convert log4cplus logging level to BIND 10 debug level. It is up to the
+// caller to validate that the debug level is valid.
+Level
+LoggerLevelImpl::convertToBindLevel(const log4cplus::LogLevel loglevel) {
+
+ // Not easy to do a table lookup as the numerical values of log4cplus levels
+ // are quite high.
+ if (loglevel <= log4cplus::NOT_SET_LOG_LEVEL) {
+ return (Level(DEFAULT));
+
+ } else if (loglevel <= log4cplus::DEBUG_LOG_LEVEL) {
+
+ // Debug severity, so extract the debug level from the numeric value.
+ // If outside the limits, change the severity to the level above or
+ // below.
+ int dbglevel = MIN_DEBUG_LEVEL +
+ static_cast<int>(log4cplus::DEBUG_LOG_LEVEL) -
+ static_cast<int>(loglevel);
+ if (dbglevel > MAX_DEBUG_LEVEL) {
+ return (Level(DEFAULT));
+ } else if (dbglevel < MIN_DEBUG_LEVEL) {
+ return (Level(INFO));
+ }
+ return (Level(DEBUG, dbglevel));
+
+ } else if (loglevel <= log4cplus::INFO_LOG_LEVEL) {
+ return (Level(INFO));
+
+ } else if (loglevel <= log4cplus::WARN_LOG_LEVEL) {
+ return (Level(WARN));
+
+ } else if (loglevel <= log4cplus::ERROR_LOG_LEVEL) {
+ return (Level(ERROR));
+
+ } else if (loglevel <= log4cplus::FATAL_LOG_LEVEL) {
+ return (Level(FATAL));
+
+ }
+
+ return (Level(NONE));
+}
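A hedged round-trip sketch of the two conversions above (not part of the diff). It relies only on the encoding coded above: a BIND 10 debug level n becomes the log4cplus level DEBUG_LOG_LEVEL - n.

    #include <cassert>
    #include <log4cplus/loglevel.h>
    #include <log/logger_level.h>
    #include <log/logger_level_impl.h>

    using namespace isc::log;

    void checkLevelRoundTrip() {
        // DEBUG with debug level 25 is encoded 25 below DEBUG_LOG_LEVEL...
        const log4cplus::LogLevel ll =
            LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, 25));
        assert(ll == log4cplus::DEBUG_LOG_LEVEL - 25);

        // ... and the reverse conversion recovers both severity and debug level.
        const Level back = LoggerLevelImpl::convertToBindLevel(ll);
        assert(back.severity == DEBUG);
        assert(back.dbglevel == 25);
    }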
+
+
+// Convert string to appropriate logging level
+log4cplus::LogLevel
+LoggerLevelImpl::logLevelFromString(const log4cplus::tstring& level) {
+
+ std::string name = level; // Get to known type
+ size_t length = name.size(); // Length of the string
+
+ if (length < 5) {
+
+ // String can't possibly start with "DEBUG" so we don't know what it is.
+ // As per documentation, return NOT_SET level.
+ return (NOT_SET_LOG_LEVEL);
+ }
+ else {
+ if (strncasecmp(name.c_str(), "DEBUG", 5) == 0) {
+
+ // String starts "DEBUG" (or "debug" or any case mixture). The
+ // rest of the string - if any - should be a number.
+ if (length == 5) {
+
+ // It is plain "DEBUG". Take this as level 0.
+ return (DEBUG_LOG_LEVEL);
+ }
+ else {
+
+ // Try converting the remainder to an integer. The "5" is
+ // the length of the string "DEBUG". Note that if the number
+ // is outside the range of debug levels, it is coerced to the
+ // nearest limit. Thus a level of DEBUG509 will end up as
+ // if DEBUG99 had been specified.
+ try {
+ int dbglevel = boost::lexical_cast<int>(name.substr(5));
+ if (dbglevel < MIN_DEBUG_LEVEL) {
+ LOG_WARN(logger, LOGIMPL_BELOW_MIN_DEBUG).arg(dbglevel)
+ .arg(MIN_DEBUG_LEVEL);
+ dbglevel = MIN_DEBUG_LEVEL;
+
+ } else if (dbglevel > MAX_DEBUG_LEVEL) {
+ LOG_WARN(logger, LOGIMPL_ABOVE_MAX_DEBUG).arg(dbglevel)
+ .arg(MAX_DEBUG_LEVEL);
+ dbglevel = MAX_DEBUG_LEVEL;
+
+ }
+ return convertFromBindLevel(Level(DEBUG, dbglevel));
+ }
+ catch (boost::bad_lexical_cast&) {
+ LOG_ERROR(logger, LOGIMPL_BAD_DEBUG_STRING).arg(name);
+ return (NOT_SET_LOG_LEVEL);
+ }
+ }
+ } else {
+
+ // Unknown string - return default. Log4cplus will call any other
+ // registered conversion functions to interpret it.
+ return (NOT_SET_LOG_LEVEL);
+ }
+ }
+}
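A usage sketch of the string parsing above (not part of the diff). It assumes a non-Unicode log4cplus build where tstring is std::string, and that logging has been initialized so the out-of-range warnings can be emitted.

    #include <cassert>
    #include <log4cplus/loglevel.h>
    #include <log/logger_level_impl.h>

    using namespace isc::log;

    void checkLogLevelFromString() {
        // Plain DEBUG (any case) is debug level 0.
        assert(LoggerLevelImpl::logLevelFromString("debug") ==
               log4cplus::DEBUG_LOG_LEVEL);

        // DEBUGnn encodes the debug level as an offset below DEBUG_LOG_LEVEL.
        assert(LoggerLevelImpl::logLevelFromString("DEBUG42") ==
               log4cplus::DEBUG_LOG_LEVEL - 42);

        // Out-of-range levels are coerced (DEBUG509 behaves like DEBUG99).
        assert(LoggerLevelImpl::logLevelFromString("DEBUG509") ==
               log4cplus::DEBUG_LOG_LEVEL - 99);

        // Anything else is left for other registered converters.
        assert(LoggerLevelImpl::logLevelFromString("TRACE") ==
               log4cplus::NOT_SET_LOG_LEVEL);
    }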
+
+// Convert logging level to string. If the level is a valid debug level,
+// return the string DEBUG, else return the empty string.
+log4cplus::tstring
+LoggerLevelImpl::logLevelToString(log4cplus::LogLevel level) {
+ Level bindlevel = convertToBindLevel(level);
+ Severity& severity = bindlevel.severity;
+ int& dbglevel = bindlevel.dbglevel;
+
+ if ((severity == DEBUG) &&
+ ((dbglevel >= MIN_DEBUG_LEVEL) && (dbglevel <= MAX_DEBUG_LEVEL))) {
+ return (tstring("DEBUG"));
+ }
+
+ // Unknown, so return empty string for log4cplus to try other conversion
+ // functions.
+ return (tstring());
+}
+
+// Initialization. Register the conversion functions with the LogLevelManager.
+void
+LoggerLevelImpl::init() {
+
+ // Get the singleton log-level manager.
+ LogLevelManager& manager = getLogLevelManager();
+
+ // Register the conversion functions
+ manager.pushFromStringMethod(LoggerLevelImpl::logLevelFromString);
+ manager.pushToStringMethod(LoggerLevelImpl::logLevelToString);
+}
+
+} // namespace log
+} // namespace isc
diff --git a/src/lib/log/logger_level_impl.h b/src/lib/log/logger_level_impl.h
new file mode 100644
index 0000000..c990796
--- /dev/null
+++ b/src/lib/log/logger_level_impl.h
@@ -0,0 +1,127 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_LEVEL_IMPL_H
+#define __LOGGER_LEVEL_IMPL_H
+
+#include <log4cplus/logger.h>
+#include <log/logger_level.h>
+
+namespace isc {
+namespace log {
+
+/// \brief Implementation aspects of logging levels
+///
+/// This extends the log4cplus level set to allow 100 debug levels.
+///
+/// First some terminology, as the use of the term "level" gets confusing. The
+/// code and comments here use the term "level" in two contexts:
+///
+/// Logging level: The category of messages to log. By default log4cplus
+/// defines the following logging levels: OFF_LOG_LEVEL, FATAL_LOG_LEVEL,
+/// ERROR_LOG_LEVEL, WARN_LOG_LEVEL, INFO_LOG_LEVEL, DEBUG_LOG_LEVEL,
+/// TRACE_LOG_LEVEL, ALL_LOG_LEVEL (which here will be abbreviated OFF, FATAL
+/// etc.). Within the context of BIND-10, OFF, TRACE and ALL are not used
+/// and the idea of DEBUG has been extended, as will be seen below. In
+/// BIND 10 terms, this is known as "severity"; the "logging level" usage will
+/// usually be used when talking about log4cplus aspects of the idea (as
+/// log4cplus uses that teminology).
+///
+/// Debug level: This is a number that ranges from 0 to 99 and is used by the
+/// application to control the detail of debug output. A value of 0 gives the
+/// highest-level debug output; a value of 99 gives the most verbose and most
+/// detailed. Debug messages (of whatever debug level) are only ever output
+/// when the logging level is set to DEBUG. (Note that the numbers 0 and 99
+/// are not hard-coded - they are the constants MIN_DEBUG_LEVEL and
+/// MAX_DEBUG_LEVEL.)
+///
+/// With log4cplus, the various logging levels have a numeric value associated
+/// with them, such that FATAL > ERROR > WARNING etc. This suggests that the
+/// idea of debug levels can be incorporated into the existing logging level
+/// scheme by assigning them appropriate numeric values, i.e.
+///
+/// WARNING > INFO > DEBUG > DEBUG - 1 > DEBUG - 2 > ... > DEBUG - 99
+///
+/// Setting a numeric level of DEBUG enables the basic messages; setting higher
+/// debug levels (corresponding to lower numeric logging levels) will enable
+/// progressively more messages. The lowest debug level (0) is chosen such that
+/// it corresponds to the default level of DEBUG.
+///
+/// This class comprises nothing more than static methods to aid the conversion
+/// of logging levels between log4cplus and BIND 10, and to register those
+/// levels with log4cplus.
+
+class LoggerLevelImpl {
+public:
+
+ /// \brief Convert BIND 10 level to log4cplus logging level
+ ///
+ /// Converts the BIND 10 severity level into a log4cplus logging level.
+ /// If the severity is DEBUG, the current BIND 10 debug level is taken
+ /// into account when doing the conversion.
+ ///
+ /// \param level BIND 10 severity and debug level
+ ///
+ /// \return Equivalent log4cplus logging level.
+ static
+ log4cplus::LogLevel convertFromBindLevel(const isc::log::Level& level);
+
+ /// \brief Convert log4cplus logging level to BIND 10 logging level
+ ///
+ /// Converts the log4cplus log level into a BIND 10 severity level.
+ /// The log4cplus log level may be non-standard in which case it is
+ /// encoding a BIND 10 debug level as well.
+ ///
+ /// \param loglevel log4cplus log level
+ ///
+ /// \return Equivalent BIND 10 severity and debug level
+ static
+ isc::log::Level convertToBindLevel(const log4cplus::LogLevel loglevel);
+
+ /// \brief Convert string to log4cplus logging level
+ ///
+ /// BIND 10 extends the set of logging levels in log4cplus with a group
+ /// of debug levels. These are given names DEBUG0 through DEBUG99 (with
+ /// DEBUG0 being equivalent to DEBUG, the standard log level). If the name
+ /// is DEBUGn but n lies outside the range of debug levels, the debug level
+ /// specified is coerced to the nearest valid value. If the string is not
+ /// recognised at all, NOT_SET_LOG_LEVEL is returned.
+ ///
+ /// \param level String representing the logging level.
+ ///
+ /// \return Corresponding log4cplus log level
+ static
+ log4cplus::LogLevel logLevelFromString(const log4cplus::tstring& level);
+
+ /// \brief Convert log level to string
+ ///
+ /// If the log level is one of the extended debug levels, the string DEBUG
+ /// is returned, otherwise the empty string.
+ ///
+ /// \param level Extended logging level
+ ///
+ /// \return Equivalent string.
+ static log4cplus::tstring logLevelToString(log4cplus::LogLevel level);
+
+ /// \brief Initialize extended logging levels
+ ///
+ /// This must be called once, after log4cplus has been initialized. It
+ /// registers the level/string converter functions.
+ static void init();
+};
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGGER_LEVEL_IMPL_H
diff --git a/src/lib/log/logger_levels.h b/src/lib/log/logger_levels.h
deleted file mode 100644
index 2f123e8..0000000
--- a/src/lib/log/logger_levels.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __LOGGER_LEVELS_H
-#define __LOGGER_LEVELS_H
-
-namespace isc {
-namespace log {
-
-/// \brief Severity Levels
-///
-/// Defines the severity levels for logging. This is shared between the logger
-/// and the implementations classes.
-///
-/// N.B. The order of the levels - DEBUG less than INFO less than WARN etc. - is
-/// implicitly assumed in several implementations. They must not be changed.
-
-typedef enum {
- DEFAULT = 0, // Default to logging level of the parent
- DEBUG = 1,
- INFO = 2,
- WARN = 3,
- ERROR = 4,
- FATAL = 5,
- NONE = 6 // Disable logging
-} Severity;
-
-} // namespace log
-} // namespace isc
-
-#endif // __LOGGER_LEVELS_H
diff --git a/src/lib/log/logger_manager.cc b/src/lib/log/logger_manager.cc
new file mode 100644
index 0000000..70e0d6f
--- /dev/null
+++ b/src/lib/log/logger_manager.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+
+#include <log/logger.h>
+#include <log/logger_manager.h>
+#include <log/logger_manager_impl.h>
+#include <log/logger_name.h>
+#include <log/logger_support.h>
+#include <log/log_messages.h>
+#include <log/macros.h>
+#include <log/message_dictionary.h>
+#include <log/message_exception.h>
+#include <log/message_initializer.h>
+#include <log/message_reader.h>
+#include <log/message_types.h>
+
+using namespace std;
+
+namespace {
+
+// Logger used for logging messages within the logging code itself.
+isc::log::Logger logger("log");
+
+// Static stores for the initialization severity and debug level.
+// These are put in methods to avoid a "static initialization fiasco".
+
+isc::log::Severity& initSeverity() {
+ static isc::log::Severity severity = isc::log::INFO;
+ return (severity);
+}
+
+int& initDebugLevel() {
+ static int dbglevel = 0;
+ return (dbglevel);
+}
+
+std::string& initRootName() {
+ static std::string root("bind10");
+ return (root);
+}
+
+} // Anonymous namespace
+
+
+namespace isc {
+namespace log {
+
+// Constructor - create the implementation class.
+LoggerManager::LoggerManager() {
+ impl_ = new LoggerManagerImpl();
+}
+
+// Destructor - get rid of the implementation class
+LoggerManager::~LoggerManager() {
+ delete impl_;
+}
+
+// Initialize processing
+void
+LoggerManager::processInit() {
+ impl_->processInit();
+}
+
+// Process logging specification
+void
+LoggerManager::processSpecification(const LoggerSpecification& spec) {
+ impl_->processSpecification(spec);
+}
+
+// End Processing
+void
+LoggerManager::processEnd() {
+ impl_->processEnd();
+}
+
+
+/// Logging system initialization
+
+void
+LoggerManager::init(const std::string& root, isc::log::Severity severity,
+ int dbglevel, const char* file)
+{
+ // Save name, severity and debug level for later. No need to save the
+ // file name as once the local message file is read the messages will
+ // not be lost.
+ initRootName() = root;
+ initSeverity() = severity;
+ initDebugLevel() = dbglevel;
+
+ // Create the BIND 10 root logger and set the default severity and
+ // debug level. This is the logger that has the name of the application.
+ // All other loggers created in this application will be its children.
+ setRootLoggerName(root);
+
+ // Initialize the implementation logging. After this point, some basic
+ // logging has been set up and messages can be logged.
+ LoggerManagerImpl::init(severity, dbglevel);
+ setLoggingInitialized();
+
+ // Check if there were any duplicate message IDs in the default dictionary
+ // and if so, log them. Log using the logging facility logger.
+ vector<string>& duplicates = MessageInitializer::getDuplicates();
+ if (!duplicates.empty()) {
+
+ // There are duplicates present. These will be listed in alphabetical
+ // order of message ID, so they need to be sorted first. The list itself may
+ // contain duplicates; if so, the message ID is listed as many times as
+ // there are duplicates.
+ sort(duplicates.begin(), duplicates.end());
+ for (vector<string>::iterator i = duplicates.begin();
+ i != duplicates.end(); ++i) {
+ LOG_WARN(logger, LOG_DUPLICATE_MESSAGE_ID).arg(*i);
+ }
+
+ }
+
+ // Replace any messages with local ones (if given)
+ if (file) {
+ readLocalMessageFile(file);
+ }
+}
+
+
+// Read local message file
+// TODO This should be done after the configuration has been read so that
+// the file can be placed in the local configuration
+void
+LoggerManager::readLocalMessageFile(const char* file) {
+
+ MessageDictionary& dictionary = MessageDictionary::globalDictionary();
+ MessageReader reader(&dictionary);
+ try {
+
+ logger.info(LOG_READING_LOCAL_FILE).arg(file);
+ reader.readFile(file, MessageReader::REPLACE);
+
+ // File successfully read. As each message in the file is supposed to
+ // replace one in the dictionary, any ID read that can't be located in
+ // the dictionary will not be used. To aid problem diagnosis, the
+ // unknown message IDs are listed.
+ MessageReader::MessageIDCollection unknown = reader.getNotAdded();
+ for (MessageReader::MessageIDCollection::const_iterator
+ i = unknown.begin(); i != unknown.end(); ++i) {
+ string message_id = boost::lexical_cast<string>(*i);
+ logger.warn(LOG_NO_SUCH_MESSAGE).arg(message_id);
+ }
+ }
+ catch (MessageException& e) {
+ MessageID ident = e.id();
+ vector<string> args = e.arguments();
+
+ // Log the variable number of arguments. The actual message will be
+ // logged when the error_message variable is destroyed.
+ Formatter<isc::log::Logger> error_message = logger.error(ident);
+ for (vector<string>::size_type i = 0; i < args.size(); ++i) {
+ error_message = error_message.arg(args[i]);
+ }
+ }
+}
+
+// Reset logging to settings passed to init()
+void
+LoggerManager::reset() {
+ setRootLoggerName(initRootName());
+ LoggerManagerImpl::reset(initSeverity(), initDebugLevel());
+}
+
+} // namespace log
+} // namespace isc
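A minimal program-startup sketch using the manager above (not part of the diff). The program name "b10-example", the logger name "example" and the command-line handling are illustrative only.

    #include <log/logger.h>
    #include <log/logger_manager.h>

    int main(int argc, char* argv[]) {
        // Initialize logging before anything is logged. The root logger name
        // should be the program name; a local message file is optional.
        const char* local_file = (argc > 1) ? argv[1] : NULL;
        isc::log::LoggerManager::init("b10-example", isc::log::INFO, 0,
                                      local_file);

        // Loggers created after init() become children of the root logger,
        // i.e. this one is effectively "b10-example.example".
        isc::log::Logger logger("example");

        // ... messages are then logged through the LOG_* macros ...
        return (0);
    }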
diff --git a/src/lib/log/logger_manager.h b/src/lib/log/logger_manager.h
new file mode 100644
index 0000000..dece0c9
--- /dev/null
+++ b/src/lib/log/logger_manager.h
@@ -0,0 +1,141 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_MANAGER_H
+#define __LOGGER_MANAGER_H
+
+#include "exceptions/exceptions.h"
+#include <log/logger_specification.h>
+
+// Generated if, when updating the logging specification, an unknown
+// destination is encountered.
+class UnknownLoggingDestination : public isc::Exception {
+public:
+ UnknownLoggingDestination(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what)
+ {}
+};
+
+namespace isc {
+namespace log {
+
+class LoggerManagerImpl;
+
+/// \brief Logger Manager
+///
+/// The logger manager class exists to process the set of logger specifications
+/// and use them to set up the logging in the program appropriately.
+///
+/// To isolate the underlying implementation from basic processing, the
+/// LoggerManager is implemented using the "pimpl" idiom.
+
+class LoggerManager {
+public:
+ /// \brief Constructor
+ LoggerManager();
+
+ /// \brief Destructor
+ ~LoggerManager();
+
+ /// \brief Process Specifications
+ ///
+ /// Replaces the current logging configuration by the one given.
+ ///
+ /// \param start Iterator pointing to the start of the collection of
+ /// logging specifications.
+ /// \param finish Iterator pointing to the end of the collection of
+ /// logging specifications.
+ template <typename T>
+ void process(T start, T finish) {
+ processInit();
+ for (T i = start; i != finish; ++i) {
+ processSpecification(*i);
+ }
+ processEnd();
+ }
+
+ /// \brief Process a single specification
+ ///
+ /// A convenience function for a single specification.
+ ///
+ /// \param spec Specification to process
+ void process(const LoggerSpecification& spec) {
+ processInit();
+ processSpecification(spec);
+ processEnd();
+ }
+
+ /// \brief Run-Time Initialization
+ ///
+ /// Performs run-time initialization of the logger system, in particular
+ /// supplying the root logger name and name of a replacement message file.
+ ///
+ /// This must be the first logging function called in the program. If
+ /// an attempt is made to log a message before this function is called,
+ /// the results will be dependent on the underlying logging package.
+ ///
+ /// \param root Name of the root logger. This should be set to the name of
+ /// the program.
+ /// \param severity Severity at which to log
+ /// \param dbglevel Debug severity (ignored if "severity" is not "DEBUG")
+ /// \param file Name of the local message file. This must be NULL if there
+ /// is no local message file.
+ static void init(const std::string& root,
+ isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0, const char* file = NULL);
+
+ /// \brief Reset logging
+ ///
+ /// Resets logging to whatever was set in the call to init().
+ static void reset();
+
+ /// \brief Read local message file
+ ///
+ /// Reads the local message file into the global dictionary, overwriting
+ /// existing messages. If the file contained any message IDs not in the
+ /// dictionary, they are listed in a warning message.
+ ///
+ /// \param file Name of the local message file
+ static void readLocalMessageFile(const char* file);
+
+private:
+ /// \brief Initialize Processing
+ ///
+ /// Initializes the processing of a list of specifications by resetting all
+ /// loggers to their defaults, which is to pass the message to their
+ /// parent logger. (Except for the root logger, where the default action is
+ /// to output the message.)
+ void processInit();
+
+ /// \brief Process Logging Specification
+ ///
+ /// Processes the given specification. It is assumed at this point that
+ /// either the logger does not exist or has been made inactive.
+ void processSpecification(const LoggerSpecification& spec);
+
+ /// \brief End Processing
+ ///
+ /// Place holder for finish processing.
+ /// TODO: Check that the root logger has something enabled
+ void processEnd();
+
+ // Members
+ LoggerManagerImpl* impl_; ///< Pointer to implementation
+};
+
+} // namespace log
+} // namespace isc
+
+
+#endif // __LOGGER_MANAGER_H
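A hedged sketch of applying a set of specifications through the templated process() declared above (not part of the diff). How the LoggerSpecification objects are produced - normally from the parsed logging configuration - is outside this header, so that step is only stubbed with a hypothetical helper.

    #include <vector>
    #include <log/logger_manager.h>
    #include <log/logger_specification.h>

    // Hypothetical helper: builds specifications from the logging configuration.
    std::vector<isc::log::LoggerSpecification> buildSpecsFromConfig();

    void applyLoggingConfig() {
        std::vector<isc::log::LoggerSpecification> specs = buildSpecsFromConfig();

        // process() resets the current configuration, applies each
        // specification in turn, then completes the update.
        isc::log::LoggerManager manager;
        manager.process(specs.begin(), specs.end());
    }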
diff --git a/src/lib/log/logger_manager_impl.cc b/src/lib/log/logger_manager_impl.cc
new file mode 100644
index 0000000..d69cec8
--- /dev/null
+++ b/src/lib/log/logger_manager_impl.cc
@@ -0,0 +1,228 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <iostream>
+
+#include <log4cplus/logger.h>
+#include <log4cplus/configurator.h>
+#include <log4cplus/consoleappender.h>
+#include <log4cplus/fileappender.h>
+#include <log4cplus/syslogappender.h>
+
+#include <log/logger.h>
+#include <log/logger_level_impl.h>
+#include <log/logger_manager.h>
+#include <log/logger_manager_impl.h>
+#include <log/log_messages.h>
+#include <log/logger_name.h>
+#include <log/logger_specification.h>
+
+using namespace std;
+
+namespace isc {
+namespace log {
+
+// Reset hierarchy of loggers back to default settings. This removes all
+// appenders from loggers, sets their severity to NOT_SET (so that events are
+// passed back to the parent) and resets the root logger to logging
+// informational messages. (This last is not a log4cplus default, so we have to
+// explicitly reset the logging severity.)
+
+void
+LoggerManagerImpl::processInit() {
+ log4cplus::Logger::getDefaultHierarchy().resetConfiguration();
+ initRootLogger();
+}
+
+// Process logging specification. Set up the common states then dispatch to
+// add output specifications.
+
+void
+LoggerManagerImpl::processSpecification(const LoggerSpecification& spec) {
+
+ log4cplus::Logger logger = log4cplus::Logger::getInstance(
+ expandLoggerName(spec.getName()));
+
+ // Set severity level according to specification entry.
+ logger.setLogLevel(LoggerLevelImpl::convertFromBindLevel(
+ Level(spec.getSeverity(), spec.getDbglevel())));
+
+ // Set the additive flag.
+ logger.setAdditivity(spec.getAdditive());
+
+ // Output options given?
+ if (spec.optionCount() > 0) {
+
+ // Yes, so replace all appenders for this logger.
+ logger.removeAllAppenders();
+
+ // Now process output specifications.
+ for (LoggerSpecification::const_iterator i = spec.begin();
+ i != spec.end(); ++i) {
+ switch (i->destination) {
+ case OutputOption::DEST_CONSOLE:
+ createConsoleAppender(logger, *i);
+ break;
+
+ case OutputOption::DEST_FILE:
+ createFileAppender(logger, *i);
+ break;
+
+ case OutputOption::DEST_SYSLOG:
+ createSyslogAppender(logger, *i);
+ break;
+
+ default:
+ // Not a valid destination. As we are in the middle of updating
+ // logging destinations, we could be in the situation where
+ // there are no valid appenders. For this reason, throw an
+ // exception.
+ isc_throw(UnknownLoggingDestination,
+ "Unknown logging destination, code = " <<
+ i->destination);
+ }
+ }
+ }
+}
+
+// Console appender - log to either stdout or stderr.
+void
+LoggerManagerImpl::createConsoleAppender(log4cplus::Logger& logger,
+ const OutputOption& opt)
+{
+ log4cplus::SharedAppenderPtr console(
+ new log4cplus::ConsoleAppender(
+ (opt.stream == OutputOption::STR_STDERR), opt.flush));
+ setConsoleAppenderLayout(console);
+ logger.addAppender(console);
+}
+
+// File appender. Depending on whether a maximum size is given, either
+// a standard file appender or a rolling file appender will be created.
+void
+LoggerManagerImpl::createFileAppender(log4cplus::Logger& logger,
+ const OutputOption& opt)
+{
+ LOG4CPLUS_OPEN_MODE_TYPE mode =
+ LOG4CPLUS_FSTREAM_NAMESPACE::ios::app; // Append to existing file
+
+ log4cplus::SharedAppenderPtr fileapp;
+ if (opt.maxsize == 0) {
+ fileapp = log4cplus::SharedAppenderPtr(new log4cplus::FileAppender(
+ opt.filename, mode, opt.flush));
+ } else {
+ fileapp = log4cplus::SharedAppenderPtr(
+ new log4cplus::RollingFileAppender(opt.filename, opt.maxsize,
+ opt.maxver, opt.flush));
+ }
+
+ // use the same console layout for the files.
+ setConsoleAppenderLayout(fileapp);
+ logger.addAppender(fileapp);
+}
+
+// Syslog appender.
+void
+LoggerManagerImpl::createSyslogAppender(log4cplus::Logger& logger,
+ const OutputOption& opt)
+{
+ log4cplus::SharedAppenderPtr syslogapp(
+ new log4cplus::SysLogAppender(opt.facility));
+ setSyslogAppenderLayout(syslogapp);
+ logger.addAppender(syslogapp);
+}
+
+
+// One-time initialization of the log4cplus system
+
+void
+LoggerManagerImpl::init(isc::log::Severity severity, int dbglevel) {
+
+ // Set up basic configurator. This attaches a ConsoleAppender to the
+ // root logger with suitable output. This is used until we have
+ // actually read the logging configuration, at which point the output
+ // may well be changed.
+ log4cplus::BasicConfigurator config;
+ config.configure();
+
+ // Add the additional debug levels
+ LoggerLevelImpl::init();
+
+ reset(severity, dbglevel);
+}
+
+// Reset logging to default configuration. This closes all appenders
+// and resets the root logger to output INFO messages to the console.
+// It is principally used in testing.
+void
+LoggerManagerImpl::reset(isc::log::Severity severity, int dbglevel) {
+
+ // Initialize the root logger
+ initRootLogger(severity, dbglevel);
+}
+
+// Initialize the root logger
+void LoggerManagerImpl::initRootLogger(isc::log::Severity severity,
+ int dbglevel)
+{
+ log4cplus::Logger::getDefaultHierarchy().resetConfiguration();
+
+ // Set the log4cplus root to not output anything - effectively we are
+ // ignoring it.
+ log4cplus::Logger::getRoot().setLogLevel(log4cplus::OFF_LOG_LEVEL);
+
+ // Set the level for the BIND 10 root logger to the given severity and
+ // debug level.
+ log4cplus::Logger b10root = log4cplus::Logger::getInstance(
+ getRootLoggerName());
+ b10root.setLogLevel(LoggerLevelImpl::convertFromBindLevel(
+ Level(severity, dbglevel)));
+
+ // Set the BIND 10 root to use a console logger.
+ OutputOption opt;
+ createConsoleAppender(b10root, opt);
+}
+
+// Set the the "console" layout for the given appenders. This layout includes
+// a date/time and the name of the logger.
+
+void LoggerManagerImpl::setConsoleAppenderLayout(
+ log4cplus::SharedAppenderPtr& appender)
+{
+ // Create the pattern we want for the output - local time.
+ string pattern = "%D{%Y-%m-%d %H:%M:%S.%q} %-5p [%c] %m\n";
+
+ // Finally the text of the message
+ auto_ptr<log4cplus::Layout> layout(new log4cplus::PatternLayout(pattern));
+ appender->setLayout(layout);
+}
+
+// Set the the "syslog" layout for the given appenders. This is the same
+// as the console, but without the timestamp (which is expected to be
+// set by syslogd).
+
+void LoggerManagerImpl::setSyslogAppenderLayout(
+ log4cplus::SharedAppenderPtr& appender)
+{
+ // Create the pattern we want for the output - no timestamp, as syslogd adds its own.
+ string pattern = "%-5p [%c] %m\n";
+
+ // Finally the text of the message
+ auto_ptr<log4cplus::Layout> layout(new log4cplus::PatternLayout(pattern));
+ appender->setLayout(layout);
+}
+
+} // namespace log
+} // namespace isc
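For reference, a standalone sketch of the appender/layout combination used by createConsoleAppender() and setConsoleAppenderLayout() above (not part of the diff). It assumes a log4cplus release of the era where Appender::setLayout() takes std::auto_ptr, as the code above does.

    #include <memory>
    #include <log4cplus/consoleappender.h>
    #include <log4cplus/layout.h>
    #include <log4cplus/logger.h>

    void attachConsole(log4cplus::Logger& logger) {
        // Log to stderr and flush after each message.
        log4cplus::SharedAppenderPtr console(
            new log4cplus::ConsoleAppender(true, true));

        // Same pattern as the BIND 10 console layout: timestamp, severity,
        // logger name, message.
        std::auto_ptr<log4cplus::Layout> layout(new log4cplus::PatternLayout(
            "%D{%Y-%m-%d %H:%M:%S.%q} %-5p [%c] %m\n"));
        console->setLayout(layout);

        logger.addAppender(console);
    }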
diff --git a/src/lib/log/logger_manager_impl.h b/src/lib/log/logger_manager_impl.h
new file mode 100644
index 0000000..f99f832
--- /dev/null
+++ b/src/lib/log/logger_manager_impl.h
@@ -0,0 +1,169 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_MANAGER_IMPL_H
+#define __LOGGER_MANAGER_IMPL_H
+
+#include <string>
+
+#include <log4cplus/appender.h>
+#include <log/logger_level.h>
+
+// Forward declaration to avoid need to include log4cplus header file here.
+namespace log4cplus {
+class Logger;
+class Appender;
+}
+
+namespace isc {
+namespace log {
+
+// Forward declarations
+class LoggerSpecification;
+class OutputOption;
+
+/// \brief Logger Manager Implementation
+///
+/// This is the implementation of the logger manager for the log4cplus
+/// underlying logger.
+///
+/// As noted in logger_manager.h, the logger manager class exists to set up the
+/// logging given a set of specifications. This class handles the processing
+/// of those specifications.
+///
+/// Note: the logging has been implemented using a "pimpl" idiom to conceal
+/// the underlying implementation (log4cplus) from the BIND 10 interface.
+/// This requires that there be an implementation class, even though in this
+/// case, all the implementation class methods can be declared static.
+
+class LoggerManagerImpl {
+public:
+
+ /// \brief Constructor
+ LoggerManagerImpl()
+ {}
+
+ /// \brief Initialize Processing
+ ///
+ /// This resets the hierarchy of loggers back to their defaults. This means
+ /// that all non-root loggers (if they exist) are set to NOT_SET, and the
+ /// root logger reset to logging informational messages.
+ static void processInit();
+
+ /// \brief Process Specification
+ ///
+ /// Processes the specification for a single logger.
+ ///
+ /// \param spec Logging specification for this logger
+ static void processSpecification(const LoggerSpecification& spec);
+
+ /// \brief End Processing
+ ///
+ /// Terminates the processing of the logging specifications.
+ static void processEnd()
+ {}
+
+ /// \brief Implementation-specific initialization
+ ///
+ /// Sets the basic configuration for logging (the root logger has INFO and
+ /// more severe messages routed to stdout). Unless this function (or
+ /// process() with a valid specification for all loggers that will log
+ /// messages) is called before a message is logged, log4cplus will output
+ /// a message to stderr noting that logging has not been initialized.
+ ///
+ /// It is assumed here that the name of the BIND 10 root logger can be
+ /// obtained from the global function getRootLoggerName().
+ ///
+ /// \param severity Severity to be associated with this logger
+ /// \param dbglevel Debug level associated with the root logger
+ static void init(isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0);
+
+ /// \brief Reset logging
+ ///
+ /// Resets to default configuration (root logger logging to the console
+ /// with INFO severity).
+ ///
+ /// \param severity Severity to be associated with this logger
+ /// \param dbglevel Debug level associated with the root logger
+ static void reset(isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0);
+
+private:
+ /// \brief Create console appender
+ ///
+ /// Creates an object that, when attached to a logger, will log to one
+ /// of the output streams (stdout or stderr).
+ ///
+ /// \param logger Log4cplus logger to which the appender must be attached.
+ /// \param opt Output options for this appender.
+ static void createConsoleAppender(log4cplus::Logger& logger,
+ const OutputOption& opt);
+
+ /// \brief Create file appender
+ ///
+ /// Creates an object that, when attached to a logger, will log to a
+ /// specified file. This also includes the ability to "roll" files when
+ /// they reach a specified size.
+ ///
+ /// \param logger Log4cplus logger to which the appender must be attached.
+ /// \param opt Output options for this appender.
+ static void createFileAppender(log4cplus::Logger& logger,
+ const OutputOption& opt);
+
+ /// \brief Create syslog appender
+ ///
+ /// Creates an object that, when attached to a logger, will log to the
+ /// syslog file.
+ ///
+ /// \param logger Log4cplus logger to which the appender must be attached.
+ /// \param opt Output options for this appender.
+ static void createSyslogAppender(log4cplus::Logger& logger,
+ const OutputOption& opt);
+
+ /// \brief Set default layout and severity for root logger
+ ///
+ /// Initializes the root logger to BIND 10 defaults - console output and
+ /// the passed severity/debug level.
+ ///
+ /// \param severity Severity of messages that the logger should output.
+ /// \param dbglevel Debug level if severity = DEBUG
+ static void initRootLogger(isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0);
+
+ /// \brief Set layout for console appender
+ ///
+ /// Sets the layout of the specified appender to one suitable for file
+ /// or console output:
+ ///
+ /// YYYY-MM-DD HH:MM:SS.ssss SEVERITY [root.logger] message
+ ///
+ /// \param appender Appender for which this pattern is to be set.
+ static void setConsoleAppenderLayout(log4cplus::SharedAppenderPtr& appender);
+
+ /// \brief Set layout for syslog appender
+ ///
+ /// Sets the layout of the specified appender to one suitable for the
+ /// syslog file:
+ ///
+ /// SEVERITY [root.logger] message
+ ///
+ /// \param appender Appender for which this pattern is to be set.
+ static void setSyslogAppenderLayout(log4cplus::SharedAppenderPtr& appender);
+};
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGGER_MANAGER_IMPL_H
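
A minimal sketch of the intended call order, using the public LoggerManager
wrapper that appears later in this patch (LoggerManager::init() and
LoggerManager::process()); the root logger name used here is illustrative:

    #include <log/logger_manager.h>
    #include <log/logger_specification.h>

    void configureLogging() {
        // Basic setup before the first message is logged: root logger
        // "b10-example" (hypothetical name) at INFO, output to the console.
        isc::log::LoggerManager::init("b10-example", isc::log::INFO, 0, NULL);

        // Later, when the real configuration is known, apply a full
        // specification for the root logger.
        isc::log::LoggerSpecification spec("b10-example", isc::log::WARN);
        isc::log::LoggerManager manager;
        manager.process(spec);
    }
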
diff --git a/src/lib/log/logger_name.cc b/src/lib/log/logger_name.cc
new file mode 100644
index 0000000..abfcd5e
--- /dev/null
+++ b/src/lib/log/logger_name.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include "log/logger_name.h"
+
+namespace isc {
+namespace log {
+
+namespace {
+
+// Obtain the root logger name in a way that is safe for statically-initialized
+// objects.
+
+std::string&
+getRootLoggerNameInternal() {
+ static std::string root_name;
+ return (root_name);
+}
+
+} // Anonymous namespace
+
+void
+setRootLoggerName(const std::string& name) {
+ getRootLoggerNameInternal() = name;
+}
+
+const std::string& getRootLoggerName() {
+ return (getRootLoggerNameInternal());
+}
+
+std::string expandLoggerName(const std::string& name) {
+
+ // Is this the root logger, or does the logger name start with
+ // the string "<root_logger_name>."? If so, use the name as
+ // given.
+ if ((name == getRootLoggerName()) ||
+ (name.find(getRootLoggerName() + std::string(".")) == 0)) {
+ return (name);
+
+ }
+
+ // Anything else is assumed to be a sub-logger of the root logger.
+ return (getRootLoggerName() + "." + name);
+}
+
+} // namespace log
+} // namespace isc
diff --git a/src/lib/log/logger_name.h b/src/lib/log/logger_name.h
new file mode 100644
index 0000000..82ea2ad
--- /dev/null
+++ b/src/lib/log/logger_name.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_NAME_H
+#define __LOGGER_NAME_H
+
+#include <string>
+
+/// \brief Define Name of Root Logger
+///
+/// In BIND 10, the name of the root logger of a program is the name of the
+/// program itself (in contrast to packages such as log4cplus, where the root
+/// logger name is something like "root"). These trivial functions allow the
+/// logger classes to set and get that name.
+
+namespace isc {
+namespace log {
+
+/// \brief Set root logger name
+///
+/// This function should be called by the program's initialization code before
+/// any logging functions are called.
+///
+/// \param name Name of the root logger. This should be the program name.
+void setRootLoggerName(const std::string& name);
+
+/// \brief Get root logger name
+///
+/// \return Name of the root logger.
+const std::string& getRootLoggerName();
+
+/// \brief Expand logger name
+///
+/// Given a logger name, returns the fully-expanded logger name. If the name
+/// starts with the root logger name, it is returned as-is. Otherwise it is
+/// prefixed with the root logger name.
+///
+/// \param name Name to expand.
+///
+/// \return Fully-expanded logger name.
+std::string expandLoggerName(const std::string& name);
+
+}
+}
+
+#endif // __LOGGER_NAME_H
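
As a quick illustration of the expansion rules implemented in logger_name.cc
(the program name "b10-auth" is just an example):

    #include <cassert>
    #include <log/logger_name.h>

    int main() {
        using namespace isc::log;

        setRootLoggerName("b10-auth");              // normally the program name
        assert(getRootLoggerName() == "b10-auth");

        // A bare sub-logger name is prefixed with the root name...
        assert(expandLoggerName("cache") == "b10-auth.cache");
        // ...while a name already rooted is returned unchanged.
        assert(expandLoggerName("b10-auth.cache") == "b10-auth.cache");
        return (0);
    }
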
diff --git a/src/lib/log/logger_specification.h b/src/lib/log/logger_specification.h
new file mode 100644
index 0000000..6805fdd
--- /dev/null
+++ b/src/lib/log/logger_specification.h
@@ -0,0 +1,156 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_SPECIFICATION_H
+#define __LOGGER_SPECIFICATION_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <log/logger_level.h>
+#include <log/output_option.h>
+
+/// \brief Logger Specification
+///
+/// The logging configuration options are a list of logger specifications, each
+/// of which represents a logger and the options for its appenders.
+///
+/// Unlike OutputOption (which is a struct), this holds a little more
+/// structure and so is implemented as a class.
+
+#include <vector>
+
+namespace isc {
+namespace log {
+
+class LoggerSpecification {
+public:
+ typedef std::vector<OutputOption>::iterator iterator;
+ typedef std::vector<OutputOption>::const_iterator const_iterator;
+
+ /// \brief Constructor
+ ///
+ /// \param name Name of the logger.
+ /// \param severity Severity at which this logger logs
+ /// \param dbglevel Debug level
+ /// \param additive true to cause messages logged with this logger to be
+ /// passed to the parent for logging as well.
+ LoggerSpecification(const std::string& name = "",
+ isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0, bool additive = false) :
+ name_(name), severity_(severity), dbglevel_(dbglevel),
+ additive_(additive)
+ {}
+
+ /// \brief Set the name of the logger.
+ ///
+ /// \param name Name of the logger.
+ void setName(const std::string& name) {
+ name_ = name;
+ }
+
+ /// \return Return logger name.
+ std::string getName() const {
+ return name_;
+ }
+
+ /// \brief Set the severity.
+ ///
+ /// \param severity New severity of the logger.
+ void setSeverity(isc::log::Severity severity) {
+ severity_ = severity;
+ }
+
+ /// \return Return logger severity.
+ isc::log::Severity getSeverity() const {
+ return severity_;
+ }
+
+ /// \brief Set the debug level.
+ ///
+ /// \param dbglevel New debug level of the logger.
+ void setDbglevel(int dbglevel) {
+ dbglevel_ = dbglevel;
+ }
+
+ /// \return Return logger debug level
+ int getDbglevel() const {
+ return dbglevel_;
+ }
+
+ /// \brief Set the additive flag.
+ ///
+ /// \param additive New value of the additive flag.
+ void setAdditive(bool additive) {
+ additive_ = additive;
+ }
+
+ /// \return Return additive flag.
+ int getAdditive() const {
+ return additive_;
+ }
+
+ /// \brief Add output option.
+ ///
+ /// \param option Option to add to the list.
+ void addOutputOption(const OutputOption& option) {
+ options_.push_back(option);
+ }
+
+ /// \return Iterator to start of output options.
+ iterator begin() {
+ return options_.begin();
+ }
+
+ /// \return Iterator to start of output options.
+ const_iterator begin() const {
+ return options_.begin();
+ }
+
+ /// \return Iterator to end of output options.
+ iterator end() {
+ return options_.end();
+ }
+
+ /// \return Iterator to end of output options.
+ const_iterator end() const {
+ return options_.end();
+ }
+
+ /// \return Number of output specification options.
+ size_t optionCount() const {
+ return options_.size();
+ }
+
+ /// \brief Reset back to defaults.
+ void reset() {
+ name_ = "";
+ severity_ = isc::log::INFO;
+ dbglevel_ = 0;
+ additive_ = false;
+ options_.clear();
+ }
+
+private:
+ std::string name_; ///< Logger name
+ isc::log::Severity severity_; ///< Severity for this logger
+ int dbglevel_; ///< Debug level
+ bool additive_; ///< Chaining output
+ std::vector<OutputOption> options_; ///< Logger options
+};
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGGER_SPECIFICATION_H
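
A short sketch of building a specification and handing it to the logger
manager; the logger name and file path below are hypothetical:

    #include <log/logger_manager.h>
    #include <log/logger_specification.h>
    #include <log/output_option.h>

    void applyExampleSpec() {
        using namespace isc::log;

        // Logger "b10-resolver" at DEBUG severity, debug level 25.
        LoggerSpecification spec("b10-resolver", DEBUG, 25);

        // First output option: append to a file.
        OutputOption file_option;
        file_option.destination = OutputOption::DEST_FILE;
        file_option.filename = "/tmp/b10-resolver.log";
        spec.addOutputOption(file_option);

        // Second output option: the default console/stderr settings.
        OutputOption console_option;
        spec.addOutputOption(console_option);

        // Hand the specification to the manager for processing.
        LoggerManager manager;
        manager.process(spec);
    }
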
diff --git a/src/lib/log/logger_support.cc b/src/lib/log/logger_support.cc
index e17c47d..2097136 100644
--- a/src/lib/log/logger_support.cc
+++ b/src/lib/log/logger_support.cc
@@ -12,197 +12,42 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE
-/// \brief Temporary Logger Support
-///
-/// Performs run-time initialization of the logger system. In particular, it
-/// is passed information from the command line and:
-///
-/// a) Sets the severity of the messages being logged (and debug level if
-/// appropriate).
-/// b) Reads in the local message file is one has been supplied.
-///
-/// These functions will be replaced once the code has been written to obtain
-/// the logging parameters from the configuration database.
-
-#include <iostream>
-#include <algorithm>
-#include <iostream>
#include <string>
-#include <vector>
-#include <boost/lexical_cast.hpp>
-
-#include <log/logger.h>
#include <log/logger_support.h>
-#include <log/messagedef.h>
-#include <log/message_dictionary.h>
-#include <log/message_exception.h>
-#include <log/message_initializer.h>
-#include <log/message_reader.h>
-#include <log/message_types.h>
-#include <log/root_logger_name.h>
-
-namespace isc {
-namespace log {
+#include <log/logger_manager.h>
using namespace std;
-// Declare a logger for the logging subsystem. This is a sub-logger of the
-// root logger and is used in all functions in this file.
-Logger logger("log");
-
+namespace {
-/// \brief Reads Local Message File
-///
-/// Reads the local message file into the global dictionary, overwriting
-/// existing messages. If the file contained any message IDs not in the
-/// dictionary, they are listed in a warning message.
-///
-/// \param file Name of the local message file
-static void
-readLocalMessageFile(const char* file) {
+// Flag to hold logging initialization state.
+bool logging_init_state = false;
- MessageDictionary& dictionary = MessageDictionary::globalDictionary();
- MessageReader reader(&dictionary);
- try {
- logger.info(MSG_RDLOCMES).arg(file);
- reader.readFile(file, MessageReader::REPLACE);
+} // Anonymous namespace
- // File successfully read, list the duplicates
- MessageReader::MessageIDCollection unknown = reader.getNotAdded();
- for (MessageReader::MessageIDCollection::const_iterator
- i = unknown.begin(); i != unknown.end(); ++i) {
- string message_id = boost::lexical_cast<string>(*i);
- logger.warn(MSG_IDNOTFND).arg(message_id);
- }
- }
- catch (MessageException& e) {
- MessageID ident = e.id();
- vector<string> args = e.arguments();
- switch (args.size()) {
- case 0:
- logger.error(ident);
- break;
-
- case 1:
- logger.error(ident).arg(args[0]);
- break;
+namespace isc {
+namespace log {
- case 2:
- logger.error(ident).arg(args[0]).arg(args[1]);
- break;
+// Return initialization state.
+bool
+isLoggingInitialized() {
+ return (logging_init_state);
+}
- default: // 3 or more (3 should be the maximum)
- logger.error(ident).arg(args[0]).arg(args[1]).arg(args[2]);
- }
- }
+// Set initialization state. (Note: as logging can be initialized via a direct
+// call to LoggerManager::init(), this function is called from there, not from
+// the initialization functions in this file.)
+void
+setLoggingInitialized(bool state) {
+ logging_init_state = state;
}
-/// Logger Run-Time Initialization
+// Logger Run-Time Initialization.
void
initLogger(const string& root, isc::log::Severity severity, int dbglevel,
const char* file) {
-
- // Create the application root logger and set the default severity and
- // debug level. This is the logger that has the name of the application.
- // All other loggers created in this application will be its children.
- setRootLoggerName(root);
- Logger root_logger(isc::log::getRootLoggerName(), true);
-
- // Set the severity associated with it. If no other logger has a severity,
- // this will be the default.
- root_logger.setSeverity(severity, dbglevel);
-
- // Check if there were any duplicate message IDs in the default dictionary
- // and if so, log them. Log using the logging facility root logger.
- vector<string>& duplicates = MessageInitializer::getDuplicates();
- if (!duplicates.empty()) {
-
- // There are - sort and remove any duplicates.
- sort(duplicates.begin(), duplicates.end());
- vector<string>::iterator new_end =
- unique(duplicates.begin(), duplicates.end());
- for (vector<string>::iterator i = duplicates.begin(); i != new_end; ++i) {
- logger.warn(MSG_DUPMSGID).arg(*i);
- }
-
- }
-
- // Replace any messages with local ones (if given)
- if (file) {
- readLocalMessageFile(file);
- }
-}
-
-/// Logger Run-Time Initialization via Environment Variables
-void initLogger() {
-
- // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
- // If not present, the name is "b10root".
- const char* DEFAULT_ROOT = "b10root";
- const char* root = getenv("B10_LOGGER_ROOT");
- if (! root) {
- root = DEFAULT_ROOT;
- }
-
- // Set the logging severity. The environment variable is
- // B10_LOGGER_SEVERITY, and can be one of "DEBUG", "INFO", "WARN", "ERROR"
- // of "FATAL". Note that the string must be in upper case with no leading
- // of trailing blanks.
- isc::log::Severity severity = isc::log::DEFAULT;
- const char* sev_char = getenv("B10_LOGGER_SEVERITY");
- if (sev_char) {
- string sev_string(sev_char);
- if (sev_string == "DEBUG") {
- severity = isc::log::DEBUG;
- } else if (sev_string == "INFO") {
- severity = isc::log::INFO;
- } else if (sev_string == "WARN") {
- severity = isc::log::WARN;
- } else if (sev_string == "ERROR") {
- severity = isc::log::ERROR;
- } else if (sev_string == "FATAL") {
- severity = isc::log::FATAL;
- } else {
- std::cerr << "**ERROR** unrecognised logger severity of '"
- << sev_string << "' - default severity will be used\n";
- }
- }
-
- // If the severity is debug, get the debug level (environment variable
- // B10_LOGGER_DBGLEVEL), which should be in the range 0 to 99.
- int dbglevel = 0;
- if (severity == isc::log::DEBUG) {
- const char* dbg_char = getenv("B10_LOGGER_DBGLEVEL");
- if (dbg_char) {
- int level = 0;
- try {
- level = boost::lexical_cast<int>(dbg_char);
- if (level < MIN_DEBUG_LEVEL) {
- std::cerr << "**ERROR** debug level of " << level
- << " is invalid - a value of " << MIN_DEBUG_LEVEL
- << " will be used\n";
- level = MIN_DEBUG_LEVEL;
- } else if (level > MAX_DEBUG_LEVEL) {
- std::cerr << "**ERROR** debug level of " << level
- << " is invalid - a value of " << MAX_DEBUG_LEVEL
- << " will be used\n";
- level = MAX_DEBUG_LEVEL;
- }
- } catch (...) {
- // Error, but not fatal to the test
- std::cerr << "**ERROR** Unable to translate "
- "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
- }
- dbglevel = level;
- }
- }
-
- /// Set the local message file
- const char* localfile = getenv("B10_LOGGER_LOCALMSG");
-
- // Initialize logging
- initLogger(root, severity, dbglevel, localfile);
+ LoggerManager::init(root, severity, dbglevel, file);
}
} // namespace log
diff --git a/src/lib/log/logger_support.h b/src/lib/log/logger_support.h
index f4861b2..4ce3ced 100644
--- a/src/lib/log/logger_support.h
+++ b/src/lib/log/logger_support.h
@@ -15,13 +15,38 @@
#ifndef __LOGGER_SUPPORT_H
#define __LOGGER_SUPPORT_H
+#include <unistd.h>
+
#include <string>
#include <log/logger.h>
+#include <log/logger_unittest_support.h>
+
+/// \file
+/// \brief Logging initialization functions
+///
+/// Contains a set of functions relating to logging initialization that are
+/// used by the production code.
namespace isc {
namespace log {
-/// \brief Run-Time Initialization
+/// \brief Is logging initialized?
+///
+/// As some underlying logging implementations can behave unpredictably if they
+/// have not been initialized when a logging function is called, their
+/// initialization state is tracked. The logger functions will check this flag
+/// and throw an exception if logging is not initialized at that point.
+///
+/// \return true if logging has been initialized, false if not
+bool isLoggingInitialized();
+
+/// \brief Set state of "logging initialized" flag
+///
+/// \param state State to set the flag to. (This is expected to be "true" - the
+/// default - for all code apart from specific unit tests.)
+void setLoggingInitialized(bool state = true);
+
+/// \brief Run-time initialization
///
/// Performs run-time initialization of the logger in particular supplying:
///
@@ -36,42 +61,11 @@ namespace log {
/// \param severity Severity at which to log
/// \param dbglevel Debug severity (ignored if "severity" is not "DEBUG")
/// \param file Name of the local message file.
-void initLogger(const std::string& root, isc::log::Severity severity,
- int dbglevel, const char* file);
-
-
-/// \brief Run-Time Initialization from Environment
-///
-/// Performs run-time initialization of the logger via the setting of
-/// environment variables. These are:
-///
-/// B10_LOGGER_ROOT
-/// Name of the root logger. If not given, the string "b10root" will be used.
-///
-/// B10_LOGGER_SEVERITY
-/// Severity of messages that will be logged. This must be one of the strings
-/// "DEBUG", "INFO", "WARN", "ERROR", "FATAL". (Must be upper case and must
-/// not contain leading or trailing spaces.) If not specified (or if
-/// specified but incorrect), the default for the logging system will be used
-/// (currently INFO).
-///
-/// B10_LOGGER_DBGLEVEL
-/// Ignored if the level is not DEBUG, this should be a number between 0 and
-/// 99 indicating the logging severity. The default is 0. If outside these
-/// limits or if not a number, a value of 0 is used.
-///
-/// B10_LOGGER_LOCALMSG
-/// If defined, the path specification of a file that contains message
-/// definitions replacing ones in the default dictionary.
-///
-/// Any errors in the settings cause messages to be output to stderr.
-///
-/// This function is most likely to be called from unit test programs.
-
-void initLogger();
+void initLogger(const std::string& root,
+ isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0, const char* file = NULL);
} // namespace log
} // namespace isc
-
#endif // __LOGGER_SUPPORT_H
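
A sketch of the production start-up path using the functions declared above
(the program name is hypothetical):

    #include <log/logger_support.h>

    int main() {
        // Initialize logging for this program before any message is logged;
        // the severity and debug level shown are the defaults made explicit.
        isc::log::initLogger("b10-example", isc::log::INFO, 0, NULL);

        // Library code can check the state before attempting to log.
        if (!isc::log::isLoggingInitialized()) {
            return (1);     // should not happen after initLogger()
        }

        // ... run the application ...
        return (0);
    }
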
diff --git a/src/lib/log/logger_unittest_support.cc b/src/lib/log/logger_unittest_support.cc
new file mode 100644
index 0000000..a0969be
--- /dev/null
+++ b/src/lib/log/logger_unittest_support.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <algorithm>
+#include <string>
+
+#include <log/logger_level.h>
+#include <log/logger_name.h>
+#include <log/logger_manager.h>
+#include <log/logger_specification.h>
+#include <log/logger_unittest_support.h>
+#include <log/logger_support.h>
+#include <log/output_option.h>
+
+using namespace std;
+
+namespace isc {
+namespace log {
+
+// Get the logging severity. This is defined by the environment variable
+// B10_LOGGER_SEVERITY, and can be one of "DEBUG", "INFO", "WARN", "ERROR"
+// or "FATAL". (Note that the string must be in upper case with no leading
+// or trailing blanks.) If not present, the default severity passed to the
+// function is returned.
+isc::log::Severity
+b10LoggerSeverity(isc::log::Severity defseverity) {
+ const char* sev_char = getenv("B10_LOGGER_SEVERITY");
+ if (sev_char) {
+ return (isc::log::getSeverity(sev_char));
+ }
+ return (defseverity);
+}
+
+// Get the debug level. This is defined by the environment variable
+// B10_LOGGER_DBGLEVEL. If not defined, a default value passed to the function
+// is returned.
+int
+b10LoggerDbglevel(int defdbglevel) {
+ const char* dbg_char = getenv("B10_LOGGER_DBGLEVEL");
+ if (dbg_char) {
+ int level = 0;
+ try {
+ level = boost::lexical_cast<int>(dbg_char);
+ if (level < MIN_DEBUG_LEVEL) {
+ std::cerr << "**ERROR** debug level of " << level
+ << " is invalid - a value of " << MIN_DEBUG_LEVEL
+ << " will be used\n";
+ level = MIN_DEBUG_LEVEL;
+ } else if (level > MAX_DEBUG_LEVEL) {
+ std::cerr << "**ERROR** debug level of " << level
+ << " is invalid - a value of " << MAX_DEBUG_LEVEL
+ << " will be used\n";
+ level = MAX_DEBUG_LEVEL;
+ }
+ } catch (...) {
+ // Error, but not fatal to the test
+ std::cerr << "**ERROR** Unable to translate "
+ "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
+ }
+ return (level);
+ }
+
+ return (defdbglevel);
+}
+
+
+// Reset characteristics of the root logger to that set by the environment
+// variables B10_LOGGER_SEVERITY, B10_LOGGER_DBGLEVEL and B10_LOGGER_DESTINATION.
+
+void
+resetUnitTestRootLogger() {
+
+ using namespace isc::log;
+
+ // Constants: not declared static as this function is expected to be
+ // called once only.
+ const string DEVNULL = "/dev/null";
+ const string STDOUT = "stdout";
+ const string STDERR = "stderr";
+ const string SYSLOG = "syslog";
+ const string SYSLOG_COLON = "syslog:";
+
+ // Get the destination. If not specified, assume /dev/null. (The default
+ // severity for unit tests is DEBUG, which generates a lot of output.
+ // Routing the logging to /dev/null will suppress that, whilst still
+ // ensuring that the code paths are tested.)
+ const char* destination = getenv("B10_LOGGER_DESTINATION");
+ const string dest((destination == NULL) ? DEVNULL : destination);
+
+ // Prepare the objects to define the logging specification
+ LoggerSpecification spec(getRootLoggerName(),
+ b10LoggerSeverity(isc::log::DEBUG),
+ b10LoggerDbglevel(isc::log::MAX_DEBUG_LEVEL));
+ OutputOption option;
+
+ // Set up output option according to destination specification
+ if (dest == STDOUT) {
+ option.destination = OutputOption::DEST_CONSOLE;
+ option.stream = OutputOption::STR_STDOUT;
+
+ } else if (dest == STDERR) {
+ option.destination = OutputOption::DEST_CONSOLE;
+ option.stream = OutputOption::STR_STDERR;
+
+ } else if (dest == SYSLOG) {
+ option.destination = OutputOption::DEST_SYSLOG;
+ // Use default specified in OutputOption constructor for the
+ // syslog destination
+
+ } else if (dest.find(SYSLOG_COLON) == 0) {
+ option.destination = OutputOption::DEST_SYSLOG;
+ // Must take account of the string actually being "syslog:"
+ if (dest == SYSLOG_COLON) {
+ cerr << "**ERROR** value for B10_LOGGER_DESTINATION of " <<
+ SYSLOG_COLON << " is invalid, " << SYSLOG <<
+ " will be used instead\n";
+ // Use default for logging facility
+
+ } else {
+ // Everything else in the string is the facility name
+ option.facility = dest.substr(SYSLOG_COLON.size());
+ }
+
+ } else {
+ // Not a recognised destination, assume a file.
+ option.destination = OutputOption::DEST_FILE;
+ option.filename = dest;
+ }
+
+ // ... and set the destination
+ spec.addOutputOption(option);
+ LoggerManager manager;
+ manager.process(spec);
+}
+
+
+// Logger Run-Time Initialization via Environment Variables
+void initLogger(isc::log::Severity severity, int dbglevel) {
+
+ // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
+ // If not present, the name is "bind10".
+ const char* DEFAULT_ROOT = "bind10";
+ const char* root = getenv("B10_LOGGER_ROOT");
+ if (! root) {
+ root = DEFAULT_ROOT;
+ }
+
+ // Set the local message file
+ const char* localfile = getenv("B10_LOGGER_LOCALMSG");
+
+ // Initialize logging
+ initLogger(root, isc::log::DEBUG, isc::log::MAX_DEBUG_LEVEL, localfile);
+
+ // Now reset the output destination of the root logger, overriding
+ // the default severity, debug level and destination with those specified
+ // in the environment variables. (The two-step approach is used as the
+ // resetUnitTestRootLogger() function is used in several places in the
+ // BIND 10 tests, and this avoids duplicating code.)
+ resetUnitTestRootLogger();
+}
+
+} // namespace log
+} // namespace isc
diff --git a/src/lib/log/logger_unittest_support.h b/src/lib/log/logger_unittest_support.h
new file mode 100644
index 0000000..ce9121b
--- /dev/null
+++ b/src/lib/log/logger_unittest_support.h
@@ -0,0 +1,126 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_UNITTEST_SUPPORT_H
+#define __LOGGER_UNITTEST_SUPPORT_H
+
+#include <string>
+#include <log/logger.h>
+
+/// \file
+/// \brief Miscellaneous logging functions used by the unit tests.
+///
+/// As the configuration database is usually unavailable during unit tests,
+/// the functions defined here allow a limited amount of logging configuration
+/// through the use of environment variables.
+
+namespace isc {
+namespace log {
+
+/// \brief Run-Time Initialization for Unit Tests from Environment
+///
+/// Performs run-time initialization of the logger via the setting of
+/// environment variables. These are:
+///
+/// - B10_LOGGER_ROOT\n
+/// Name of the root logger. If not given, the string "bind10" will be used.
+///
+/// - B10_LOGGER_SEVERITY\n
+/// Severity of messages that will be logged. This must be one of the strings
+/// "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE". (Must be upper case
+/// and must not contain leading or trailing spaces.) If not specified (or if
+/// specified but incorrect), the default passed as argument to this function
+/// (currently DEBUG) will be used.
+///
+/// - B10_LOGGER_DBGLEVEL\n
+/// Ignored if the severity is not DEBUG, this should be a number between 0
+/// and 99 indicating the debug level. If the variable is not defined, the
+/// value passed to this function (default of MAX_DEBUG_LEVEL) is used; if it
+/// is defined but invalid, a message is written to stderr and a corrected
+/// value is used.
+///
+/// - B10_LOGGER_LOCALMSG\n
+/// If defined, the path specification of a file that contains message
+/// definitions replacing ones in the default dictionary.
+///
+/// - B10_LOGGER_DESTINATION\n
+/// If defined, the destination of the logging output. This can be one of:
+/// - \c stdout Send output to stdout.
+/// - \c stderr Send output to stderr
+/// - \c syslog Send output to syslog using the facility local0.
+/// - \c syslog:xxx Send output to syslog, using the facility xxx. ("xxx"
+/// should be one of the syslog facilities such as "local0".) There must
+/// be a colon between "syslog" and "xxx".
+/// - \c other Anything else is interpreted as the name of a file to which
+/// output is appended. If the file does not exist, it is created.
+///
+/// Any errors in the settings cause messages to be output to stderr.
+///
+/// This function is aimed at test programs, allowing the default settings to
+/// be overridden by the tester. It is not intended for use in production
+/// code.
+///
+/// TODO: Rename. This function overloads the initLogger() function that can
+/// be used to initialize production programs. This may lead to confusion.
+void initLogger(isc::log::Severity severity = isc::log::DEBUG,
+ int dbglevel = isc::log::MAX_DEBUG_LEVEL);
+
+
+/// \brief Obtains logging severity from B10_LOGGER_SEVERITY
+///
+/// Support function called by the unit test logging initialization code.
+/// It returns the logging severity defined by B10_LOGGER_SEVERITY. If
+/// not defined it returns the default passed to it.
+///
+/// \param defseverity Default severity used if B10_LOGGER_SEVERITY is not
+/// defined.
+///
+/// \return Severity to use for the logging.
+isc::log::Severity b10LoggerSeverity(isc::log::Severity defseverity);
+
+
+/// \brief Obtains logging debug level from B10_LOGGER_DBGLEVEL
+///
+/// Support function called by the unit test logging initialization code.
+/// It returns the logging debug level defined by B10_LOGGER_DBGLEVEL. If
+/// not defined, it returns the default passed to it.
+///
+/// N.B. If there is an error, a message is written to stderr and a value
+/// related to the error is used. (This is because (a) logging is not yet
+/// initialized, hence only the error stream is known to exist, and (b) this
+/// function is only used in unit test logging initialization, so incorrect
+/// selection of a level is not really an issue.)
+///
+/// \param defdbglevel Default debug level to be used if B10_LOGGER_DBGLEVEL
+/// is not defined.
+///
+/// \return Debug level to use.
+int b10LoggerDbglevel(int defdbglevel);
+
+
+/// \brief Reset root logger characteristics
+///
+/// This is a simplified interface into the resetting of the characteristics
+/// of the root logger. It is aimed for use in unit tests and resets the
+/// characteristics of the root logger to use a severity, debug level and
+/// destination set by the environment variables B10_LOGGER_SEVERITY,
+/// B10_LOGGER_DBGLEVEL and B10_LOGGER_DESTINATION.
+void
+resetUnitTestRootLogger();
+
+} // namespace log
+} // namespace isc
+
+
+
+#endif // __LOGGER_UNITTEST_SUPPORT_H
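
For a unit test program, the usual pattern is simply to call the default form
of initLogger() before running the tests and tune the behaviour from the
environment; a sketch (the gtest main shown here is illustrative, not the
actual run_unittests.cc):

    #include <gtest/gtest.h>
    #include <log/logger_unittest_support.h>

    int main(int argc, char* argv[]) {
        ::testing::InitGoogleTest(&argc, argv);

        // Defaults to DEBUG at MAX_DEBUG_LEVEL routed to /dev/null; run with,
        // e.g., B10_LOGGER_SEVERITY=INFO B10_LOGGER_DESTINATION=stderr to see
        // the output.
        isc::log::initLogger();

        return (RUN_ALL_TESTS());
    }
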
diff --git a/src/lib/log/logimpl_messages.cc b/src/lib/log/logimpl_messages.cc
new file mode 100644
index 0000000..ca8552e
--- /dev/null
+++ b/src/lib/log/logimpl_messages.cc
@@ -0,0 +1,29 @@
+// File created from logimpl_messages.mes on Wed Jun 22 10:57:02 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOGIMPL_ABOVE_MAX_DEBUG = "LOGIMPL_ABOVE_MAX_DEBUG";
+extern const isc::log::MessageID LOGIMPL_BAD_DEBUG_STRING = "LOGIMPL_BAD_DEBUG_STRING";
+extern const isc::log::MessageID LOGIMPL_BELOW_MIN_DEBUG = "LOGIMPL_BELOW_MIN_DEBUG";
+
+} // namespace log
+} // namespace isc
+
+namespace {
+
+const char* values[] = {
+ "LOGIMPL_ABOVE_MAX_DEBUG", "debug level of %1 is too high and will be set to the maximum of %2",
+ "LOGIMPL_BAD_DEBUG_STRING", "debug string '%1' has invalid format",
+ "LOGIMPL_BELOW_MIN_DEBUG", "debug level of %1 is too low and will be set to the minimum of %2",
+ NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+
diff --git a/src/lib/log/logimpl_messages.h b/src/lib/log/logimpl_messages.h
new file mode 100644
index 0000000..1b94838
--- /dev/null
+++ b/src/lib/log/logimpl_messages.h
@@ -0,0 +1,18 @@
+// File created from logimpl_messages.mes on Wed Jun 22 10:57:02 2011
+
+#ifndef __LOGIMPL_MESSAGES_H
+#define __LOGIMPL_MESSAGES_H
+
+#include <log/message_types.h>
+
+namespace isc {
+namespace log {
+
+extern const isc::log::MessageID LOGIMPL_ABOVE_MAX_DEBUG;
+extern const isc::log::MessageID LOGIMPL_BAD_DEBUG_STRING;
+extern const isc::log::MessageID LOGIMPL_BELOW_MIN_DEBUG;
+
+} // namespace log
+} // namespace isc
+
+#endif // __LOGIMPL_MESSAGES_H
diff --git a/src/lib/log/logimpl_messages.mes b/src/lib/log/logimpl_messages.mes
new file mode 100644
index 0000000..c40f80c
--- /dev/null
+++ b/src/lib/log/logimpl_messages.mes
@@ -0,0 +1,43 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \brief Logger Implementation Messages
+#
+# This holds messages generated by the underlying logger implementation. They
+# are likely to be specific to that implementation, and may well change if the
+# underlying implementation is changed. For that reason, they have been put
+# in a separate file.
+
+$NAMESPACE isc::log
+
+% LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+
+% LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+
+% LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
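
These IDs are used from the implementation like any other compiled-in message;
a sketch of reporting the "above maximum" case (the severity macro chosen and
the values passed are illustrative):

    #include <log/logger.h>
    #include <log/macros.h>
    #include <log/logimpl_messages.h>

    void reportDebugLevelClamp(int requested, int maximum) {
        // "log" is the sub-logger used elsewhere in this library.
        isc::log::Logger logger("log");
        LOG_ERROR(logger, isc::log::LOGIMPL_ABOVE_MAX_DEBUG)
            .arg(requested).arg(maximum);
    }
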
diff --git a/src/lib/log/macros.h b/src/lib/log/macros.h
index 3128131..42fb42e 100644
--- a/src/lib/log/macros.h
+++ b/src/lib/log/macros.h
@@ -16,6 +16,7 @@
#define __LOG_MACROS_H
#include <log/logger.h>
+#include <log/log_dbglevels.h>
/// \brief Macro to conveniently test debug output and log it
#define LOG_DEBUG(LOGGER, LEVEL, MESSAGE) \
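
The extra include makes the shared debug-level constants available wherever the
logging macros are used. A sketch of a call site (the message ID parameter here
stands for some compiled-in ID and is not a real constant):

    #include <log/logger.h>
    #include <log/macros.h>
    #include <log/message_types.h>

    void traceSomething(isc::log::Logger& logger,
                        const isc::log::MessageID& msgid) {
        // Emitted only if the logger is at DEBUG severity and its debug
        // level is at least 10.
        LOG_DEBUG(logger, 10, msgid);
    }
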
diff --git a/src/lib/log/message_dictionary.h b/src/lib/log/message_dictionary.h
index 23f76d7..519986d 100644
--- a/src/lib/log/message_dictionary.h
+++ b/src/lib/log/message_dictionary.h
@@ -79,7 +79,7 @@ public:
///
/// \return true if the message was added to the dictionary, false if the
/// message existed and it was not added.
- virtual bool add (const std::string& ident, const std::string& test);
+ virtual bool add (const std::string& ident, const std::string& text);
/// \brief Replace Message
diff --git a/src/lib/log/message_reader.cc b/src/lib/log/message_reader.cc
index 1a0b242..2710ab8 100644
--- a/src/lib/log/message_reader.cc
+++ b/src/lib/log/message_reader.cc
@@ -20,8 +20,8 @@
#include <iostream>
#include <fstream>
+#include <log/log_messages.h>
#include <log/message_exception.h>
-#include <log/messagedef.h>
#include <log/message_reader.h>
#include <util/strutil.h>
@@ -48,7 +48,7 @@ MessageReader::readFile(const string& file, MessageReader::Mode mode) {
// Open the file.
ifstream infile(file.c_str());
if (infile.fail()) {
- throw MessageException(MSG_OPENIN, file, strerror(errno));
+ throw MessageException(LOG_INPUT_OPEN_FAIL, file, strerror(errno));
}
// Loop round reading it. As we process the file one line at a time,
@@ -65,7 +65,7 @@ MessageReader::readFile(const string& file, MessageReader::Mode mode) {
// Why did the loop terminate?
if (!infile.eof()) {
- throw MessageException(MSG_READERR, file, strerror(errno));
+ throw MessageException(LOG_READ_ERROR, file, strerror(errno));
}
infile.close();
}
@@ -114,7 +114,7 @@ MessageReader::parseDirective(const std::string& text) {
} else {
// Unrecognised directive
- throw MessageException(MSG_UNRECDIR, tokens[0], lineno_);
+ throw MessageException(LOG_UNRECOGNISED_DIRECTIVE, tokens[0], lineno_);
}
}
@@ -138,13 +138,13 @@ MessageReader::parsePrefix(const vector<string>& tokens) {
// and numeric characters (and underscores) and does not start with a
// digit.
if (invalidSymbol(prefix_)) {
- throw MessageException(MSG_PRFINVARG, prefix_, lineno_);
+ throw MessageException(LOG_PREFIX_INVALID_ARG, prefix_, lineno_);
}
} else {
// Too many arguments
- throw MessageException(MSG_PRFEXTRARG, lineno_);
+ throw MessageException(LOG_PREFIX_EXTRA_ARGS, lineno_);
}
}
@@ -172,10 +172,10 @@ MessageReader::parseNamespace(const vector<string>& tokens) {
// Check argument count
if (tokens.size() < 2) {
- throw MessageException(MSG_NSNOARG, lineno_);
+ throw MessageException(LOG_NAMESPACE_NO_ARGS, lineno_);
} else if (tokens.size() > 2) {
- throw MessageException(MSG_NSEXTRARG, lineno_);
+ throw MessageException(LOG_NAMESPACE_EXTRA_ARGS, lineno_);
}
@@ -187,12 +187,12 @@ MessageReader::parseNamespace(const vector<string>& tokens) {
"abcdefghijklmnopqrstuvwxyz"
"0123456789_:";
if (tokens[1].find_first_not_of(valid_chars) != string::npos) {
- throw MessageException(MSG_NSINVARG, tokens[1], lineno_);
+ throw MessageException(LOG_NAMESPACE_INVALID_ARG, tokens[1], lineno_);
}
// All OK - unless the namespace has already been set.
if (ns_.size() != 0) {
- throw MessageException(MSG_DUPLNS, lineno_);
+ throw MessageException(LOG_DUPLICATE_NAMESPACE, lineno_);
}
// Prefix has not been set, so set it and return success.
@@ -219,7 +219,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
// A line comprising just the message introducer is not valid.
if (text.size() == 1) {
- throw MessageException(MSG_NOMSGID, text, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_ID, text, lineno_);
}
// Strip off the introducer and any leading space after that.
@@ -230,7 +230,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
if (first_delim == string::npos) {
// Just a single token in the line - this is not valid
- throw MessageException(MSG_NOMSGTXT, message_line, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_TEXT, message_line, lineno_);
}
// Extract the first token into the message ID, preceding it with the
@@ -240,7 +240,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
string ident = prefix_ + message_line.substr(0, first_delim);
if (prefix_.empty()) {
if (invalidSymbol(ident)) {
- throw MessageException(MSG_INVMSGID, ident, lineno_);
+ throw MessageException(LOG_INVALID_MESSAGE_ID, ident, lineno_);
}
}
isc::util::str::uppercase(ident);
@@ -252,7 +252,7 @@ MessageReader::parseMessage(const std::string& text, MessageReader::Mode mode) {
// ?? This happens if there are trailing delimiters, which should not
// occur as we have stripped trailing spaces off the line. Just treat
// this as a single-token error for simplicity's sake.
- throw MessageException(MSG_NOMSGTXT, message_line, lineno_);
+ throw MessageException(LOG_NO_MESSAGE_TEXT, message_line, lineno_);
}
// Add the result to the dictionary and to the non-added list if the add to
diff --git a/src/lib/log/messagedef.cc b/src/lib/log/messagedef.cc
deleted file mode 100644
index 5cc89b3..0000000
--- a/src/lib/log/messagedef.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// File created from messagedef.mes on Mon May 9 13:52:54 2011
-
-#include <cstddef>
-#include <log/message_types.h>
-#include <log/message_initializer.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID MSG_DUPLNS = "MSG_DUPLNS";
-extern const isc::log::MessageID MSG_DUPMSGID = "MSG_DUPMSGID";
-extern const isc::log::MessageID MSG_IDNOTFND = "MSG_IDNOTFND";
-extern const isc::log::MessageID MSG_INVMSGID = "MSG_INVMSGID";
-extern const isc::log::MessageID MSG_NOMSGID = "MSG_NOMSGID";
-extern const isc::log::MessageID MSG_NOMSGTXT = "MSG_NOMSGTXT";
-extern const isc::log::MessageID MSG_NSEXTRARG = "MSG_NSEXTRARG";
-extern const isc::log::MessageID MSG_NSINVARG = "MSG_NSINVARG";
-extern const isc::log::MessageID MSG_NSNOARG = "MSG_NSNOARG";
-extern const isc::log::MessageID MSG_OPENIN = "MSG_OPENIN";
-extern const isc::log::MessageID MSG_OPENOUT = "MSG_OPENOUT";
-extern const isc::log::MessageID MSG_PRFEXTRARG = "MSG_PRFEXTRARG";
-extern const isc::log::MessageID MSG_PRFINVARG = "MSG_PRFINVARG";
-extern const isc::log::MessageID MSG_RDLOCMES = "MSG_RDLOCMES";
-extern const isc::log::MessageID MSG_READERR = "MSG_READERR";
-extern const isc::log::MessageID MSG_UNRECDIR = "MSG_UNRECDIR";
-extern const isc::log::MessageID MSG_WRITERR = "MSG_WRITERR";
-
-} // namespace log
-} // namespace isc
-
-namespace {
-
-const char* values[] = {
- "MSG_DUPLNS", "line %1: duplicate $NAMESPACE directive found",
- "MSG_DUPMSGID", "duplicate message ID (%1) in compiled code",
- "MSG_IDNOTFND", "could not replace message text for '%1': no such message",
- "MSG_INVMSGID", "line %1: invalid message identification '%2'",
- "MSG_NOMSGID", "line %1: message definition line found without a message ID",
- "MSG_NOMSGTXT", "line %1: line found containing a message ID ('%2') and no text",
- "MSG_NSEXTRARG", "line %1: $NAMESPACE directive has too many arguments",
- "MSG_NSINVARG", "line %1: $NAMESPACE directive has an invalid argument ('%2')",
- "MSG_NSNOARG", "line %1: no arguments were given to the $NAMESPACE directive",
- "MSG_OPENIN", "unable to open message file %1 for input: %2",
- "MSG_OPENOUT", "unable to open %1 for output: %2",
- "MSG_PRFEXTRARG", "line %1: $PREFIX directive has too many arguments",
- "MSG_PRFINVARG", "line %1: $PREFIX directive has an invalid argument ('%2')",
- "MSG_RDLOCMES", "reading local message file %1",
- "MSG_READERR", "error reading from message file %1: %2",
- "MSG_UNRECDIR", "line %1: unrecognised directive '%2'",
- "MSG_WRITERR", "error writing to %1: %2",
- NULL
-};
-
-const isc::log::MessageInitializer initializer(values);
-
-} // Anonymous namespace
-
diff --git a/src/lib/log/messagedef.h b/src/lib/log/messagedef.h
deleted file mode 100644
index 79c8bab..0000000
--- a/src/lib/log/messagedef.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// File created from messagedef.mes on Mon May 9 13:52:54 2011
-
-#ifndef __MESSAGEDEF_H
-#define __MESSAGEDEF_H
-
-#include <log/message_types.h>
-
-namespace isc {
-namespace log {
-
-extern const isc::log::MessageID MSG_DUPLNS;
-extern const isc::log::MessageID MSG_DUPMSGID;
-extern const isc::log::MessageID MSG_IDNOTFND;
-extern const isc::log::MessageID MSG_INVMSGID;
-extern const isc::log::MessageID MSG_NOMSGID;
-extern const isc::log::MessageID MSG_NOMSGTXT;
-extern const isc::log::MessageID MSG_NSEXTRARG;
-extern const isc::log::MessageID MSG_NSINVARG;
-extern const isc::log::MessageID MSG_NSNOARG;
-extern const isc::log::MessageID MSG_OPENIN;
-extern const isc::log::MessageID MSG_OPENOUT;
-extern const isc::log::MessageID MSG_PRFEXTRARG;
-extern const isc::log::MessageID MSG_PRFINVARG;
-extern const isc::log::MessageID MSG_RDLOCMES;
-extern const isc::log::MessageID MSG_READERR;
-extern const isc::log::MessageID MSG_UNRECDIR;
-extern const isc::log::MessageID MSG_WRITERR;
-
-} // namespace log
-} // namespace isc
-
-#endif // __MESSAGEDEF_H
diff --git a/src/lib/log/messagedef.mes b/src/lib/log/messagedef.mes
deleted file mode 100644
index 51c04fa..0000000
--- a/src/lib/log/messagedef.mes
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# \brief Message Utility Message File
-#
-# This is the source of the set of messages generated by the message and
-# logging components. The associated .h and .cc files are created by hand from
-# this file though and are not built during the build process; this is to avoid
-# the chicken-and-egg situation where we need the files to build the message
-# compiler, yet we need the compiler to build the files.
-
-$PREFIX MSG_
-$NAMESPACE isc::log
-
-% DUPMSGID duplicate message ID (%1) in compiled code
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-
-% DUPLNS line %1: duplicate $NAMESPACE directive found
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-
-% IDNOTFND could not replace message text for '%1': no such message
-During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
-
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-
-% INVMSGID line %1: invalid message identification '%2'
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-
-% NOMSGID line %1: message definition line found without a message ID
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-
-% NOMSGTXT line %1: line found containing a message ID ('%2') and no text
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-
-% NSEXTRARG line %1: $NAMESPACE directive has too many arguments
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-
-% NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-
-% NSNOARG line %1: no arguments were given to the $NAMESPACE directive
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-
-% OPENIN unable to open message file %1 for input: %2
-The program was not able to open the specified input message file for the
-reason given.
-
-% OPENOUT unable to open %1 for output: %2
-The program was not able to open the specified output file for the reason
-given.
-
-% PRFEXTRARG line %1: $PREFIX directive has too many arguments
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-
-% PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-
-% RDLOCMES reading local message file %1
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-
-% READERR error reading from message file %1: %2
-The specified error was encountered reading from the named message file.
-
-% WRITERR error writing to %1: %2
-The specified error was encountered by the message compiler when writing to
-the named output file.
-
-% UNRECDIR line %1: unrecognised directive '%2'
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
diff --git a/src/lib/log/output_option.cc b/src/lib/log/output_option.cc
new file mode 100644
index 0000000..f56efb9
--- /dev/null
+++ b/src/lib/log/output_option.cc
@@ -0,0 +1,55 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <boost/algorithm/string.hpp>
+
+#include <log/log_messages.h>
+#include <log/macros.h>
+#include <log/output_option.h>
+
+namespace isc {
+namespace log {
+
+OutputOption::Destination
+getDestination(const std::string& dest_str) {
+ if (boost::iequals(dest_str, "console")) {
+ return OutputOption::DEST_CONSOLE;
+ } else if (boost::iequals(dest_str, "file")) {
+ return OutputOption::DEST_FILE;
+ } else if (boost::iequals(dest_str, "syslog")) {
+ return OutputOption::DEST_SYSLOG;
+ } else {
+ Logger logger("log");
+ LOG_ERROR(logger, LOG_BAD_DESTINATION).arg(dest_str);
+ return OutputOption::DEST_CONSOLE;
+ }
+}
+
+OutputOption::Stream
+getStream(const std::string& stream_str) {
+ if (boost::iequals(stream_str, "stderr")) {
+ return OutputOption::STR_STDERR;
+ } else if (boost::iequals(stream_str, "stdout")) {
+ return OutputOption::STR_STDOUT;
+ } else {
+ Logger logger("log");
+ LOG_ERROR(logger, LOG_BAD_STREAM).arg(stream_str);
+ return OutputOption::STR_STDOUT;
+ }
+}
+
+} // namespace log
+} // namespace isc
diff --git a/src/lib/log/output_option.h b/src/lib/log/output_option.h
new file mode 100644
index 0000000..cbb7e95
--- /dev/null
+++ b/src/lib/log/output_option.h
@@ -0,0 +1,85 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __OUTPUT_OPTION_H
+#define __OUTPUT_OPTION_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string>
+
+/// \brief Logger Output Option
+///
+/// The logging configuration options are a list of logger specifications, each
+/// with one or more output options. This class represents an output option;
+/// one or more of these are attached to a LoggerSpecification object which is
+/// then passed to the LoggerManager to configure the logger.
+///
+/// There are three distinct output types (console, file and syslog) and the
+/// options for each do not really overlap. Although it is tempting to
+/// define a base OutputOption class and derive a class for each type
+/// (ConsoleOutputOptions etc.), that would be messy to use in practice: at
+/// some point the exact class would have to be known to get the class-specific
+/// options, and the (pointer to the) base class cast to the appropriate type.
+/// Instead, this "struct" contains the union of all output options; it is up
+/// to the caller to cherry-pick the members it needs.
+///
+/// One final note: this object holds data and does no computation. For this
+/// reason, it is a "struct" and members are accessed directly instead of
+/// through methods.
+
+namespace isc {
+namespace log {
+
+struct OutputOption {
+
+ /// Destinations. Prefixed "DEST_" to avoid problems with the C stdio.h
+ /// FILE type.
+ typedef enum {
+ DEST_CONSOLE = 0,
+ DEST_FILE = 1,
+ DEST_SYSLOG = 2
+ } Destination;
+
+ /// If console, stream on which messages are output
+ typedef enum {
+ STR_STDOUT = 1,
+ STR_STDERR = 2
+ } Stream;
+
+ /// \brief Constructor
+ OutputOption() : destination(DEST_CONSOLE), stream(STR_STDERR),
+ flush(false), facility("LOCAL0"), filename(""),
+ maxsize(0), maxver(0)
+ {}
+
+ /// Members.
+
+ Destination destination; ///< Where the output should go
+ Stream stream; ///< stdout/stderr if console output
+ bool flush; ///< true to flush after each message
+ std::string facility; ///< syslog facility
+ std::string filename; ///< Filename if file output
+ size_t maxsize; ///< 0 if no maximum size
+ unsigned int maxver; ///< Maximum versions (none if 0)
+};
+
+OutputOption::Destination getDestination(const std::string& dest_str);
+OutputOption::Stream getStream(const std::string& stream_str);
+
+
+} // namespace log
+} // namespace isc
+
+#endif // __OUTPUT_OPTION_H
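
A sketch of filling in an option from configuration strings with the helper
declared above (the destination string and file path are hypothetical
configuration values):

    #include <string>
    #include <log/output_option.h>

    isc::log::OutputOption
    makeOption(const std::string& dest_str, const std::string& path) {
        isc::log::OutputOption opt;     // defaults: console output to stderr

        // "console", "file" or "syslog" (case-insensitive); anything else is
        // reported via LOG_BAD_DESTINATION and falls back to the console.
        opt.destination = isc::log::getDestination(dest_str);

        if (opt.destination == isc::log::OutputOption::DEST_FILE) {
            opt.filename = path;        // e.g. "/var/log/b10-example.log"
            opt.maxsize = 1048576;      // roll the file at about 1MB
            opt.maxver = 4;             // keep four rolled versions
        }
        return (opt);
    }
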
diff --git a/src/lib/log/root_logger_name.cc b/src/lib/log/root_logger_name.cc
deleted file mode 100644
index 58d9407..0000000
--- a/src/lib/log/root_logger_name.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-#include <root_logger_name.h>
-
-namespace isc {
-namespace log {
-
-namespace {
-
-// Obtain the root logger name in a way that is safe for statically-initialized
-// objects.
-
-std::string&
-getRootLoggerNameInternal() {
- static std::string root_name;
- return (root_name);
-}
-
-} // Anonymous namespace
-
-void
-setRootLoggerName(const std::string& name) {
- getRootLoggerNameInternal() = name;
-}
-
-const std::string& getRootLoggerName() {
- return (getRootLoggerNameInternal());
-}
-
-} // namespace log
-} // namespace isc
diff --git a/src/lib/log/root_logger_name.h b/src/lib/log/root_logger_name.h
deleted file mode 100644
index 9d50332..0000000
--- a/src/lib/log/root_logger_name.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __ROOT_LOGGER_NAME_H
-#define __ROOT_LOGGER_NAME_H
-
-#include <string>
-
-/// \brief Define Name of Root Logger
-///
-/// In BIND-10, the name root logger of a program is the name of the program
-/// itself (in contrast to packages such as log4cxx where the root logger name
-// is something like "."). These trivial functions allow the setting and
-// getting of that name by the logger classes.
-
-namespace isc {
-namespace log {
-
-/// \brief Set Root Logger Name
-///
-/// This function should be called by the program's initialization code before
-/// any logging functions are called.
-///
-/// \param name Name of the root logger. This should be the program name.
-void setRootLoggerName(const std::string& name);
-
-/// \brief Get Root Logger Name
-///
-/// \return Name of the root logger.
-const std::string& getRootLoggerName();
-
-}
-}
-
-#endif // __ROOT_LOGGER_NAME_H
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 46065e8..a5f793c 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -2,8 +2,6 @@ SUBDIRS = .
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/log -I$(top_builddir)/src/lib/log
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
AM_CXXFLAGS = $(B10_CXXFLAGS)
if USE_STATIC_LINK
@@ -15,33 +13,64 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
-run_unittests_SOURCES = root_logger_name_unittest.cc
+run_unittests_SOURCES = run_unittests.cc
+run_unittests_SOURCES += log_formatter_unittest.cc
+run_unittests_SOURCES += logger_level_impl_unittest.cc
+run_unittests_SOURCES += logger_level_unittest.cc
+run_unittests_SOURCES += logger_manager_unittest.cc
+run_unittests_SOURCES += logger_name_unittest.cc
+run_unittests_SOURCES += logger_support_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += logger_specification_unittest.cc
run_unittests_SOURCES += message_dictionary_unittest.cc
-run_unittests_SOURCES += message_reader_unittest.cc
-run_unittests_SOURCES += message_initializer_unittest.cc
run_unittests_SOURCES += message_initializer_unittest_2.cc
-run_unittests_SOURCES += run_unittests.cc
-run_unittests_SOURCES += log_formatter_unittest.cc
+run_unittests_SOURCES += message_initializer_unittest.cc
+run_unittests_SOURCES += message_reader_unittest.cc
+run_unittests_SOURCES += output_option_unittest.cc
-run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_CXXFLAGS = $(AM_CXXFLAGS)
+if USE_CLANGPP
+# This is to workaround unused variables tcout and tcerr in
+# log4cplus's streams.h.
+run_unittests_CXXFLAGS += -Wno-unused-variable
+endif
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
-TESTS += logger_support_test
-logger_support_test_SOURCES = logger_support_test.cc
-logger_support_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-logger_support_test_LDFLAGS = $(AM_LDFLAGS)
-logger_support_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
-logger_support_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
+noinst_PROGRAMS = logger_example
+logger_example_SOURCES = logger_example.cc
+logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
+logger_example_LDADD = $(top_builddir)/src/lib/log/liblog.la
+logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
+logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+noinst_PROGRAMS += init_logger_test
+init_logger_test_SOURCES = init_logger_test.cc
+init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
+init_logger_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
+init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+noinst_PROGRAMS += $(TESTS)
-noinst_PROGRAMS = $(TESTS)
+# Additional test using the shell. These are principally tests
+# where the global logging environment is affected, and where the
+# output needs to be compared with stored output (where "cut" and
+# "diff" are useful utilities).
-# Additional test using the shell
-PYTESTS = run_time_init_test.sh
check-local:
- $(SHELL) $(abs_builddir)/run_time_init_test.sh
+ $(SHELL) $(abs_builddir)/console_test.sh
+ $(SHELL) $(abs_builddir)/destination_test.sh
+ $(SHELL) $(abs_builddir)/init_logger_test.sh
+ $(SHELL) $(abs_builddir)/local_file_test.sh
+ $(SHELL) $(abs_builddir)/severity_test.sh
diff --git a/src/lib/log/tests/console_test.sh.in b/src/lib/log/tests/console_test.sh.in
new file mode 100755
index 0000000..a16dc23
--- /dev/null
+++ b/src/lib/log/tests/console_test.sh.in
@@ -0,0 +1,67 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# The logger supports the idea of a "console" logger that logs to either stdout
+# or stderr. This test checks that both these options work.
+
+testname="Console output test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/console_test_tempfile_$$
+
+# Look at tempfile and check that the count equals the expected count
+passfail() {
+ count=`wc -l $tempfile | awk '{print $1}'`
+ if [ $count -eq $1 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + 1`
+ fi
+}
+
+echo -n "1. Checking that console output to stdout goes to stdout:"
+rm -f $tempfile
+./logger_example -c stdout -s error 1> $tempfile 2> /dev/null
+passfail 4
+
+echo -n "2. Checking that console output to stdout does not go to stderr:"
+rm -f $tempfile
+./logger_example -c stdout -s error 1> /dev/null 2> $tempfile
+passfail 0
+
+echo -n "3. Checking that console output to stderr goes to stderr:"
+rm -f $tempfile
+./logger_example -c stderr -s error 1> /dev/null 2> $tempfile
+passfail 4
+
+echo -n "4. Checking that console output to stderr does not go to stdout:"
+rm -f $tempfile
+./logger_example -c stderr -s error 1> $tempfile 2> /dev/null
+passfail 0
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up
+rm -f $tempfile
+
+exit $failcount
diff --git a/src/lib/log/tests/destination_test.sh.in b/src/lib/log/tests/destination_test.sh.in
new file mode 100755
index 0000000..1cfb9fb
--- /dev/null
+++ b/src/lib/log/tests/destination_test.sh.in
@@ -0,0 +1,91 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that the logger will route messages to the chosen destination.
+
+testname="Destination test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/destination_test_tempfile_$$
+destfile1=@abs_builddir@/destination_test_destfile_1_$$
+destfile2=@abs_builddir@/destination_test_destfile_2_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+echo "1. One logger, multiple destinations:"
+cat > $tempfile << .
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+.
+rm -f $destfile1 $destfile2
+./logger_example -s error -f $destfile1 -f $destfile2
+
+echo -n " - destination 1:"
+cut -d' ' -f3- $destfile1 | diff $tempfile -
+passfail $?
+
+echo -n " - destination 2:"
+cut -d' ' -f3- $destfile2 | diff $tempfile -
+passfail $?
+
+echo "2. Two loggers, different destinations and severities"
+rm -f $destfile1 $destfile2
+./logger_example -l example -s info -f $destfile1 -l alpha -s warn -f $destfile2
+
+# All output for example and example.beta should have gone to destfile1.
+# Output for example.alpha should have gone to destfile2.
+
+cat > $tempfile << .
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
+.
+echo -n " - destination 1:"
+cut -d' ' -f3- $destfile1 | diff $tempfile -
+passfail $?
+
+echo -n " - destination 2:"
+cat > $tempfile << .
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+.
+cut -d' ' -f3- $destfile2 | diff $tempfile -
+passfail $?
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up.
+rm -f $tempfile $destfile1 $destfile2
+
+exit $failcount
diff --git a/src/lib/log/tests/init_logger_test.cc b/src/lib/log/tests/init_logger_test.cc
new file mode 100644
index 0000000..104c078
--- /dev/null
+++ b/src/lib/log/tests/init_logger_test.cc
@@ -0,0 +1,42 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <log/macros.h>
+#include <log/logger_support.h>
+#include <log/log_messages.h>
+
+using namespace isc::log;
+
+/// \brief Test InitLogger
+///
+/// A program used in testing the logger that initializes logging using
+/// initLogger(), then outputs several messages at different severities and
+/// debug levels. An external script sets the environment variables and checks
+/// that they have the desired effect.
+
+int
+main(int, char**) {
+ initLogger();
+ Logger logger("log");
+
+ LOG_DEBUG(logger, 0, LOG_BAD_DESTINATION).arg("debug-0");
+ LOG_DEBUG(logger, 50, LOG_BAD_DESTINATION).arg("debug-50");
+ LOG_DEBUG(logger, 99, LOG_BAD_DESTINATION).arg("debug-99");
+ LOG_INFO(logger, LOG_BAD_SEVERITY).arg("info");
+ LOG_WARN(logger, LOG_BAD_STREAM).arg("warn");
+ LOG_ERROR(logger, LOG_DUPLICATE_MESSAGE_ID).arg("error");
+ LOG_FATAL(logger, LOG_NO_MESSAGE_ID).arg("fatal");
+
+ return (0);
+}
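
For context, a hypothetical sketch of how a Google Test driver (such as the
run_unittests.cc listed in the Makefile above) could use the same call,
initializing logging from the B10_LOGGER_* environment variables before running
the test suite; this is an assumption about run_unittests.cc, not a quote from it.

    #include <gtest/gtest.h>
    #include <log/logger_support.h>

    int main(int argc, char** argv) {
        ::testing::InitGoogleTest(&argc, argv);  // standard Google Test setup
        isc::log::initLogger();                  // honours B10_LOGGER_* settings
        return (RUN_ALL_TESTS());
    }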
diff --git a/src/lib/log/tests/init_logger_test.sh.in b/src/lib/log/tests/init_logger_test.sh.in
new file mode 100755
index 0000000..795419b
--- /dev/null
+++ b/src/lib/log/tests/init_logger_test.sh.in
@@ -0,0 +1,110 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that the initLogger() call used for unit tests respects the setting of
+# the environment variables.
+
+testname="initLogger test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/init_logger_test_tempfile_$$
+destfile=@abs_builddir@/init_logger_test_destfile_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+echo "1. Checking that B10_LOGGER_SEVERITY/B10_LOGGER_DBGLEVEL work"
+
+echo -n " - severity=DEBUG, dbglevel=99: "
+cat > $tempfile << .
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-0
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-50
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-99
+INFO [bind10.log] LOG_BAD_SEVERITY unrecognized log severity: info
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=DEBUG B10_LOGGER_DBGLEVEL=99 ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n " - severity=DEBUG, dbglevel=50: "
+cat > $tempfile << .
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-0
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-50
+INFO [bind10.log] LOG_BAD_SEVERITY unrecognized log severity: info
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=DEBUG B10_LOGGER_DBGLEVEL=50 ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n " - severity=WARN: "
+cat > $tempfile << .
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=WARN ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo "2. Checking that B10_LOGGER_DESTINATION works"
+
+echo -n " - stdout: "
+cat > $tempfile << .
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=stdout ./init_logger_test 1> $destfile
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+echo -n " - stderr: "
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=stderr ./init_logger_test 2> $destfile
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+echo -n " - file: "
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=$destfile ./init_logger_test
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+# Note: can't automatically test syslog output.
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up.
+rm -f $tempfile $destfile
+
+exit $failcount
diff --git a/src/lib/log/tests/local_file_test.sh.in b/src/lib/log/tests/local_file_test.sh.in
new file mode 100755
index 0000000..9b898e6
--- /dev/null
+++ b/src/lib/log/tests/local_file_test.sh.in
@@ -0,0 +1,83 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that a local message file can override the definitions in the message
+# dictionary.
+
+testname="Local message file test"
+echo $testname
+
+failcount=0
+localmes=@abs_builddir@/localdef_mes_$$
+tempfile=@abs_builddir@/run_time_init_test_tempfile_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+# Create the local message file for testing
+
+cat > $localmes << .
+% LOG_NOTHERE this message is not in the global dictionary
+% LOG_READ_ERROR replacement read error, parameters: '%1' and '%2'
+% LOG_READING_LOCAL_FILE replacement read local message file, parameter is '%1'
+.
+
+echo -n "1. Local message replacement:"
+cat > $tempfile << .
+WARN [example.log] LOG_NO_SUCH_MESSAGE could not replace message text for 'LOG_NOTHERE': no such message
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE replacement read local message file, parameter is 'dummy/file'
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR replacement read error, parameters: 'a.txt' and 'dummy reason'
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+.
+./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n "2. Report error if unable to read local message file:"
+cat > $tempfile << .
+ERROR [example.log] LOG_INPUT_OPEN_FAIL unable to open message file $localmes for input: No such file or directory
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+.
+rm -f $localmes
+./logger_example -c stdout -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up.
+rm -f $tempfile
+
+exit $failcount
diff --git a/src/lib/log/tests/log_formatter_unittest.cc b/src/lib/log/tests/log_formatter_unittest.cc
index b67831a..b91665d 100644
--- a/src/lib/log/tests/log_formatter_unittest.cc
+++ b/src/lib/log/tests/log_formatter_unittest.cc
@@ -14,6 +14,7 @@
#include <gtest/gtest.h>
#include <log/log_formatter.h>
+#include <log/logger_level.h>
#include <vector>
#include <string>
@@ -24,11 +25,11 @@ namespace {
class FormatterTest : public ::testing::Test {
protected:
- typedef pair<const char*, string> Output;
+ typedef pair<isc::log::Severity, string> Output;
typedef isc::log::Formatter<FormatterTest> Formatter;
vector<Output> outputs;
public:
- void output(const char* prefix, const string& message) {
+ void output(const isc::log::Severity& prefix, const string& message) {
outputs.push_back(Output(prefix, message));
}
// Just shortcut for new string
@@ -46,9 +47,9 @@ TEST_F(FormatterTest, inactive) {
// Create an active formatter and check it produces output. Does no arg
// substitution yet
TEST_F(FormatterTest, active) {
- Formatter("TEST", s("Text of message"), this);
+ Formatter(isc::log::INFO, s("Text of message"), this);
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("Text of message", outputs[0].second);
}
@@ -62,53 +63,53 @@ TEST_F(FormatterTest, inactiveArg) {
TEST_F(FormatterTest, stringArg) {
{
SCOPED_TRACE("C++ string");
- Formatter("TEST", s("Hello %1"), this).arg(string("World"));
+ Formatter(isc::log::INFO, s("Hello %1"), this).arg(string("World"));
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("Hello World", outputs[0].second);
}
{
SCOPED_TRACE("C++ string");
- Formatter("TEST", s("Hello %1"), this).arg(string("Internet"));
+ Formatter(isc::log::INFO, s("Hello %1"), this).arg(string("Internet"));
ASSERT_EQ(2, outputs.size());
- EXPECT_STREQ("TEST", outputs[1].first);
+ EXPECT_EQ(isc::log::INFO, outputs[1].first);
EXPECT_EQ("Hello Internet", outputs[1].second);
}
}
// Can convert to string
TEST_F(FormatterTest, intArg) {
- Formatter("TEST", s("The answer is %1"), this).arg(42);
+ Formatter(isc::log::INFO, s("The answer is %1"), this).arg(42);
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("The answer is 42", outputs[0].second);
}
// Can use multiple arguments at different places
TEST_F(FormatterTest, multiArg) {
- Formatter("TEST", s("The %2 are %1"), this).arg("switched").
+ Formatter(isc::log::INFO, s("The %2 are %1"), this).arg("switched").
arg("arguments");
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("The arguments are switched", outputs[0].second);
}
// Can survive and complains if placeholder is missing
TEST_F(FormatterTest, missingPlace) {
- EXPECT_NO_THROW(Formatter("TEST", s("Missing the first %2"), this).
+ EXPECT_NO_THROW(Formatter(isc::log::INFO, s("Missing the first %2"), this).
arg("missing").arg("argument"));
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("Missing the first argument "
"@@Missing placeholder %1 for 'missing'@@", outputs[0].second);
}
// Can replace multiple placeholders
TEST_F(FormatterTest, multiPlaceholder) {
- Formatter("TEST", s("The %1 is the %1"), this).
+ Formatter(isc::log::INFO, s("The %1 is the %1"), this).
arg("first rule of tautology club");
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("The first rule of tautology club is "
"the first rule of tautology club", outputs[0].second);
}
@@ -116,9 +117,9 @@ TEST_F(FormatterTest, multiPlaceholder) {
// Test we can cope with replacement containing the placeholder
TEST_F(FormatterTest, noRecurse) {
// If we recurse, this will probably eat all the memory and crash
- Formatter("TEST", s("%1"), this).arg("%1 %1");
+ Formatter(isc::log::INFO, s("%1"), this).arg("%1 %1");
ASSERT_EQ(1, outputs.size());
- EXPECT_STREQ("TEST", outputs[0].first);
+ EXPECT_EQ(isc::log::INFO, outputs[0].first);
EXPECT_EQ("%1 %1", outputs[0].second);
}
diff --git a/src/lib/log/tests/logger_example.cc b/src/lib/log/tests/logger_example.cc
new file mode 100644
index 0000000..2170066
--- /dev/null
+++ b/src/lib/log/tests/logger_example.cc
@@ -0,0 +1,305 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// \brief Example Program
+///
+/// Simple example program showing how to use the logger. The various
+/// command-line options allow most aspects of the logger to be exercised,
+/// making this a useful tool for testing.
+///
+/// See the usage() function for details of use.
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <boost/lexical_cast.hpp>
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <util/strutil.h>
+
+#include <log/logger.h>
+#include <log/logger_level.h>
+#include <log/logger_manager.h>
+#include <log/logger_name.h>
+#include <log/logger_specification.h>
+#include <log/macros.h>
+
+// Include a set of message definitions.
+#include <log/log_messages.h>
+
+using namespace isc::log;
+using namespace std;
+
+
+// Print usage information
+
+void usage() {
+ cout <<
+"logger_support_test [-h | [logger_spec] [[logger_spec]...]]\n"
+"\n"
+" -h Print this message and exit\n"
+"\n"
+"The rest of the command line comprises the set of logger specifications.\n"
+"Each specification is of the form:\n"
+"\n"
+" -l logger [-s severity] [-d dbglevel] output_spec] [[output_spec] ...\n"
+"\n"
+"where:\n"
+"\n"
+" -l logger Give the name of the logger to which the following\n"
+" output specifications will apply.\n"
+"\n"
+"Each logger is followed by the indication of the serverity it is logging\n"
+"and, if applicable, its debug level:\n"
+"\n"
+" -d dbglevel Debug level. Only interpreted if the severity is 'debug'\n"
+" this is a number between 0 and 99.\n"
+" -s severity Set the severity of messages output. 'severity' is one\n"
+" of 'debug', 'info', 'warn', 'error', 'fatal', the default\n"
+" being 'info'.\n"
+"\n"
+"The output specifications - there may be more than one per logger - detail\n"
+"the output streams attached to the logger. These are of the form:\n"
+"\n"
+" -c stream | -f file [-m maxver] [-z maxsize] | -y facility\n"
+"\n"
+"These are:\n"
+"\n"
+" -c stream Send output to the console. 'stream' is one of 'stdout'\n"
+" of 'stderr'.\n"
+" -f file Send output to specified file, appending to existing file\n"
+" if one exists.\n"
+" -y facility Send output to the syslog file with the given facility\n"
+" name (e.g. local1, cron etc.)\n"
+"\n"
+"The following can be specified for the file logger:\n"
+"\n"
+" -m maxver If file rolling is selected (by the maximum file size being\n"
+" non-zero), the maximum number of versions to keep (defaults\n"
+" to 0)\n"
+" -z maxsize Maximum size of the file before the file is closed and a\n"
+" new one opened. The default of 0 means no maximum size.\n"
+"\n"
+"If none of -c, -f or -y is given, by default, output is sent to stdout. If no\n"
+"logger is specified, the default is the program's root logger ('example').\n";
+
+}
+
+
+// The program sets the attributes on the root logger and logs a set of
+// messages. Looking at the output determines whether the program worked.
+
+int main(int argc, char** argv) {
+ const string ROOT_NAME = "example";
+
+ bool sw_found = false; // Set true if switch found
+ bool c_found = false; // Set true if "-c" found
+ bool f_found = false; // Set true if "-f" found
+ bool y_found = false; // Set true if "-y" found
+ int option; // For getopt() processing
+ OutputOption def_opt; // Default output option - used
+ // for initialization
+ LoggerSpecification cur_spec(ROOT_NAME);// Current specification
+ OutputOption cur_opt; // Current output option
+ vector<LoggerSpecification> loggers; // Set of logger specifications
+ vector<OutputOption> options; // Output options for logger
+ std::string severity; // Severity set for logger
+
+ // Initialize logging system - set the root logger name.
+ LoggerManager manager;
+ manager.init(ROOT_NAME);
+
+ // In the parsing loop that follows, the construction of the logging
+ // specification is always "one behind". In other words, the parsing of
+ // command-line options updates the current logging specification/output
+ // options. When the flag indicating a new logger or output specification
+ // is encountered, the previous one is added to the list.
+ //
+ // One complication is that there is deemed to be a default active when
+ // the parsing starts (console output for the BIND 10 root logger). This
+ // is included in the logging specifications UNLESS the first switch on
+ // the command line is a "-l" flag starting a new logger. To track this,
+ // the "sw_found" flag is set when a switch is completey processed. The
+ // processing of "-l" will only add information for a previous logger to
+ // the list if this flag is set.
+ while ((option = getopt(argc, argv, "hc:d:f:l:m:s:y:z:")) != -1) {
+ switch (option) {
+ case 'c': // Console output
+ // New output spec. If one was currently active, add it to the
+ // list and reset the current output option to the defaults.
+ if (c_found || f_found || y_found) {
+ cur_spec.addOutputOption(cur_opt);
+ cur_opt = def_opt;
+ c_found = f_found = y_found = false;
+ }
+
+ // Set the output option for this switch.
+ c_found = true;
+ cur_opt.destination = OutputOption::DEST_CONSOLE;
+ if (strcmp(optarg, "stdout") == 0) {
+ cur_opt.stream = OutputOption::STR_STDOUT;
+
+ } else if (strcmp(optarg, "stderr") == 0) {
+ cur_opt.stream = OutputOption::STR_STDERR;
+
+ } else {
+ cerr << "Unrecognised console option: " << optarg << "\n";
+ return (1);
+ }
+ break;
+
+ case 'd': // Debug level
+ cur_spec.setDbglevel(boost::lexical_cast<int>(optarg));
+ break;
+
+ case 'f': // File output specification
+ // New output spec. If one was currently active, add it to the
+ // list and reset the current output option to the defaults.
+ if (c_found || f_found || y_found) {
+ cur_spec.addOutputOption(cur_opt);
+ cur_opt = def_opt;
+ c_found = f_found = y_found = false;
+ }
+
+ // Set the output option for this switch.
+ f_found = true;
+ cur_opt.destination = OutputOption::DEST_FILE;
+ cur_opt.filename = optarg;
+ break;
+
+ case 'h': // Help
+ usage();
+ return (0);
+
+ case 'l': // Logger
+ // If a current specification is active, add the last output option
+ // to it, add it to the list and reset. A specification is active
+ // if at least one switch has been previously found.
+ if (sw_found) {
+ cur_spec.addOutputOption(cur_opt);
+ loggers.push_back(cur_spec);
+ cur_spec.reset();
+ }
+
+ // Set the logger name
+ cur_spec.setName(std::string(optarg));
+
+ // Reset the output option to the default.
+ cur_opt = def_opt;
+
+ // Indicate nothing is found to prevent the console option (the
+ // default output option) being added to the output list if an
+ // output option is found.
+ c_found = f_found = y_found = false;
+ break;
+
+ case 'm': // Maximum file version
+ if (!f_found) {
+ std::cerr << "Attempt to set maximum version (-m) "
+ "outside of file output specification\n";
+ return (1);
+ }
+ try {
+ cur_opt.maxver = boost::lexical_cast<unsigned int>(optarg);
+ } catch (boost::bad_lexical_cast&) {
+ std::cerr << "Maximum version (-m) argument must be a positive "
+ "integer\n";
+ return (1);
+ }
+ break;
+
+ case 's': // Severity
+ severity = optarg;
+ isc::util::str::uppercase(severity);
+ cur_spec.setSeverity(getSeverity(severity));
+ break;
+
+ case 'y': // Syslog output
+ // New output spec. If one was currently active, add it to the
+ // list and reset the current output option to the defaults.
+ if (c_found || f_found || y_found) {
+ cur_spec.addOutputOption(cur_opt);
+ cur_opt = def_opt;
+ c_found = f_found = y_found = false;
+ }
+ y_found = true;
+ cur_opt.destination = OutputOption::DEST_SYSLOG;
+ cur_opt.facility = optarg;
+ break;
+
+ case 'z': // Maximum size
+ if (! f_found) {
+ std::cerr << "Attempt to set file size (-z) "
+ "outside of file output specification\n";
+ return (1);
+ }
+ try {
+ cur_opt.maxsize = boost::lexical_cast<size_t>(optarg);
+ } catch (boost::bad_lexical_cast&) {
+ std::cerr << "File size (-z) argument must be a positive "
+ "integer\n";
+ return (1);
+ }
+ break;
+
+
+ default:
+ std::cerr << "Unrecognised option: " <<
+ static_cast<char>(option) << "\n";
+ return (1);
+ }
+
+ // Have found at least one command-line switch, so note the fact.
+ sw_found = true;
+ }
+
+ // Add the current (unfinished) specification to the list.
+ cur_spec.addOutputOption(cur_opt);
+ loggers.push_back(cur_spec);
+
+ // Set the logging options.
+ manager.process(loggers.begin(), loggers.end());
+
+ // Set the local file
+ if (optind < argc) {
+ LoggerManager::readLocalMessageFile(argv[optind]);
+ }
+
+ // Log a few messages to different loggers.
+ isc::log::Logger logger_ex(ROOT_NAME);
+ isc::log::Logger logger_alpha("alpha");
+ isc::log::Logger logger_beta("beta");
+
+ LOG_FATAL(logger_ex, LOG_WRITE_ERROR).arg("test1").arg("42");
+ LOG_ERROR(logger_ex, LOG_READING_LOCAL_FILE).arg("dummy/file");
+ LOG_WARN(logger_ex, LOG_BAD_STREAM).arg("example");
+ LOG_WARN(logger_alpha, LOG_READ_ERROR).arg("a.txt").arg("dummy reason");
+ LOG_INFO(logger_alpha, LOG_INPUT_OPEN_FAIL).arg("example.msg").arg("dummy reason");
+ LOG_DEBUG(logger_ex, 0, LOG_READING_LOCAL_FILE).arg("example/0");
+ LOG_DEBUG(logger_ex, 24, LOG_READING_LOCAL_FILE).arg("example/24");
+ LOG_DEBUG(logger_ex, 25, LOG_READING_LOCAL_FILE).arg("example/25");
+ LOG_DEBUG(logger_ex, 26, LOG_READING_LOCAL_FILE).arg("example/26");
+ LOG_FATAL(logger_beta, LOG_BAD_SEVERITY).arg("beta_fatal");
+ LOG_ERROR(logger_beta, LOG_BAD_DESTINATION).arg("beta_error");
+ LOG_WARN(logger_beta, LOG_BAD_STREAM).arg("beta_warn");
+ LOG_INFO(logger_beta, LOG_READ_ERROR).arg("beta").arg("info");
+ LOG_DEBUG(logger_beta, 25, LOG_BAD_SEVERITY).arg("beta/25");
+ LOG_DEBUG(logger_beta, 26, LOG_BAD_SEVERITY).arg("beta/26");
+
+ return (0);
+}
diff --git a/src/lib/log/tests/logger_impl_log4cxx_unittest.cc b/src/lib/log/tests/logger_impl_log4cxx_unittest.cc
deleted file mode 100644
index cab2678..0000000
--- a/src/lib/log/tests/logger_impl_log4cxx_unittest.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <iostream>
-#include <string>
-
-#include <gtest/gtest.h>
-
-#include <log/root_logger_name.h>
-#include <log/logger.h>
-#include <log/logger_impl.h>
-#include <log/messagedef.h>
-
-using namespace isc;
-using namespace isc::log;
-using namespace std;
-
-/// \brief Log4cxx Implementation Tests
-///
-/// Some tests of methods that are not directly tested by the logger unit tests
-/// (when the logger is configured to use log4cxx)
-
-namespace isc {
-namespace log {
-
-/// \brief Test Logger
-///
-/// This logger is a subclass of the logger implementation class under test, but
-/// makes protected methods public (for testing)
-
-class TestLoggerImpl : public LoggerImpl {
-public:
- /// \brief constructor
- TestLoggerImpl(const string& name) : LoggerImpl(name, true)
- {}
-
-
- /// \brief Conversion Between log4cxx Number and BIND-10 Severity
- Severity convertLevel(int value) {
- return (LoggerImpl::convertLevel(value));
- }
-};
-
-} // namespace log
-} // namespace isc
-
-
-class LoggerImplTest : public ::testing::Test {
-protected:
- LoggerImplTest()
- {
- }
-};
-
-// Test the number to severity conversion function
-
-TEST_F(LoggerImplTest, ConvertLevel) {
-
- // Create a logger
- RootLoggerName::setName("test3");
- TestLoggerImpl logger("alpha");
-
- // Basic 1:1
- EXPECT_EQ(isc::log::DEBUG, logger.convertLevel(log4cxx::Level::DEBUG_INT));
- EXPECT_EQ(isc::log::INFO, logger.convertLevel(log4cxx::Level::INFO_INT));
- EXPECT_EQ(isc::log::WARN, logger.convertLevel(log4cxx::Level::WARN_INT));
- EXPECT_EQ(isc::log::WARN, logger.convertLevel(log4cxx::Level::WARN_INT));
- EXPECT_EQ(isc::log::ERROR, logger.convertLevel(log4cxx::Level::ERROR_INT));
- EXPECT_EQ(isc::log::FATAL, logger.convertLevel(log4cxx::Level::FATAL_INT));
- EXPECT_EQ(isc::log::FATAL, logger.convertLevel(log4cxx::Level::FATAL_INT));
- EXPECT_EQ(isc::log::NONE, logger.convertLevel(log4cxx::Level::OFF_INT));
-
- // Now some debug levels
- EXPECT_EQ(isc::log::DEBUG,
- logger.convertLevel(log4cxx::Level::DEBUG_INT - 1));
- EXPECT_EQ(isc::log::DEBUG,
- logger.convertLevel(log4cxx::Level::DEBUG_INT - MAX_DEBUG_LEVEL));
- EXPECT_EQ(isc::log::DEBUG,
- logger.convertLevel(log4cxx::Level::DEBUG_INT - 2 * MAX_DEBUG_LEVEL));
-}
diff --git a/src/lib/log/tests/logger_level_impl_unittest.cc b/src/lib/log/tests/logger_level_impl_unittest.cc
new file mode 100644
index 0000000..dacd202
--- /dev/null
+++ b/src/lib/log/tests/logger_level_impl_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <string>
+
+#include <gtest/gtest.h>
+#include <boost/static_assert.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <log/logger_level_impl.h>
+#include <log/logger_support.h>
+#include <log4cplus/logger.h>
+
+using namespace isc::log;
+using namespace std;
+
+class LoggerLevelImplTest : public ::testing::Test {
+protected:
+ LoggerLevelImplTest() {
+ // Ensure logging set to default for unit tests
+ resetUnitTestRootLogger();
+ }
+
+ ~LoggerLevelImplTest()
+ {}
+};
+
+
+// Checks that the log4cplus and BIND 10 levels convert correctly
+TEST_F(LoggerLevelImplTest, DefaultConversionFromBind) {
+ log4cplus::LogLevel fatal =
+ LoggerLevelImpl::convertFromBindLevel(Level(FATAL));
+ EXPECT_EQ(log4cplus::FATAL_LOG_LEVEL, fatal);
+
+ log4cplus::LogLevel error =
+ LoggerLevelImpl::convertFromBindLevel(Level(ERROR));
+ EXPECT_EQ(log4cplus::ERROR_LOG_LEVEL, error);
+
+ log4cplus::LogLevel warn =
+ LoggerLevelImpl::convertFromBindLevel(Level(WARN));
+ EXPECT_EQ(log4cplus::WARN_LOG_LEVEL, warn);
+
+ log4cplus::LogLevel info =
+ LoggerLevelImpl::convertFromBindLevel(Level(INFO));
+ EXPECT_EQ(log4cplus::INFO_LOG_LEVEL, info);
+
+ log4cplus::LogLevel debug =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL, debug);
+}
+
+// Checks that the debug severity and level converts correctly
+TEST_F(LoggerLevelImplTest, DebugConversionFromBind) {
+ log4cplus::LogLevel debug0 =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, 0));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - 0, debug0);
+
+ log4cplus::LogLevel debug1 =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, 1));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - 1, debug1);
+
+ log4cplus::LogLevel debug99 =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, 99));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - 99, debug99);
+
+ // Out of range should be coerced to the nearest boundary
+ log4cplus::LogLevel debug_1 =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, MIN_DEBUG_LEVEL - 1));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL, debug_1);
+
+ log4cplus::LogLevel debug100 =
+ LoggerLevelImpl::convertFromBindLevel(Level(DEBUG, MAX_DEBUG_LEVEL + 1));
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - MAX_DEBUG_LEVEL, debug100);
+}
+
+// Do the checks the other way
+static void
+test_convert_to(const char* trace, isc::log::Severity severity, int dbglevel,
+ log4cplus::LogLevel level)
+{
+ SCOPED_TRACE(trace);
+ Level test = LoggerLevelImpl::convertToBindLevel(level);
+ EXPECT_EQ(severity, test.severity);
+ EXPECT_EQ(dbglevel, test.dbglevel);
+}
+
+TEST_F(LoggerLevelImplTest, ConversionToBind) {
+ test_convert_to("FATAL", FATAL, MIN_DEBUG_LEVEL, log4cplus::FATAL_LOG_LEVEL);
+ test_convert_to("ERROR", ERROR, MIN_DEBUG_LEVEL, log4cplus::ERROR_LOG_LEVEL);
+ test_convert_to("WARN", WARN , MIN_DEBUG_LEVEL, log4cplus::WARN_LOG_LEVEL);
+ test_convert_to("INFO", INFO , MIN_DEBUG_LEVEL, log4cplus::INFO_LOG_LEVEL);
+ test_convert_to("DEBUG", DEBUG, MIN_DEBUG_LEVEL, log4cplus::DEBUG_LOG_LEVEL);
+
+ test_convert_to("DEBUG0", DEBUG, MIN_DEBUG_LEVEL + 0,
+ (log4cplus::DEBUG_LOG_LEVEL));
+ test_convert_to("DEBUG1", DEBUG, MIN_DEBUG_LEVEL + 1,
+ (log4cplus::DEBUG_LOG_LEVEL - 1));
+ test_convert_to("DEBUG2", DEBUG, MIN_DEBUG_LEVEL + 2,
+ (log4cplus::DEBUG_LOG_LEVEL - 2));
+ test_convert_to("DEBUG99", DEBUG, MIN_DEBUG_LEVEL + 99,
+ (log4cplus::DEBUG_LOG_LEVEL - 99));
+
+ // ... and some invalid values
+ test_convert_to("DEBUG-1", INFO, MIN_DEBUG_LEVEL,
+ (log4cplus::DEBUG_LOG_LEVEL + 1));
+ BOOST_STATIC_ASSERT(MAX_DEBUG_LEVEL == 99);
+ test_convert_to("DEBUG+100", DEFAULT, 0,
+ (log4cplus::DEBUG_LOG_LEVEL - MAX_DEBUG_LEVEL - 1));
+}
+
+// Check that we can convert from a string to the new log4cplus levels
+TEST_F(LoggerLevelImplTest, FromString) {
+
+ // Test all valid values
+ for (int i = MIN_DEBUG_LEVEL; i <= MAX_DEBUG_LEVEL; ++i) {
+ std::string token = string("DEBUG") + boost::lexical_cast<std::string>(i);
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - i,
+ LoggerLevelImpl::logLevelFromString(token));
+ }
+
+ // ... in lowercase too
+ for (int i = MIN_DEBUG_LEVEL; i <= MAX_DEBUG_LEVEL; ++i) {
+ std::string token = string("debug") + boost::lexical_cast<std::string>(i);
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - i,
+ LoggerLevelImpl::logLevelFromString(token));
+ }
+
+ // A few below the minimum
+ for (int i = MIN_DEBUG_LEVEL - 5; i < MIN_DEBUG_LEVEL; ++i) {
+ std::string token = string("DEBUG") + boost::lexical_cast<std::string>(i);
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL, LoggerLevelImpl::logLevelFromString(token));
+ }
+
+ // ... and above the maximum
+ for (int i = MAX_DEBUG_LEVEL + 1; i < MAX_DEBUG_LEVEL + 5; ++i) {
+ std::string token = string("DEBUG") + boost::lexical_cast<std::string>(i);
+ EXPECT_EQ(log4cplus::DEBUG_LOG_LEVEL - MAX_DEBUG_LEVEL,
+ LoggerLevelImpl::logLevelFromString(token));
+ }
+
+ // Invalid strings.
+ EXPECT_EQ(log4cplus::NOT_SET_LOG_LEVEL,
+ LoggerLevelImpl::logLevelFromString("DEBU"));
+ EXPECT_EQ(log4cplus::NOT_SET_LOG_LEVEL,
+ LoggerLevelImpl::logLevelFromString("unrecognised"));
+}
+
+// ... and check the conversion back again. All levels should convert to "DEBUG".
+TEST_F(LoggerLevelImplTest, ToString) {
+
+ for (int i = MIN_DEBUG_LEVEL; i <= MAX_DEBUG_LEVEL; ++i) {
+ EXPECT_EQ(std::string("DEBUG"),
+ LoggerLevelImpl::logLevelToString(log4cplus::DEBUG_LOG_LEVEL - i));
+ }
+
+ // ... and that out of range stuff returns an empty string.
+ EXPECT_EQ(std::string(),
+ LoggerLevelImpl::logLevelToString(log4cplus::DEBUG_LOG_LEVEL + 1));
+ EXPECT_EQ(std::string(),
+ LoggerLevelImpl::logLevelToString(
+ log4cplus::DEBUG_LOG_LEVEL - MAX_DEBUG_LEVEL - 100));
+}
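
To make the mapping these tests check explicit: a standalone sketch (not the
LoggerLevelImpl implementation itself) of converting a BIND 10 debug level to a
log4cplus level, assuming MIN_DEBUG_LEVEL is 0 and MAX_DEBUG_LEVEL is 99 (the
latter is asserted above via BOOST_STATIC_ASSERT).

    #include <log4cplus/loglevel.h>

    namespace {

    const int MIN_DBG = 0;   // assumed value of MIN_DEBUG_LEVEL
    const int MAX_DBG = 99;  // matches the BOOST_STATIC_ASSERT above

    // Debug level N maps to (DEBUG_LOG_LEVEL - N); out-of-range values are
    // coerced to the nearest boundary, as DebugConversionFromBind expects.
    log4cplus::LogLevel
    debugLevelToLog4cplus(int dbglevel) {
        if (dbglevel < MIN_DBG) {
            dbglevel = MIN_DBG;
        } else if (dbglevel > MAX_DBG) {
            dbglevel = MAX_DBG;
        }
        return (log4cplus::DEBUG_LOG_LEVEL - dbglevel);
    }

    } // unnamed namespace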
diff --git a/src/lib/log/tests/logger_level_unittest.cc b/src/lib/log/tests/logger_level_unittest.cc
new file mode 100644
index 0000000..641a6cc
--- /dev/null
+++ b/src/lib/log/tests/logger_level_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <log/logger.h>
+#include <log/logger_manager.h>
+#include <log/log_messages.h>
+#include <log/logger_support.h>
+
+using namespace isc;
+using namespace isc::log;
+using namespace std;
+
+class LoggerLevelTest : public ::testing::Test {
+protected:
+ LoggerLevelTest() {
+ // Logger initialization is done in main(). As logging tests may
+ // alter the default logging output, it is reset here.
+ resetUnitTestRootLogger();
+ }
+ ~LoggerLevelTest() {
+ LoggerManager::reset();
+ }
+};
+
+
+// Checks that Level objects are constructed correctly.
+
+TEST_F(LoggerLevelTest, Creation) {
+
+ // Default
+ isc::log::Level level1;
+ EXPECT_EQ(isc::log::DEFAULT, level1.severity);
+ EXPECT_EQ(isc::log::MIN_DEBUG_LEVEL, level1.dbglevel);
+
+ // Single argument constructor.
+ isc::log::Level level2(isc::log::FATAL);
+ EXPECT_EQ(isc::log::FATAL, level2.severity);
+ EXPECT_EQ(isc::log::MIN_DEBUG_LEVEL, level2.dbglevel);
+
+ // Two-argument constructor
+ isc::log::Level level3(isc::log::DEBUG, 42);
+ EXPECT_EQ(isc::log::DEBUG, level3.severity);
+ EXPECT_EQ(42, level3.dbglevel);
+}
+
+TEST_F(LoggerLevelTest, getSeverity) {
+ EXPECT_EQ(DEBUG, getSeverity("DEBUG"));
+ EXPECT_EQ(DEBUG, getSeverity("debug"));
+ EXPECT_EQ(DEBUG, getSeverity("DeBuG"));
+ EXPECT_EQ(INFO, getSeverity("INFO"));
+ EXPECT_EQ(INFO, getSeverity("info"));
+ EXPECT_EQ(INFO, getSeverity("iNfO"));
+ EXPECT_EQ(WARN, getSeverity("WARN"));
+ EXPECT_EQ(WARN, getSeverity("warn"));
+ EXPECT_EQ(WARN, getSeverity("wARn"));
+ EXPECT_EQ(ERROR, getSeverity("ERROR"));
+ EXPECT_EQ(ERROR, getSeverity("error"));
+ EXPECT_EQ(ERROR, getSeverity("ERRoR"));
+ EXPECT_EQ(FATAL, getSeverity("FATAL"));
+ EXPECT_EQ(FATAL, getSeverity("fatal"));
+ EXPECT_EQ(FATAL, getSeverity("FAtaL"));
+
+ // bad values should default to INFO
+ EXPECT_EQ(INFO, getSeverity("some bad value"));
+ EXPECT_EQ(INFO, getSeverity(""));
+
+ LoggerManager::reset();
+}
diff --git a/src/lib/log/tests/logger_manager_unittest.cc b/src/lib/log/tests/logger_manager_unittest.cc
new file mode 100644
index 0000000..0bdfc74
--- /dev/null
+++ b/src/lib/log/tests/logger_manager_unittest.cc
@@ -0,0 +1,321 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <boost/scoped_array.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <log/macros.h>
+#include <log/log_messages.h>
+#include <log/logger.h>
+#include <log/logger_level.h>
+#include <log/logger_manager.h>
+#include <log/logger_specification.h>
+#include <log/output_option.h>
+
+#include "tempdir.h"
+
+using namespace isc;
+using namespace isc::log;
+using namespace std;
+
+/// \brief LoggerManager Test
+class LoggerManagerTest : public ::testing::Test {
+public:
+ LoggerManagerTest() {
+ // Initialization of logging is done in main()
+ }
+
+ ~LoggerManagerTest() {
+ LoggerManager::reset();
+ }
+};
+
+
+
+// Convenience class to create the specification for the logger "filelogger",
+// which, as the name suggests, logs to a file. It remembers the file name and
+// deletes the file when the instance of the class is destroyed.
+class SpecificationForFileLogger {
+public:
+
+ // Constructor - allocate file and create the specification object
+ SpecificationForFileLogger() : spec_(), name_(""), logname_("filelogger") {
+
+ // Set the output to a temporary file.
+ OutputOption option;
+ option.destination = OutputOption::DEST_FILE;
+ option.filename = name_ = createTempFilename();
+
+ // Set target output to the file logger. The defaults indicate
+ // INFO severity.
+ spec_.setName(logname_);
+ spec_.addOutputOption(option);
+ }
+
+ // Destructor, remove the file. This is only a test, so ignore failures
+ ~SpecificationForFileLogger() {
+ if (! name_.empty()) {
+ (void) unlink(name_.c_str());
+ }
+ }
+
+ // Return reference to the logging specification for this logger
+ LoggerSpecification& getSpecification() {
+ return spec_;
+ }
+
+ // Return name of the logger
+ string getLoggerName() const {
+ return logname_;
+ }
+
+ // Return name of the file
+ string getFileName() const {
+ return name_;
+ }
+
+ // Create temporary filename
+ //
+ // The compiler warns against tmpnam() and suggests mkstemp instead.
+ // Unfortunately, this creates the filename and opens it. So we need to
+ // close and delete the file before returning the name. Also, the name
+ // is based on the template supplied and the name of the temporary
+ // directory may vary between systems. So translate TMPDIR and if that
+ // does not exist, use /tmp.
+ //
+ // \return Temporary file name
+ std::string createTempFilename() {
+ string filename = TEMP_DIR + "/bind10_logger_manager_test_XXXXXX";
+
+ // Copy into writeable storage for the call to mkstemp
+ boost::scoped_array<char> tname(new char[filename.size() + 1]);
+ strcpy(tname.get(), filename.c_str());
+
+ // Create file, close and delete it, and store the name for later.
+ // There is still a race condition here, albeit a small one.
+ int filenum = mkstemp(tname.get());
+ if (filenum == -1) {
+ isc_throw(Exception, "Unable to obtain unique filename");
+ }
+ close(filenum);
+
+ return (string(tname.get()));
+ }
+
+
+private:
+ LoggerSpecification spec_; // Specification for this file logger
+ string name_; // Name of the output file
+ string logname_; // Name of this logger
+};
+
+
+// Convenience function to read an output log file and check that each line
+// contains the expected message ID
+//
+// \param filename Name of the file to check
+// \param start Iterator pointing to first expected message ID
+// \param finish Iterator pointing to last expected message ID
+template <typename T>
+void checkFileContents(const std::string& filename, T start, T finish) {
+
+ // Access the file for input
+ ifstream infile(filename.c_str());
+ if (! infile.good()) {
+ FAIL() << "Unable to open the logging file " << filename;
+ }
+
+ // Iterate round the expected message IDs and check that they appear in
+ // the string.
+ string line; // Line read from the file
+
+ T i = start; // Iterator
+ getline(infile, line);
+ int lineno = 1;
+
+ while ((i != finish) && (infile.good())) {
+
+ // Check that the message ID appears in the line.
+ EXPECT_TRUE(line.find(string(*i)) != string::npos)
+ << "Expected to find " << string(*i) << " on line " << lineno
+ << " of logging file " << filename;
+
+ // Go for the next line
+ ++i;
+ getline(infile, line);
+ ++lineno;
+ }
+
+ // Why did the loop end?
+ EXPECT_TRUE(i == finish) << "Did not reach the end of the message ID list";
+ EXPECT_TRUE(infile.eof()) << "Did not reach the end of the logging file";
+
+ // File will close when the instream is deleted at the end of this
+ // function.
+}
+
+// Check that the logger correctly creates something logging to a file.
+TEST_F(LoggerManagerTest, FileLogger) {
+
+ // Create a specification for the file logger and use the manager to
+ // connect the "filelogger" logger to it.
+ SpecificationForFileLogger file_spec;
+
+ // For the first test, we want to check that the file is created
+ // if it does not already exist. So delete the temporary file before
+ // logging the first message.
+ unlink(file_spec.getFileName().c_str());
+
+ // Set up the file appenders.
+ LoggerManager manager;
+ manager.process(file_spec.getSpecification());
+
+ // Try logging to the file. Local scope is set to ensure that the logger
+ // is destroyed before we reset the global logging. We record what we
+ // put in the file for a later comparison.
+ vector<MessageID> ids;
+ {
+
+ // Scope-limit the logger to ensure it is destroyed after the brief
+ // check. This gives some confidence that the logger does not keep
+ // the file open once it goes out of scope.
+ Logger logger(file_spec.getLoggerName());
+
+ LOG_FATAL(logger, LOG_DUPLICATE_MESSAGE_ID).arg("test");
+ ids.push_back(LOG_DUPLICATE_MESSAGE_ID);
+
+ LOG_FATAL(logger, LOG_DUPLICATE_NAMESPACE).arg("test");
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
+ }
+ LoggerManager::reset();
+
+ // At this point, the output file should contain two lines, holding the
+ // LOG_DUPLICATE_MESSAGE_ID and LOG_DUPLICATE_NAMESPACE messages - test this.
+ checkFileContents(file_spec.getFileName(), ids.begin(), ids.end());
+
+ // Re-open the file (we have to assume that it was closed when we
+ // reset the logger - there is no easy way to check) and check that
+ // new messages are appended to it. We use the alternative
+ // invocation of process() here to check it works.
+ vector<LoggerSpecification> spec(1, file_spec.getSpecification());
+ manager.process(spec.begin(), spec.end());
+
+ // Create a new instance of the logger and log three more messages.
+ Logger logger(file_spec.getLoggerName());
+
+ LOG_FATAL(logger, LOG_NO_SUCH_MESSAGE).arg("test");
+ ids.push_back(LOG_NO_SUCH_MESSAGE);
+
+ LOG_FATAL(logger, LOG_INVALID_MESSAGE_ID).arg("test").arg("test2");
+ ids.push_back(LOG_INVALID_MESSAGE_ID);
+
+ LOG_FATAL(logger, LOG_NO_MESSAGE_ID).arg("42");
+ ids.push_back(LOG_NO_MESSAGE_ID);
+
+ // Close the file and check again
+ LoggerManager::reset();
+ checkFileContents(file_spec.getFileName(), ids.begin(), ids.end());
+}
+
+// Check if the file rolls over when it gets above a certain size.
+TEST_F(LoggerManagerTest, FileSizeRollover) {
+ // Set to a suitable minimum that log4cplus can cope with
+ static const size_t SIZE_LIMIT = 204800;
+
+ // Set up the name of the file.
+ SpecificationForFileLogger file_spec;
+ LoggerSpecification& spec = file_spec.getSpecification();
+
+ // Modify the output option to set a maximum file size and version count.
+ LoggerSpecification::iterator opt = spec.begin();
+ EXPECT_TRUE(opt != spec.end());
+ opt->maxsize = SIZE_LIMIT; // Bytes
+ opt->maxver = 2;
+
+ // The current output file does not exist (the creation of file_spec
+ // ensures that).  Check that previous versions don't exist either.
+ vector<string> prev_name;
+ for (int i = 0; i < 3; ++i) {
+ prev_name.push_back(file_spec.getFileName() + "." +
+ boost::lexical_cast<string>(i + 1));
+ (void) unlink(prev_name[i].c_str());
+ }
+
+ // Generate an argument for a message that ensures that the message when
+ // logged will be over that size.
+ string big_arg(SIZE_LIMIT, 'x');
+
+ // Set up the file logger
+ LoggerManager manager;
+ manager.process(spec);
+
+ // Log the message twice using different message IDs. This should generate
+ // three files because, with the log4cplus implementation, the files appear
+ // to be rolled over after the message is logged.
+ {
+ Logger logger(file_spec.getLoggerName());
+ LOG_FATAL(logger, LOG_NO_SUCH_MESSAGE).arg(big_arg);
+ LOG_FATAL(logger, LOG_DUPLICATE_NAMESPACE).arg(big_arg);
+ }
+
+ // Check them.
+ LoggerManager::reset(); // Ensure files are closed
+
+ vector<MessageID> ids;
+ ids.push_back(LOG_NO_SUCH_MESSAGE);
+ checkFileContents(prev_name[1], ids.begin(), ids.end());
+
+ ids.clear();
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
+ checkFileContents(prev_name[0], ids.begin(), ids.end());
+
+ // Log another message and check that the files have rotated and that
+ // a .3 version does not exist.
+ manager.process(spec);
+ {
+ Logger logger(file_spec.getLoggerName());
+ LOG_FATAL(logger, LOG_NO_MESSAGE_TEXT).arg(big_arg);
+ }
+
+ LoggerManager::reset(); // Ensure files are closed
+
+ // Check that the files have moved.
+ ids.clear();
+ ids.push_back(LOG_DUPLICATE_NAMESPACE);
+ checkFileContents(prev_name[1], ids.begin(), ids.end());
+
+ ids.clear();
+ ids.push_back(LOG_NO_MESSAGE_TEXT);
+ checkFileContents(prev_name[0], ids.begin(), ids.end());
+
+ // ... and check that the .3 version does not exist.
+ ifstream file3(prev_name[2].c_str(), ifstream::in);
+ EXPECT_FALSE(file3.good());
+
+ // Tidy up
+ for (int i = 0; i < prev_name.size(); ++i) {
+ (void) unlink(prev_name[i].c_str());
+ }
+}
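
For reference, a minimal sketch of how a size-limited rolling file logger is
assembled from the interfaces exercised above; the path and limits are
illustrative, and the calls simply mirror those used in the tests rather than
prescribing production usage:

    // Illustrative configuration of a rolling file appender using the
    // OutputOption / LoggerSpecification / LoggerManager interfaces.
    OutputOption option;
    option.destination = OutputOption::DEST_FILE;
    option.filename = "/tmp/example.log";   // Hypothetical path
    option.maxsize = 204800;                // Roll over at about 200kB
    option.maxver = 2;                      // Keep example.log.1 and .2

    LoggerSpecification spec;
    spec.setName("example");
    spec.setSeverity(isc::log::INFO);
    spec.addOutputOption(option);

    LoggerManager manager;
    manager.process(spec);                  // Attach the appender

    Logger logger("example");
    LOG_INFO(logger, LOG_INPUT_OPEN_FAIL).arg("example.msg").arg("example");
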
diff --git a/src/lib/log/tests/logger_name_unittest.cc b/src/lib/log/tests/logger_name_unittest.cc
new file mode 100644
index 0000000..51fead5
--- /dev/null
+++ b/src/lib/log/tests/logger_name_unittest.cc
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <log/logger_name.h>
+
+using namespace isc;
+using namespace isc::log;
+
+// Test class. To avoid disturbing the root logger configuration in other
+// tests in the suite, the root logger name is saved in the constructor and
+// restored in the destructor. However, this is a bit chicken and egg, as the
+// functions used to do the save and restore are those being tested...
+//
+// Note that the root name is originally set by the initialization of the
+// logging configuration done in main().
+
+class LoggerNameTest : public ::testing::Test {
+public:
+ LoggerNameTest() {
+ name_ = getRootLoggerName();
+ }
+ ~LoggerNameTest() {
+ setRootLoggerName(name_);
+ }
+
+private:
+ std::string name_; ///< Saved name
+};
+
+// Check setting and getting of root name
+
+TEST_F(LoggerNameTest, RootNameSetGet) {
+ const std::string name1 = "test1";
+ const std::string name2 = "test2";
+
+ // Check that Set/Get works
+ setRootLoggerName(name1);
+ EXPECT_EQ(name1, getRootLoggerName());
+
+ // We cannot test that the root logger name is initialised
+ // correctly (as there is only one instance of it and we don't know
+ // when this test will be run), so to check that setRootLoggerName()
+ // actually does change the name, run the test again with a
+ // different name.
+ //
+ // (There is always the outside chance that the root logger name
+ // was initialised to name1 and that setRootLoggerName() has no effect.)
+ setRootLoggerName(name2);
+ EXPECT_EQ(name2, getRootLoggerName());
+}
+
+// Check expansion of name
+
+TEST_F(LoggerNameTest, ExpandLoggerName) {
+ const std::string ROOT = "example";
+ const std::string NAME = "something";
+ const std::string FULL_NAME = ROOT + "." + NAME;
+
+ setRootLoggerName(ROOT);
+ EXPECT_EQ(ROOT, expandLoggerName(ROOT));
+ EXPECT_EQ(FULL_NAME, expandLoggerName(NAME));
+ EXPECT_EQ(FULL_NAME, expandLoggerName(FULL_NAME));
+}
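
One possible implementation consistent with the expectations above - a sketch
only, not necessarily the library's actual code: a name is returned unchanged
if it is the root name or already carries the "<root>." prefix, and is
prefixed with the root name otherwise.

    // Sketch of the expandLoggerName() semantics implied by the test; the
    // helper name is hypothetical.
    std::string expandNameSketch(const std::string& name) {
        const std::string root = getRootLoggerName();
        const std::string prefix = root + ".";
        if ((name == root) ||
            (name.compare(0, prefix.size(), prefix) == 0)) {
            return (name);
        }
        return (prefix + name);
    }
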
diff --git a/src/lib/log/tests/logger_specification_unittest.cc b/src/lib/log/tests/logger_specification_unittest.cc
new file mode 100644
index 0000000..e416c32
--- /dev/null
+++ b/src/lib/log/tests/logger_specification_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <log/logger_specification.h>
+#include <log/output_option.h>
+
+using namespace isc::log;
+using namespace std;
+
+// Check default initialization.
+TEST(LoggerSpecificationTest, DefaultInitialization) {
+ LoggerSpecification spec;
+
+ EXPECT_EQ(string(""), spec.getName());
+ EXPECT_EQ(isc::log::INFO, spec.getSeverity());
+ EXPECT_EQ(0, spec.getDbglevel());
+ EXPECT_FALSE(spec.getAdditive());
+ EXPECT_EQ(0, spec.optionCount());
+}
+
+// Non-default initialization
+TEST(LoggerSpecificationTest, Initialization) {
+ LoggerSpecification spec("alpha", isc::log::ERROR, 42, true);
+
+ EXPECT_EQ(string("alpha"), spec.getName());
+ EXPECT_EQ(isc::log::ERROR, spec.getSeverity());
+ EXPECT_EQ(42, spec.getDbglevel());
+ EXPECT_TRUE(spec.getAdditive());
+ EXPECT_EQ(0, spec.optionCount());
+}
+
+// Get/Set tests
+TEST(LoggerSpecificationTest, SetGet) {
+ LoggerSpecification spec;
+
+ spec.setName("gamma");
+ EXPECT_EQ(string("gamma"), spec.getName());
+
+ spec.setSeverity(isc::log::FATAL);
+ EXPECT_EQ(isc::log::FATAL, spec.getSeverity());
+
+ spec.setDbglevel(7);
+ EXPECT_EQ(7, spec.getDbglevel());
+
+ spec.setAdditive(true);
+ EXPECT_TRUE(spec.getAdditive());
+
+ // Should not affect option count
+ EXPECT_EQ(0, spec.optionCount());
+}
+
+// Check option setting
+TEST(LoggerSpecificationTest, AddOption) {
+ OutputOption option1;
+ option1.destination = OutputOption::DEST_FILE;
+ option1.filename = "/tmp/example.log";
+ option1.maxsize = 123456;
+
+ OutputOption option2;
+ option2.destination = OutputOption::DEST_SYSLOG;
+ option2.facility = "LOCAL7";
+
+ LoggerSpecification spec;
+ spec.addOutputOption(option1);
+ spec.addOutputOption(option2);
+ EXPECT_EQ(2, spec.optionCount());
+
+ // Iterate through them
+ LoggerSpecification::const_iterator i = spec.begin();
+
+ EXPECT_EQ(OutputOption::DEST_FILE, i->destination);
+ EXPECT_EQ(string("/tmp/example.log"), i->filename);
+ EXPECT_EQ(123456, i->maxsize);
+
+ ++i;
+ EXPECT_EQ(OutputOption::DEST_SYSLOG, i->destination);
+ EXPECT_EQ(string("LOCAL7"), i->facility);
+
+ ++i;
+ EXPECT_TRUE(i == spec.end());
+}
diff --git a/src/lib/log/tests/logger_support_test.cc b/src/lib/log/tests/logger_support_test.cc
deleted file mode 100644
index 0a2338b..0000000
--- a/src/lib/log/tests/logger_support_test.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-/// \brief Example Program
-///
-/// Simple example program showing how to use the logger.
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-
-#include <iostream>
-
-#include <log/logger.h>
-#include <log/macros.h>
-#include <log/logger_support.h>
-#include <log/root_logger_name.h>
-
-// Include a set of message definitions.
-#include <log/messagedef.h>
-
-using namespace isc::log;
-
-// Declare logger to use an example.
-Logger logger_ex("example");
-
-// The program is invoked:
-//
-// logger_support_test [-s severity] [-d level ] [local_file]
-//
-// "severity" is one of "debug", "info", "warn", "error", "fatal"
-// "level" is the debug level, a number between 0 and 99
-// "local_file" is the name of a local file.
-//
-// The program sets the attributes on the root logger and logs a set of
-// messages. Looking at the output determines whether the program worked.
-
-int main(int argc, char** argv) {
-
- isc::log::Severity severity = isc::log::INFO; // Default logger severity
- int dbglevel = -1; // Logger debug level
- const char* localfile = NULL; // Local message file
- int option; // For getopt() processing
- Logger logger_dlm("dlm", true); // Another example logger
-
- // Parse options
- while ((option = getopt(argc, argv, "s:d:")) != -1) {
- switch (option) {
- case 's':
- if (strcmp(optarg, "debug") == 0) {
- severity = isc::log::DEBUG;
- } else if (strcmp(optarg, "info") == 0) {
- severity = isc::log::INFO;
- } else if (strcmp(optarg, "warn") == 0) {
- severity = isc::log::WARN;
- } else if (strcmp(optarg, "error") == 0) {
- severity = isc::log::ERROR;
- } else if (strcmp(optarg, "fatal") == 0) {
- severity = isc::log::FATAL;
- } else {
- std::cout << "Unrecognised severity option: " <<
- optarg << "\n";
- exit(1);
- }
- break;
-
- case 'd':
- dbglevel = atoi(optarg);
- break;
-
- default:
- std::cout << "Unrecognised option: " <<
- static_cast<char>(option) << "\n";
- }
- }
-
- if (optind < argc) {
- localfile = argv[optind];
- }
-
- // Update the logging parameters
- initLogger("alpha", severity, dbglevel, localfile);
-
- // Log a few messages
- LOG_FATAL(logger_ex, MSG_WRITERR).arg("test1").arg("42");
- LOG_ERROR(logger_ex, MSG_RDLOCMES).arg("dummy/file");
- LOG_WARN(logger_dlm, MSG_READERR).arg("a.txt").arg("dummy reason");
- LOG_INFO(logger_dlm, MSG_OPENIN).arg("example.msg").arg("dummy reason");
- LOG_DEBUG(logger_ex, 0, MSG_RDLOCMES).arg("dummy/0");
- LOG_DEBUG(logger_ex, 24, MSG_RDLOCMES).arg("dummy/24");
- LOG_DEBUG(logger_ex, 25, MSG_RDLOCMES).arg("dummy/25");
- LOG_DEBUG(logger_ex, 26, MSG_RDLOCMES).arg("dummy/26");
-
- return (0);
-}
diff --git a/src/lib/log/tests/logger_support_unittest.cc b/src/lib/log/tests/logger_support_unittest.cc
new file mode 100644
index 0000000..b418906
--- /dev/null
+++ b/src/lib/log/tests/logger_support_unittest.cc
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <log/log_messages.h>
+
+using namespace isc::log;
+
+class LoggerSupportTest : public ::testing::Test {
+protected:
+ LoggerSupportTest() {
+ // Logger initialization is done in main(). As logging tests may
+ // alter the default logging output, it is reset here.
+ resetUnitTestRootLogger();
+ }
+ ~LoggerSupportTest() {
+ }
+};
+
+// Check that the initialized flag can be manipulated. This is a bit
+// chicken-and-egg: we want to reset the flag to the original value at the
+// end of the test, so use the functions to do that. But we are trying to
+// check that these functions in fact work.
+
+TEST_F(LoggerSupportTest, InitializedFlag) {
+ bool current_flag = isLoggingInitialized();
+
+ // Check that we can flip the flag.
+ setLoggingInitialized(!current_flag);
+ EXPECT_NE(current_flag, isLoggingInitialized());
+ setLoggingInitialized(!isLoggingInitialized());
+ EXPECT_EQ(current_flag, isLoggingInitialized());
+
+ // Check we can set it to explicit values (tests that a call to the "set"
+ // function does not just flip the flag).
+ setLoggingInitialized(false);
+ EXPECT_FALSE(isLoggingInitialized());
+ setLoggingInitialized(false);
+ EXPECT_FALSE(isLoggingInitialized());
+
+ setLoggingInitialized(true);
+ EXPECT_TRUE(isLoggingInitialized());
+ setLoggingInitialized(true);
+ EXPECT_TRUE(isLoggingInitialized());
+
+ // Reset to original value
+ setLoggingInitialized(current_flag);
+}
+
+// Check that a logger will throw an exception if logging has not been
+// initialized.
+
+TEST_F(LoggerSupportTest, LoggingInitializationCheck) {
+
+ // Assert that logging has been initialized (it should be in main()).
+ bool current_flag = isLoggingInitialized();
+ EXPECT_TRUE(current_flag);
+
+ // Flag that it has not been initialized and declare a logger. Any logging
+ // operation should then throw.
+ setLoggingInitialized(false);
+ isc::log::Logger test_logger("test");
+
+ EXPECT_THROW(test_logger.isDebugEnabled(), LoggingNotInitialized);
+ EXPECT_THROW(test_logger.info(LOG_INPUT_OPEN_FAIL), LoggingNotInitialized);
+
+ // ... and check that they work when logging is initialized.
+ setLoggingInitialized(true);
+ EXPECT_NO_THROW(test_logger.isDebugEnabled());
+ EXPECT_NO_THROW(test_logger.info(LOG_INPUT_OPEN_FAIL));
+}
diff --git a/src/lib/log/tests/logger_unittest.cc b/src/lib/log/tests/logger_unittest.cc
index 4eff622..edca9ce 100644
--- a/src/lib/log/tests/logger_unittest.cc
+++ b/src/lib/log/tests/logger_unittest.cc
@@ -17,45 +17,27 @@
#include <gtest/gtest.h>
-#include <log/root_logger_name.h>
#include <log/logger.h>
-#include <log/messagedef.h>
+#include <log/logger_manager.h>
+#include <log/logger_name.h>
+#include <log/log_messages.h>
using namespace isc;
using namespace isc::log;
using namespace std;
-namespace isc {
-namespace log {
-
-/// \brief Test Logger
+/// \brief Logger Test
///
-/// This logger is a subclass of the logger class under test, but makes
-/// protected methods public (for testing)
-
-class TestLogger : public Logger {
-public:
- /// \brief constructor
- TestLogger(const string& name) : Logger(name, true)
- {}
-
- static void reset() {
- Logger::reset();
- }
-};
-
-} // namespace log
-} // namespace isc
-
+/// As the logger is only a shell around the implementation, this also
+/// tests the logger implementation class.
class LoggerTest : public ::testing::Test {
-protected:
- LoggerTest()
- {
+public:
+ LoggerTest() {
+ // Initialization of logging is done in main()
}
-
~LoggerTest() {
- TestLogger::reset();
+ LoggerManager::reset();
}
};
@@ -65,11 +47,10 @@ protected:
TEST_F(LoggerTest, Name) {
// Create a logger
- setRootLoggerName("test1");
Logger logger("alpha");
// ... and check the name
- EXPECT_EQ(string("test1.alpha"), logger.getName());
+ EXPECT_EQ(getRootLoggerName() + string(".alpha"), logger.getName());
}
// This test attempts to get two instances of a logger with the same name
@@ -77,22 +58,18 @@ TEST_F(LoggerTest, Name) {
TEST_F(LoggerTest, GetLogger) {
- // Set the root logger name (not strictly needed, but this will be the
- // case in the program(.
- setRootLoggerName("test2");
-
const string name1 = "alpha";
const string name2 = "beta";
// Instantiate two loggers that should be the same
- TestLogger logger1(name1);
- TestLogger logger2(name1);
+ Logger logger1(name1);
+ Logger logger2(name1);
// And check they equal
EXPECT_TRUE(logger1 == logger2);
// Instantiate another logger with another name and check that it
// is different to the previously instantiated ones.
- TestLogger logger3(name2);
+ Logger logger3(name2);
EXPECT_FALSE(logger1 == logger3);
}
@@ -101,8 +78,7 @@ TEST_F(LoggerTest, GetLogger) {
TEST_F(LoggerTest, Severity) {
// Create a logger
- setRootLoggerName("test3");
- TestLogger logger("alpha");
+ Logger logger("alpha");
// Now check the levels
logger.setSeverity(isc::log::NONE);
@@ -132,8 +108,7 @@ TEST_F(LoggerTest, Severity) {
TEST_F(LoggerTest, DebugLevels) {
// Create a logger
- setRootLoggerName("test4");
- TestLogger logger("alpha");
+ Logger logger("alpha");
// Debug level should be 0 if not at debug severity
logger.setSeverity(isc::log::NONE, 20);
@@ -174,13 +149,47 @@ TEST_F(LoggerTest, DebugLevels) {
TEST_F(LoggerTest, SeverityInheritance) {
- // Create to loggers. We cheat here as we know that the underlying
- // implementation (in this case log4cxx) will set a parent-child
- // relationship if the loggers are named <parent> and <parent>.<child>.
+ // Create two loggers. We cheat here as we know that the underlying
+ // implementation will set a parent-child relationship if the loggers
+ // are named <parent> and <parent>.<child>.
+ Logger parent("alpha");
+ Logger child("alpha.beta");
- setRootLoggerName("test5");
- TestLogger parent("alpha");
- TestLogger child("alpha.beta");
+ // By default, newly created loggers should have a level of DEFAULT
+ // (i.e. default to parent)
+ EXPECT_EQ(isc::log::DEFAULT, parent.getSeverity());
+ EXPECT_EQ(isc::log::DEFAULT, child.getSeverity());
+
+ // Set the severity of the parent to debug and check what is
+ // reported by the child.
+ parent.setSeverity(isc::log::DEBUG, 42);
+ EXPECT_EQ(42, parent.getDebugLevel());
+ EXPECT_EQ(0, child.getDebugLevel());
+ EXPECT_EQ(42, child.getEffectiveDebugLevel());
+
+ // Setting the child to DEBUG severity should set its own
+ // debug level.
+ child.setSeverity(isc::log::DEBUG, 53);
+ EXPECT_EQ(53, child.getDebugLevel());
+ EXPECT_EQ(53, child.getEffectiveDebugLevel());
+
+ // If the child severity is set to something other than DEBUG,
+ // the debug level should be reported as 0.
+ child.setSeverity(isc::log::ERROR);
+ EXPECT_EQ(0, child.getDebugLevel());
+ EXPECT_EQ(0, child.getEffectiveDebugLevel());
+}
+
+// Check that changing the parent and child debug level does not affect
+// the other.
+
+TEST_F(LoggerTest, DebugLevelInheritance) {
+
+ // Create two loggers. We cheat here as we know that the underlying
+ // implementation will set a parent-child relationship if the loggers
+ // are named <parent> and <parent>.<child>.
+ Logger parent("alpha");
+ Logger child("alpha.beta");
// By default, newly created loggers should have a level of DEFAULT
// (i.e. default to parent)
@@ -206,11 +215,9 @@ TEST_F(LoggerTest, SeverityInheritance) {
TEST_F(LoggerTest, EffectiveSeverityInheritance) {
- // Create to loggers. We cheat here as we know that the underlying
- // implementation (in this case log4cxx) will set a parent-child
- // relationship if the loggers are named <parent> and <parent>.<child>.
-
- setRootLoggerName("test6");
+ // Create two loggers. We cheat here as we know that the underlying
+ // implementation will set a parent-child relationship if the loggers
+ // are named <parent> and <parent>.<child>.
Logger parent("test6");
Logger child("test6.beta");
@@ -245,7 +252,6 @@ TEST_F(LoggerTest, EffectiveSeverityInheritance) {
TEST_F(LoggerTest, IsXxxEnabled) {
- setRootLoggerName("test7");
Logger logger("test7");
logger.setSeverity(isc::log::INFO);
@@ -316,7 +322,6 @@ TEST_F(LoggerTest, IsXxxEnabled) {
TEST_F(LoggerTest, IsDebugEnabledLevel) {
- setRootLoggerName("test8");
Logger logger("test8");
int MID_LEVEL = (MIN_DEBUG_LEVEL + MAX_DEBUG_LEVEL) / 2;
diff --git a/src/lib/log/tests/message_dictionary_unittest.cc b/src/lib/log/tests/message_dictionary_unittest.cc
index ba33820..394fea0 100644
--- a/src/lib/log/tests/message_dictionary_unittest.cc
+++ b/src/lib/log/tests/message_dictionary_unittest.cc
@@ -29,7 +29,7 @@ using namespace std;
// and the latter should be present.
static const char* values[] = {
- "MSG_DUPLNS", "duplicate $NAMESPACE directive found",
+ "LOG_DUPLICATE_NAMESPACE", "duplicate $NAMESPACE directive found",
"NEWSYM", "new symbol added",
NULL
};
@@ -190,7 +190,7 @@ TEST_F(MessageDictionaryTest, GlobalTest) {
TEST_F(MessageDictionaryTest, GlobalLoadTest) {
vector<string>& duplicates = MessageInitializer::getDuplicates();
ASSERT_EQ(1, duplicates.size());
- EXPECT_EQ(string("MSG_DUPLNS"), duplicates[0]);
+ EXPECT_EQ(string("LOG_DUPLICATE_NAMESPACE"), duplicates[0]);
string text = MessageDictionary::globalDictionary().getText("NEWSYM");
EXPECT_EQ(string("new symbol added"), text);
diff --git a/src/lib/log/tests/message_reader_unittest.cc b/src/lib/log/tests/message_reader_unittest.cc
index 7b3ba5f..d0214a4 100644
--- a/src/lib/log/tests/message_reader_unittest.cc
+++ b/src/lib/log/tests/message_reader_unittest.cc
@@ -16,7 +16,7 @@
#include <string>
#include <gtest/gtest.h>
-#include <log/messagedef.h>
+#include <log/log_messages.h>
#include <log/message_dictionary.h>
#include <log/message_exception.h>
#include <log/message_reader.h>
@@ -102,8 +102,8 @@ processLineException(MessageReader& reader, const char* what,
TEST_F(MessageReaderTest, InvalidDirectives) {
// Check that a "$" with nothing else generates an error
- processLineException(reader_, "$", MSG_UNRECDIR);
- processLineException(reader_, "$xyz", MSG_UNRECDIR);
+ processLineException(reader_, "$", LOG_UNRECOGNISED_DIRECTIVE);
+ processLineException(reader_, "$xyz", LOG_UNRECOGNISED_DIRECTIVE);
}
// Check that it can parse a prefix
@@ -117,20 +117,20 @@ TEST_F(MessageReaderTest, Prefix) {
EXPECT_NO_THROW(reader_.processLine("$PREFIX"));
// Check a $PREFIX with multiple arguments is invalid
- processLineException(reader_, "$prefix A B", MSG_PRFEXTRARG);
+ processLineException(reader_, "$prefix A B", LOG_PREFIX_EXTRA_ARGS);
// Prefixes should be alphanumeric (with underscores) and not start
// with a number.
- processLineException(reader_, "$prefix ab[cd", MSG_PRFINVARG);
- processLineException(reader_, "$prefix 123", MSG_PRFINVARG);
- processLineException(reader_, "$prefix 1ABC", MSG_PRFINVARG);
+ processLineException(reader_, "$prefix ab[cd", LOG_PREFIX_INVALID_ARG);
+ processLineException(reader_, "$prefix 123", LOG_PREFIX_INVALID_ARG);
+ processLineException(reader_, "$prefix 1ABC", LOG_PREFIX_INVALID_ARG);
// A valid prefix should be accepted
EXPECT_NO_THROW(reader_.processLine("$PREFIX dlm__"));
EXPECT_EQ(string("dlm__"), reader_.getPrefix());
// And check that the parser fails on invalid prefixes...
- processLineException(reader_, "$prefix 1ABC", MSG_PRFINVARG);
+ processLineException(reader_, "$prefix 1ABC", LOG_PREFIX_INVALID_ARG);
// Check that we can clear the prefix as well
reader_.clearPrefix();
@@ -150,13 +150,13 @@ TEST_F(MessageReaderTest, Namespace) {
EXPECT_EQ(string(""), reader_.getNamespace());
// Check that a $NAMESPACE directive with no argument generates an error.
- processLineException(reader_, "$NAMESPACE", MSG_NSNOARG);
+ processLineException(reader_, "$NAMESPACE", LOG_NAMESPACE_NO_ARGS);
// Check a $NAMESPACE with multiple arguments is invalid
- processLineException(reader_, "$namespace A B", MSG_NSEXTRARG);
+ processLineException(reader_, "$namespace A B", LOG_NAMESPACE_EXTRA_ARGS);
// Namespaces should be alphanumeric (with underscores and colons)
- processLineException(reader_, "$namespace ab[cd", MSG_NSINVARG);
+ processLineException(reader_, "$namespace ab[cd", LOG_NAMESPACE_INVALID_ARG);
// A valid $NAMESPACE should be accepted
EXPECT_NO_THROW(reader_.processLine("$NAMESPACE isc"));
@@ -176,7 +176,7 @@ TEST_F(MessageReaderTest, Namespace) {
EXPECT_EQ(string("::"), reader_.getNamespace());
// ... and that another $NAMESPACE is rejected
- processLineException(reader_, "$NAMESPACE ABC", MSG_DUPLNS);
+ processLineException(reader_, "$NAMESPACE ABC", LOG_DUPLICATE_NAMESPACE);
}
// Check that it can parse a line
diff --git a/src/lib/log/tests/output_option_unittest.cc b/src/lib/log/tests/output_option_unittest.cc
new file mode 100644
index 0000000..8f0e0de
--- /dev/null
+++ b/src/lib/log/tests/output_option_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <log/output_option.h>
+
+using namespace isc::log;
+using namespace std;
+
+// As OutputOption is a struct, the only meaningful test is to check that it
+// initializes correctly.
+
+TEST(OutputOptionTest, Initialization) {
+ OutputOption option;
+
+ EXPECT_EQ(OutputOption::DEST_CONSOLE, option.destination);
+ EXPECT_EQ(OutputOption::STR_STDERR, option.stream);
+ EXPECT_FALSE(option.flush);
+ EXPECT_EQ(string("LOCAL0"), option.facility);
+ EXPECT_EQ(string(""), option.filename);
+ EXPECT_EQ(0, option.maxsize);
+ EXPECT_EQ(0, option.maxver);
+}
+
+TEST(OutputOption, getDestination) {
+ EXPECT_EQ(OutputOption::DEST_CONSOLE, getDestination("console"));
+ EXPECT_EQ(OutputOption::DEST_CONSOLE, getDestination("CONSOLE"));
+ EXPECT_EQ(OutputOption::DEST_CONSOLE, getDestination("CoNSoLE"));
+ EXPECT_EQ(OutputOption::DEST_FILE, getDestination("file"));
+ EXPECT_EQ(OutputOption::DEST_FILE, getDestination("FILE"));
+ EXPECT_EQ(OutputOption::DEST_FILE, getDestination("fIlE"));
+ EXPECT_EQ(OutputOption::DEST_SYSLOG, getDestination("syslog"));
+ EXPECT_EQ(OutputOption::DEST_SYSLOG, getDestination("SYSLOG"));
+ EXPECT_EQ(OutputOption::DEST_SYSLOG, getDestination("SYSlog"));
+
+ // bad values should default to DEST_CONSOLE
+ EXPECT_EQ(OutputOption::DEST_CONSOLE, getDestination("SOME_BAD_VALUE"));
+}
+
+TEST(OutputOption, getStream) {
+ EXPECT_EQ(OutputOption::STR_STDOUT, getStream("stdout"));
+ EXPECT_EQ(OutputOption::STR_STDOUT, getStream("STDOUT"));
+ EXPECT_EQ(OutputOption::STR_STDOUT, getStream("STdouT"));
+ EXPECT_EQ(OutputOption::STR_STDERR, getStream("stderr"));
+ EXPECT_EQ(OutputOption::STR_STDERR, getStream("STDERR"));
+ EXPECT_EQ(OutputOption::STR_STDERR, getStream("StDeRR"));
+
+ // bad values should default to stdout
+ EXPECT_EQ(OutputOption::STR_STDOUT, getStream("some bad value"));
+ EXPECT_EQ(OutputOption::STR_STDOUT, getStream(""));
+}
+
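
The case-insensitive parsing and console default exercised above could be
implemented along these lines - a sketch under stated assumptions, using a
hypothetical stand-in enumeration rather than the real OutputOption type:

    #include <algorithm>
    #include <ctype.h>      // ::tolower
    #include <string>

    // Hypothetical stand-in for the real destination enumeration.
    enum Destination { DEST_CONSOLE, DEST_FILE, DEST_SYSLOG };

    // Case-insensitive keyword lookup; unrecognised values fall back to the
    // console, matching what the tests above expect of getDestination().
    Destination destinationFromKeyword(std::string keyword) {
        std::transform(keyword.begin(), keyword.end(), keyword.begin(),
                       ::tolower);
        if (keyword == "file") {
            return (DEST_FILE);
        } else if (keyword == "syslog") {
            return (DEST_SYSLOG);
        }
        return (DEST_CONSOLE);
    }
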
diff --git a/src/lib/log/tests/root_logger_name_unittest.cc b/src/lib/log/tests/root_logger_name_unittest.cc
deleted file mode 100644
index 8665794..0000000
--- a/src/lib/log/tests/root_logger_name_unittest.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <gtest/gtest.h>
-
-#include <log/root_logger_name.h>
-
-using namespace isc;
-using namespace isc::log;
-
-class RootLoggerNameTest : public ::testing::Test {
-protected:
- RootLoggerNameTest()
- {
- }
-};
-
-// Check of the (only) functionality of the class.
-
-TEST_F(RootLoggerNameTest, SetGet) {
- const std::string name1 = "test1";
- const std::string name2 = "test2";
-
- // Check that Set/Get works
- setRootLoggerName(name1);
- EXPECT_EQ(name1, getRootLoggerName());
-
- // We could not test that the root logger name is initialised
- // correctly (as there is one instance of it and we don't know
- // when this test will be run) so to check that setName() actually
- // does change the name, run the test again with a different name.
- //
- // (There was always the outside chance that the root logger name
- // was initialised with name1 and that setName() has no effect.)
- setRootLoggerName(name2);
- EXPECT_EQ(name2, getRootLoggerName());
-}
diff --git a/src/lib/log/tests/run_time_init_test.sh.in b/src/lib/log/tests/run_time_init_test.sh.in
deleted file mode 100755
index e48a781..0000000
--- a/src/lib/log/tests/run_time_init_test.sh.in
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/sh
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-failcount=0
-localmes=@abs_builddir@/localdef_mes_$$
-tempfile=@abs_builddir@/run_time_init_test_tempfile_$$
-
-passfail() {
- if [ $1 -eq 0 ]; then
- echo "pass"
- else
- echo "FAIL"
- fi
- failcount=`expr $failcount + $1`
-}
-
-# Create the local message file for testing
-
-cat > $localmes << .
-\$PREFIX MSG_
-% NOTHERE this message is not in the global dictionary
-% READERR replacement read error, parameters: '%1' and '%2'
-% RDLOCMES replacement read local message file, parameter is '%1'
-.
-
-echo -n "1. runInitTest default parameters: "
-cat > $tempfile << .
-FATAL [alpha.example] MSG_WRITERR, error writing to test1: 42
-ERROR [alpha.example] MSG_RDLOCMES, reading local message file dummy/file
-WARN [alpha.dlm] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO [alpha.dlm] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-.
-./logger_support_test | cut -d' ' -f3- | diff $tempfile -
-passfail $?
-
-echo -n "2. Severity filter: "
-cat > $tempfile << .
-FATAL [alpha.example] MSG_WRITERR, error writing to test1: 42
-ERROR [alpha.example] MSG_RDLOCMES, reading local message file dummy/file
-.
-./logger_support_test -s error | cut -d' ' -f3- | diff $tempfile -
-passfail $?
-
-echo -n "3. Debug level: "
-cat > $tempfile << .
-FATAL [alpha.example] MSG_WRITERR, error writing to test1: 42
-ERROR [alpha.example] MSG_RDLOCMES, reading local message file dummy/file
-WARN [alpha.dlm] MSG_READERR, error reading from message file a.txt: dummy reason
-INFO [alpha.dlm] MSG_OPENIN, unable to open message file example.msg for input: dummy reason
-DEBUG [alpha.example] MSG_RDLOCMES, reading local message file dummy/0
-DEBUG [alpha.example] MSG_RDLOCMES, reading local message file dummy/24
-DEBUG [alpha.example] MSG_RDLOCMES, reading local message file dummy/25
-.
-./logger_support_test -s debug -d 25 | cut -d' ' -f3- | diff $tempfile -
-passfail $?
-
-echo -n "4. Local message replacement: "
-cat > $tempfile << .
-WARN [alpha.log] MSG_IDNOTFND, could not replace message text for 'MSG_NOTHERE': no such message
-FATAL [alpha.example] MSG_WRITERR, error writing to test1: 42
-ERROR [alpha.example] MSG_RDLOCMES, replacement read local message file, parameter is 'dummy/file'
-WARN [alpha.dlm] MSG_READERR, replacement read error, parameters: 'a.txt' and 'dummy reason'
-.
-./logger_support_test -s warn $localmes | cut -d' ' -f3- | diff $tempfile -
-passfail $?
-
-rm -f $localmes
-rm -f $tempfile
-
-if [ $failcount -eq 0 ]; then
- echo "PASS: run_time_init_test"
-elif [ $failcount -eq 1 ]; then
- echo "FAIL: run_time_init_test - 1 test failed"
-else
- echo "FAIL: run_time_init_test - $failcount tests failed"
-fi
-
-exit $failcount
diff --git a/src/lib/log/tests/run_unittests.cc b/src/lib/log/tests/run_unittests.cc
index bd3c4c9..8a9d1e5 100644
--- a/src/lib/log/tests/run_unittests.cc
+++ b/src/lib/log/tests/run_unittests.cc
@@ -13,9 +13,13 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+
+#include <log/logger_support.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ isc::log::initLogger();
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/log/tests/severity_test.sh.in b/src/lib/log/tests/severity_test.sh.in
new file mode 100755
index 0000000..78d5050
--- /dev/null
+++ b/src/lib/log/tests/severity_test.sh.in
@@ -0,0 +1,89 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that the logger will limit the output of messages less severe than
+# the severity/debug setting.
+
+testname="Severity test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/severity_test_tempfile_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+echo -n "1. Default parameters:"
+cat > $tempfile << .
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+INFO [example.alpha] LOG_INPUT_OPEN_FAIL unable to open message file example.msg for input: dummy reason
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
+.
+./logger_example -c stdout | cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n "2. Severity filter:"
+cat > $tempfile << .
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+.
+./logger_example -c stdout -s error | cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n "3. Debug level:"
+cat > $tempfile << .
+FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
+ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
+WARN [example] LOG_BAD_STREAM bad log console output stream: example
+WARN [example.alpha] LOG_READ_ERROR error reading from message file a.txt: dummy reason
+INFO [example.alpha] LOG_INPUT_OPEN_FAIL unable to open message file example.msg for input: dummy reason
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/0
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/24
+DEBUG [example] LOG_READING_LOCAL_FILE reading local message file example/25
+FATAL [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta_fatal
+ERROR [example.beta] LOG_BAD_DESTINATION unrecognized log destination: beta_error
+WARN [example.beta] LOG_BAD_STREAM bad log console output stream: beta_warn
+INFO [example.beta] LOG_READ_ERROR error reading from message file beta: info
+DEBUG [example.beta] LOG_BAD_SEVERITY unrecognized log severity: beta/25
+.
+./logger_example -c stdout -s debug -d 25 | cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up
+rm -f $tempfile
+
+exit $failcount
diff --git a/src/lib/log/tests/tempdir.h.in b/src/lib/log/tests/tempdir.h.in
new file mode 100644
index 0000000..366fea3
--- /dev/null
+++ b/src/lib/log/tests/tempdir.h.in
@@ -0,0 +1,29 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __TEMPDIR_H
+#define __TEMPDIR_H
+
+/// \brief Define temporary directory
+///
+/// Defines the temporary directory in which temporary files used by the
+/// unit tests are created.
+
+#include <string>
+
+namespace {
+std::string TEMP_DIR("@builddir@");
+}
+
+#endif // __TEMPDIR_H
diff --git a/src/lib/log/tests/xdebuglevel_unittest.cc b/src/lib/log/tests/xdebuglevel_unittest.cc
deleted file mode 100644
index ca80e5a..0000000
--- a/src/lib/log/tests/xdebuglevel_unittest.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <iostream>
-#include <string>
-
-#include <gtest/gtest.h>
-
-#include <log4cxx/level.h>
-#include <log/xdebuglevel.h>
-#include <log/debug_levels.h>
-
-/// \brief XDebugLevel (Debug Extension to Level Class)
-///
-/// The class is an extension of the log4cxx Level class; this set of tests
-/// only test the extensions, they do not test the underlying Level class
-/// itself.
-
-using namespace log4cxx;
-
-class XDebugLevelTest : public ::testing::Test {
-protected:
- XDebugLevelTest()
- {
- }
-};
-
-// Check a basic assertion about the numeric values of the debug levels
-
-TEST_F(XDebugLevelTest, NumericValues) {
- EXPECT_EQ(XDebugLevel::XDEBUG_MIN_LEVEL_INT, Level::DEBUG_INT);
- EXPECT_EQ(XDebugLevel::XDEBUG_MAX_LEVEL_INT,
- Level::DEBUG_INT - MAX_DEBUG_LEVEL);
-
- // ... and check that assumptions used below - that the debug levels
- // range from 0 to 99 - are valid.
- EXPECT_EQ(0, MIN_DEBUG_LEVEL);
- EXPECT_EQ(99, MAX_DEBUG_LEVEL);
-}
-
-
-// Checks that the main function for generating logging level objects from
-// debug levels is working.
-
-TEST_F(XDebugLevelTest, GetExtendedDebug) {
-
- // Get a debug level of 0. This should be the same as the main DEBUG
- // level.
- LevelPtr debug0 = XDebugLevel::getExtendedDebug(0);
- EXPECT_EQ(std::string("DEBUG"), debug0->toString());
- EXPECT_EQ(Level::DEBUG_INT, debug0->toInt());
- EXPECT_TRUE(*Level::getDebug() == *debug0);
-
- // Get an arbitrary debug level in the allowed range.
- LevelPtr debug32 = XDebugLevel::getExtendedDebug(32);
- EXPECT_EQ(std::string("DEBUG32"), debug32->toString());
- EXPECT_TRUE((XDebugLevel::XDEBUG_MIN_LEVEL_INT - 32) == debug32->toInt());
-
- // Check that a value outside the range gives the nearest level.
- LevelPtr debug_more = XDebugLevel::getExtendedDebug(MAX_DEBUG_LEVEL + 1);
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MAX_DEBUG_LEVEL) == *debug_more);
-
- LevelPtr debug_less = XDebugLevel::getExtendedDebug(MIN_DEBUG_LEVEL - 1);
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MIN_DEBUG_LEVEL) == *debug_less);
-}
-
-
-// Creation of a level from an int - should return the default debug level
-// if outside the range.
-
-TEST_F(XDebugLevelTest, FromIntOneArg) {
-
- // Check that a valid debug level is as expected
- LevelPtr debug42 = XDebugLevel::toLevel(
- XDebugLevel::XDEBUG_MIN_LEVEL_INT - 42);
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(42) == *debug42);
-
- // ... and that an invalid one returns an object of type debug.
- LevelPtr debug_invalid = XDebugLevel::toLevel(Level::getInfo()->toInt());
- EXPECT_TRUE(*Level::getDebug() == *debug_invalid);
-}
-
-
-// Creation of a level from an int - should return the default level
-// if outside the range.
-
-TEST_F(XDebugLevelTest, FromIntTwoArg) {
-
- // Check that a valid debug level is as expected
- LevelPtr debug42 = XDebugLevel::toLevel(
- (XDebugLevel::XDEBUG_MIN_LEVEL_INT - 42), Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(42) == *debug42);
-
- // ... and that an invalid one returns an object of type debug.
- LevelPtr debug_invalid = XDebugLevel::toLevel(
- Level::getInfo()->toInt(), Level::getFatal());
- EXPECT_TRUE(*Level::getFatal() == *debug_invalid);
-}
-
-
-// Creation of a level from a string - should return the default debug level
-// if outside the range.
-
-TEST_F(XDebugLevelTest, FromStringOneArg) {
-
- // Check that a valid debug levels are as expected
- LevelPtr debug85 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG85"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(85) == *debug85);
-
- LevelPtr debug92 = XDebugLevel::toLevelLS(LOG4CXX_STR("debug92"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(92) == *debug92);
-
- LevelPtr debug27 = XDebugLevel::toLevelLS(LOG4CXX_STR("Debug27"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(27) == *debug27);
-
- LevelPtr debug0 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug0);
-
- // ... and that an invalid one returns an object of type debug (which is
- // the equivalent of a debug level 0 object).
- LevelPtr debug_invalid1 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBU"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug_invalid1);
-
- LevelPtr debug_invalid2 = XDebugLevel::toLevelLS(LOG4CXX_STR("EBU"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug_invalid2);
-
- LevelPtr debug_invalid3 = XDebugLevel::toLevelLS(LOG4CXX_STR(""));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug_invalid3);
-
- LevelPtr debug_invalid4 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUGTEN"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug_invalid4);
-
- LevelPtr debug_invalid5 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG105"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MAX_DEBUG_LEVEL) ==
- *debug_invalid5);
-
- LevelPtr debug_invalid6 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG-7"));
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MIN_DEBUG_LEVEL) ==
- *debug_invalid6);
-}
-
-
-// Creation of a level from a string - should return the default level
-// if outside the range.
-
-TEST_F(XDebugLevelTest, FromStringTwoArg) {
-
- // Check that a valid debug levels are as expected
- LevelPtr debug85 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG85"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(85) == *debug85);
-
- LevelPtr debug92 = XDebugLevel::toLevelLS(LOG4CXX_STR("debug92"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(92) == *debug92);
-
- LevelPtr debug27 = XDebugLevel::toLevelLS(LOG4CXX_STR("Debug27"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(27) == *debug27);
-
- LevelPtr debug0 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(0) == *debug0);
-
- // ... and that an invalid one returns an object of type debug (which is
- // the equivalent of a debug level 0 object).
- LevelPtr debug_invalid1 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBU"),
- Level::getFatal());
- EXPECT_TRUE(*Level::getFatal() == *debug_invalid1);
-
- LevelPtr debug_invalid2 = XDebugLevel::toLevelLS(LOG4CXX_STR("EBU"),
- Level::getFatal());
- EXPECT_TRUE(*Level::getFatal() == *debug_invalid2);
-
- LevelPtr debug_invalid3 = XDebugLevel::toLevelLS(LOG4CXX_STR(""),
- Level::getFatal());
- EXPECT_TRUE(*Level::getFatal() == *debug_invalid3);
-
- LevelPtr debug_invalid4 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUGTEN"),
- Level::getFatal());
- EXPECT_TRUE(*Level::getFatal() == *debug_invalid4);
-
- LevelPtr debug_invalid5 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG105"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MAX_DEBUG_LEVEL) ==
- *debug_invalid5);
-
- LevelPtr debug_invalid6 = XDebugLevel::toLevelLS(LOG4CXX_STR("DEBUG-7"),
- Level::getFatal());
- EXPECT_TRUE(*XDebugLevel::getExtendedDebug(MIN_DEBUG_LEVEL) ==
- *debug_invalid6);
-}
diff --git a/src/lib/log/xdebuglevel.cc b/src/lib/log/xdebuglevel.cc
deleted file mode 100644
index c17a515..0000000
--- a/src/lib/log/xdebuglevel.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <cassert>
-#include <algorithm>
-#include <syslog.h>
-#include <string.h>
-#include <boost/lexical_cast.hpp>
-
-#include <xdebuglevel.h>
-#include <debug_levels.h>
-#include <log4cxx/helpers/stringhelper.h>
-
-using namespace log4cxx;
-using namespace log4cxx::helpers;
-
-// Storage for the logging level objects corresponding to each debug level
-
-bool XDebugLevel::dbglevels_unset_ = true;
-LevelPtr XDebugLevel::dbglevels_[NUM_DEBUG_LEVEL];
-
-// Register the class
-
-IMPLEMENT_LOG4CXX_LEVEL(XDebugLevel)
-
-
-// Create Extended Debug Level Objects
-
-LevelPtr
-XDebugLevel::getExtendedDebug(int level) {
-
- // Initialize the logging levels corresponding to the possible range of
- // debug if we have not already done so
- if (dbglevels_unset_) {
-
- // Asserting that the minimum debug level is zero - so corresponds
- // to DEBUG_INT - means that the lowest level is set to main DEBUG
- // level. This means that the existing logging level object can be
- // used.
- assert(MIN_DEBUG_LEVEL == 0);
- dbglevels_[0] = Level::getDebug();
-
- // Create the logging level objects for the rest of the debug levels.
- // They are given names of the form DEBUG<debug level> (e.g. DEBUG42).
- // They will all correspond to a syslog level of DEBUG.
- for (int i = 1; i < NUM_DEBUG_LEVEL; ++i) {
- std::string name = std::string("DEBUG") +
- boost::lexical_cast<std::string>(i);
- dbglevels_[i] = new XDebugLevel(
- (XDebugLevel::XDEBUG_MIN_LEVEL_INT - i),
- LOG4CXX_STR(name.c_str()), LOG_DEBUG);
- }
- dbglevels_unset_ = false;
- }
-
- // Now get the logging level object asked for. Coerce the debug level to
- // lie in the acceptable range.
- int actual = std::max(MIN_DEBUG_LEVEL, std::min(MAX_DEBUG_LEVEL, level));
-
- // ... and return a pointer to the appropriate logging level object
- return (dbglevels_[actual - MIN_DEBUG_LEVEL]);
-}
-
-// Convert an integer (an absolute logging level number, not a debug level) to a
-// logging level object. If it lies outside the valid range, an object
-// corresponding to the minimum debug value is returned.
-
-LevelPtr
-XDebugLevel::toLevel(int val) {
- return (toLevel(val, getExtendedDebug(MIN_DEBUG_LEVEL)));
-}
-
-LevelPtr
-XDebugLevel::toLevel(int val, const LevelPtr& defaultLevel) {
-
- // Note the reversal of the notion of MIN and MAX - see the header file for
- // details.
- if ((val >= XDEBUG_MAX_LEVEL_INT) && (val <= XDEBUG_MIN_LEVEL_INT)) {
- return (getExtendedDebug(XDEBUG_MIN_LEVEL_INT - val));
- }
- else {
- return (defaultLevel);
- }
-}
-
-// Convert string passed to a logging level or return default level.
-
-LevelPtr
-XDebugLevel::toLevelLS(const LogString& sArg) {
- return (toLevelLS(sArg, getExtendedDebug(0)));
-}
-
-LevelPtr
-XDebugLevel::toLevelLS(const LogString& sArg, const LevelPtr& defaultLevel) {
- std::string name = sArg; // Get to known type
- size_t length = name.size(); // Length of the string
-
- if (length < 5) {
-
- // String can't possibly start DEBUG so we don't know what it is.
- return (defaultLevel);
- }
- else {
- if (strncasecmp(name.c_str(), "DEBUG", 5) == 0) {
-
- // String starts "DEBUG" (or "debug" or any case mixture). The
- // rest of the string -if any - should be a number.
- if (length == 5) {
-
- // It is plain "DEBUG". Take this as level 0.
- return (getExtendedDebug(0));
- }
- else {
-
- // Try converting the remainder to an integer. The "5" is
- // the length of the string "DEBUG". Note that if the number
- // is outside the rangeof debug levels, it is coerced to the
- // nearest limit. Thus a level of DEBUG509 will end up as
- // if DEBUG99 has been specified.
- try {
- int level = boost::lexical_cast<int>(name.substr(5));
- return (getExtendedDebug(level));
- }
- catch ((boost::bad_lexical_cast&) ){
- return (defaultLevel);
- }
- }
- }
- else {
-
- // Unknown string - return default.
- return (defaultLevel);
- }
- }
-}
diff --git a/src/lib/log/xdebuglevel.h b/src/lib/log/xdebuglevel.h
deleted file mode 100644
index e580b77..0000000
--- a/src/lib/log/xdebuglevel.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __XDEBUGLEVEL_H
-#define __XDEBUGLEVEL_H
-
-#include <syslog.h>
-#include <log4cxx/level.h>
-
-#include <debug_levels.h>
-
-namespace log4cxx {
-
-/// \brief Debug Extension to Level Class
-///
-/// Based on the example given in the log4cxx distribution, this extends the
-/// log4cxx Level class to allow 100 debug levels.
-///
-/// First some terminology, as the use of the term "level" gets confusing. The
-/// code and comments here use the term "level" in two contexts:
-///
-/// Logging level: The category of messages to log. By default log4cxx defines
-/// the following logging levels: OFF, FATAL, ERROR, WARNING, INFO, DEBUG,
-/// TRACE, ALL. Within the context of BIND-10, OFF, TRACE and ALL are not used
-/// and the idea of DEBUG has been extended, as will be seen below.
-///
-/// Debug level: This is a number that ranges from 0 to 99 and is used by the
-/// application to control the detail of debug output. A value of 0 gives the
-/// highest-level debug output; a value of 99 gives the most verbose and most
-/// detailed. Debug messages (or whatever debug level) are only ever output
-/// when the logging level is set to DEBUG.
-///
-///
-/// With log4cxx, the various logging levels have a numeric value associated
-/// with them, such that FATAL > ERROR > WARNING etc. This suggests that the
-/// idea of debug levels can be incorporated into the existing logging level
-/// scheme by assigning them appropriate numeric values, i.e.
-///
-/// WARNING > INFO > DEBUG(0) > DEBUG(2) > ... > DEBUG(99)
-///
-/// Setting a numeric level of DEBUG enables the basic messages; setting lower
-/// numeric levels will enable progressively more messages. The lowest debug
-/// level (0) is chosen such that setting the general DEBUG logging level will
-/// automatically select that debug level.
-///
-/// This sub-class is needed because the log4cxx::Level class does not allow
-/// the setting of the numeric value of the current level to something other
-/// than the values enumerated in the class. It creates a set of log4cxx
-/// logging levels to correspond to the various debug levels. These levels have
-/// names in the range DEBUG1 to DEBUG99 (the existing Level DEBUG is used for
-/// a debug level of 0), although they are not used in BIND-10: instead the
-/// BIND-10 Logger class treats the logging levels and debug levels separately
-/// and combines them to choose the underlying log4cxx logging level.
-
-
-/// \brief Debug-Extended Level
-
-class XDebugLevel : public Level {
- DECLARE_LOG4CXX_LEVEL(XDebugLevel)
-
- /// Array of pointers to logging level objects, one for each debug level.
- /// The pointer corresponding to a debug level of 0 points to the DEBUG
- /// logging level object.
- static LevelPtr dbglevels_[NUM_DEBUG_LEVEL];
- static bool dbglevels_unset_;
-
-public:
-
- // Minimum and maximum debug levels. Note that XDEBUG_MIN_LEVEL_INT is the
- // number corresponding to the minimum debug level - and is actually larger
- // that XDEBUG_MAX_LEVEL_INT, the number corresponding to the maximum debug
- // level.
- enum {
- XDEBUG_MIN_LEVEL_INT = Level::DEBUG_INT - MIN_DEBUG_LEVEL,
- XDEBUG_MAX_LEVEL_INT = Level::DEBUG_INT - MAX_DEBUG_LEVEL
- };
-
- /// \brief Constructor
- ///
- /// \param level Numeric value of the logging level.
- /// \param name Name given to this logging level.
- /// \param syslogEquivalent The category to be used by syslog when it logs
- /// an event associated with the specified logging level.
- XDebugLevel(int level, const LogString& name, int syslogEquivalent) :
- Level(level, name, syslogEquivalent)
- {}
-
- /// \brief Create Logging Level Object
- ///
- /// Creates a logging level object corresponding to one of the debug levels.
- ///
- /// \param dbglevel The debug level, which ranges from MIN_DEBUG_LEVEL to
- /// MAX_DEBUG_LEVEL. It is coerced to that range if it lies outside it.
- ///
- /// \return Pointer to the desired logging level object.
- static LevelPtr getExtendedDebug(int dbglevel);
-
- /// \brief Convert Integer to a Logging Level
- ///
- /// Returns a logging level object corresponding to the given value (which
- /// is an absolute value of a logging level - it is not a debug level).
- /// If the number is invalid, an object of logging level DEBUG (the
- /// minimum debug logging level) is returned.
- ///
- /// \param val Number to convert to a logging level. This is an absolute
- /// logging level number, not a debug level.
- ///
- /// \return Pointer to the desired logging level object.
- static LevelPtr toLevel(int val);
-
- /// \brief Convert Integer to a Level
- ///
- /// Returns a logging level object corresponding to the given value (which
- /// is an absolute value of a logging level - it is not a debug level).
- /// If the number is invalid, the given default is returned.
- ///
- /// \param val Number to convert to a logging level. This is an absolute
- /// logging level number, not a debug level.
- /// \param defaultLevel Logging level to return if value is not recognised.
- ///
- /// \return Pointer to the desired logging level object.
- static LevelPtr toLevel(int val, const LevelPtr& defaultLevel);
-
- /// \brief Convert String to Logging Level
- ///
- /// Returns a logging level object corresponding to the given name. If the
- /// name is invalid, an object of logging level DEBUG (the minimum debug
- /// logging level) is returned.
- ///
- /// \param sArg Name of the logging level.
- ///
- /// \return Pointer to the desired logging level object.
- static LevelPtr toLevelLS(const LogString& sArg);
-
- /// \brief Convert String to Logging Level
- ///
- /// Returns a logging level object corresponding to the given name. If the
- /// name is invalid, the given default is returned.
- ///
- /// \param sArg name of the level.
- /// \param defaultLevel Logging level to return if name doesn't exist.
- ///
- /// \return Pointer to the desired logging level object.
- static LevelPtr toLevelLS(const LogString& sArg,
- const LevelPtr& defaultLevel);
-};
-
-} // namespace log4cxx
-
-
-#endif // __XDEBUGLEVEL_H
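For reference, the removed header above describes a scheme in which debug levels 0 to 99 are mapped onto numeric log4cxx levels just below DEBUG, out-of-range values are coerced to the nearest limit, and strings of the form "DEBUGnn" are parsed into debug levels. The following is only an illustrative Python sketch of that scheme; the DEBUG_INT constant and the helper names are placeholders, not part of log4cxx or BIND 10.

    MIN_DEBUG_LEVEL = 0     # least detailed debug output
    MAX_DEBUG_LEVEL = 99    # most detailed debug output
    DEBUG_INT = 10000       # placeholder standing in for log4cxx Level::DEBUG_INT

    def coerce_debug_level(dbglevel):
        """Clamp a requested debug level to the supported 0..99 range."""
        return max(MIN_DEBUG_LEVEL, min(MAX_DEBUG_LEVEL, dbglevel))

    def extended_debug_int(dbglevel):
        """Numeric logging level for a debug level: the higher the debug
        level, the lower (more verbose) the numeric logging level."""
        return DEBUG_INT - coerce_debug_level(dbglevel)

    def debug_level_from_string(name, default=0):
        """Parse "DEBUG" or "DEBUGnn" (any case); unknown strings give the default."""
        if not name.upper().startswith("DEBUG"):
            return default
        rest = name[5:]
        if rest == "":
            return 0                      # plain "DEBUG" means debug level 0
        try:
            return coerce_debug_level(int(rest))
        except ValueError:
            return default

    # As the removed comments note, DEBUG509 ends up treated as DEBUG99.
    assert debug_level_from_string("DEBUG509") == 99
    assert extended_debug_int(0) == DEBUG_INT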
diff --git a/src/lib/nsas/Makefile.am b/src/lib/nsas/Makefile.am
index 3ecbca7..663afba 100644
--- a/src/lib/nsas/Makefile.am
+++ b/src/lib/nsas/Makefile.am
@@ -22,19 +22,19 @@ AM_CXXFLAGS += -Wno-unused-parameter
endif
# Define rule to build logging source files from message file
-nsasdef.h nsasdef.cc: nsasdef.mes
- $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/nsas/nsasdef.mes
+nsas_messages.h nsas_messages.cc: nsas_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/nsas/nsas_messages.mes
# What is being built.
lib_LTLIBRARIES = libnsas.la
-# Tell Automake that the nsasdef.{cc,h} source files are created in the build
+# Tell Automake that the nsas_messages.{cc,h} source files are created in the build
# process, so it must create these before doing anything else. Although they
# are a dependency of the library (so will be created from the message file
# anyway), there is no guarantee as to exactly _when_ in the build they will be
# created. As the .h file is included in other sources file (so must be
# present when they are compiled), the safest option is to create it first.
-BUILT_SOURCES = nsasdef.h nsasdef.cc
+BUILT_SOURCES = nsas_messages.h nsas_messages.cc
# Library sources. The generated files will not be in the distribution.
libnsas_la_SOURCES = address_entry.h address_entry.cc
@@ -54,10 +54,10 @@ libnsas_la_SOURCES += fetchable.h
libnsas_la_SOURCES += address_request_callback.h
libnsas_la_SOURCES += glue_hints.h glue_hints.cc
-nodist_libnsas_la_SOURCES = nsasdef.h nsasdef.cc
+nodist_libnsas_la_SOURCES = nsas_messages.h nsas_messages.cc
# The message file should be in the distribution.
-EXTRA_DIST = nsasdef.mes
+EXTRA_DIST = nsas_messages.mes
# Make sure that the generated files are got rid of in a clean operation
-CLEANFILES = *.gcno *.gcda nsasdef.h nsasdef.cc
+CLEANFILES = *.gcno *.gcda nsas_messages.h nsas_messages.cc
diff --git a/src/lib/nsas/nameserver_address_store.cc b/src/lib/nsas/nameserver_address_store.cc
index ac55409..867f028 100644
--- a/src/lib/nsas/nameserver_address_store.cc
+++ b/src/lib/nsas/nameserver_address_store.cc
@@ -32,7 +32,6 @@
#include "zone_entry.h"
#include "glue_hints.h"
#include "address_request_callback.h"
-#include "nsasdef.h"
#include "nsas_log.h"
using namespace isc::dns;
@@ -87,7 +86,7 @@ NameserverAddressStore::lookup(const string& zone, const RRClass& class_code,
boost::shared_ptr<AddressRequestCallback> callback, AddressFamily family,
const GlueHints& glue_hints)
{
- LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_LOOKUPZONE).arg(zone);
+ LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_SEARCH_ZONE_NS).arg(zone);
pair<bool, boost::shared_ptr<ZoneEntry> > zone_obj(
zone_hash_->getOrAdd(HashKey(zone, class_code),
@@ -108,7 +107,7 @@ NameserverAddressStore::cancel(const string& zone,
const boost::shared_ptr<AddressRequestCallback>& callback,
AddressFamily family)
{
- LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_LOOKUPCANCEL).arg(zone);
+ LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_LOOKUP_CANCEL).arg(zone);
boost::shared_ptr<ZoneEntry> entry(zone_hash_->get(HashKey(zone,
class_code)));
diff --git a/src/lib/nsas/nameserver_address_store.h b/src/lib/nsas/nameserver_address_store.h
index 87845c9..1af535a 100644
--- a/src/lib/nsas/nameserver_address_store.h
+++ b/src/lib/nsas/nameserver_address_store.h
@@ -92,7 +92,10 @@ public:
/// \brief cancel the given lookup action
///
- /// \param callback Callback object that would be called
+ /// \param zone Name of zone.
+ /// \param class_code Class of the zone.
+ /// \param callback Callback object that would be called.
+ /// \param family Address family for which lookup is being cancelled.
void cancel(const std::string& zone, const dns::RRClass& class_code,
const boost::shared_ptr<AddressRequestCallback>& callback,
AddressFamily family = ANY_OK);
diff --git a/src/lib/nsas/nameserver_entry.cc b/src/lib/nsas/nameserver_entry.cc
index 65b2ec2..553c35d 100644
--- a/src/lib/nsas/nameserver_entry.cc
+++ b/src/lib/nsas/nameserver_entry.cc
@@ -179,7 +179,7 @@ NameserverEntry::updateAddressRTTAtIndex(uint32_t rtt, size_t index,
new_rtt = 1;
}
addresses_[family][index].setRTT(new_rtt);
- LOG_DEBUG(nsas_logger, NSAS_DBG_RTT, NSAS_SETRTT)
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RTT, NSAS_UPDATE_RTT)
.arg(addresses_[family][index].getAddress().toText())
.arg(old_rtt).arg(new_rtt);
}
@@ -234,7 +234,7 @@ class NameserverEntry::ResolverCallback :
if (!response_message ||
response_message->getRcode() != isc::dns::Rcode::NOERROR() ||
response_message->getRRCount(isc::dns::Message::SECTION_ANSWER) == 0) {
- LOG_ERROR(nsas_logger, NSAS_INVRESPSTR).arg(entry_->getName());
+ LOG_ERROR(nsas_logger, NSAS_INVALID_RESPONSE).arg(entry_->getName());
failureInternal(lock);
return;
}
@@ -249,7 +249,7 @@ class NameserverEntry::ResolverCallback :
response->getClass() != RRClass(entry_->getClass()))
{
// Invalid response type or class
- LOG_ERROR(nsas_logger, NSAS_INVRESPTC)
+ LOG_ERROR(nsas_logger, NSAS_WRONG_ANSWER)
.arg(entry_->getName()).arg(type_)
.arg(entry_->getClass()).arg(response->getType())
.arg(response->getClass());
@@ -276,7 +276,7 @@ class NameserverEntry::ResolverCallback :
// If we found it, use it. If not, create a new one.
entries.push_back(found ? *found : AddressEntry(
IOAddress(address), 1));
- LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_NSLKUPSUCC)
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_FOUND_ADDRESS)
.arg(address).arg(entry_->getName());
}
@@ -322,7 +322,7 @@ class NameserverEntry::ResolverCallback :
* So mark the current address family as unreachable.
*/
virtual void failure() {
- LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_NSLKUPFAIL)
+ LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_NS_LOOKUP_FAIL)
.arg(type_).arg(entry_->getName());
Lock lock(entry_->mutex_);
failureInternal(lock);
@@ -437,7 +437,7 @@ NameserverEntry::askIP(isc::resolve::ResolverInterface* resolver,
// We are unlocked here, as the callback from that might want to lock
lock.unlock();
- LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_NSADDR).arg(getName());
+ LOG_DEBUG(nsas_logger, NSAS_DBG_TRACE, NSAS_FIND_NS_ADDRESS).arg(getName());
askIP(resolver, RRType::A(), V4_ONLY);
askIP(resolver, RRType::AAAA(), V6_ONLY);
// Make sure we end the routine when we are not locked
diff --git a/src/lib/nsas/nsas_log.h b/src/lib/nsas/nsas_log.h
index 9631988..031f46d 100644
--- a/src/lib/nsas/nsas_log.h
+++ b/src/lib/nsas/nsas_log.h
@@ -16,7 +16,7 @@
#define __NSAS_LOG__H
#include <log/macros.h>
-#include "nsasdef.h"
+#include "nsas_messages.h"
namespace isc {
namespace nsas {
@@ -29,15 +29,15 @@ namespace nsas {
// The first level traces normal operations - asking the NSAS for an address,
// and cancelling a lookup. It also records when the NSAS calls back to the
// resolver to resolve something.
-const int NSAS_DBG_TRACE = 10;
+const int NSAS_DBG_TRACE = DBGLVL_TRACE_BASIC;
// The next level extends the normal operations and records the results of the
// lookups.
-const int NSAS_DBG_RESULTS = 20;
+const int NSAS_DBG_RESULTS = DBGLVL_TRACE_BASIC_DATA;
// Additional information on the usage of the names - the RTT values obtained
// when queries were done.
-const int NSAS_DBG_RTT = 30;
+const int NSAS_DBG_RTT = DBGLVL_TRACE_DETAIL_DATA;
/// \brief NSAS Logger
diff --git a/src/lib/nsas/nsas_messages.mes b/src/lib/nsas/nsas_messages.mes
new file mode 100644
index 0000000..512fcd5
--- /dev/null
+++ b/src/lib/nsas/nsas_messages.mes
@@ -0,0 +1,69 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::nsas
+
+% NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+
+% NSAS_FOUND_ADDRESS found address %1 for %2
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+
+% NSAS_INVALID_RESPONSE queried for %1 but got invalid response
+The NSAS (nameserver address store - part of the resolver) made a query
+for an RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+
+% NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+
+% NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+
+% NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+
+% NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms
+A NSAS (nameserver address store - part of the resolver) debug message
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+
+% NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class.
+
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
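The NSAS_UPDATE_RTT entry above notes that the stored RTT is damped rather than replaced outright. The exact formula is not part of this diff; the sketch below merely illustrates the idea with an assumed exponentially weighted average, so the 0.7 weight and the function name are illustrative only.

    def damped_rtt(old_rtt_ms, measured_rtt_ms, weight=0.7):
        """Blend a new RTT sample into the stored value so a single outlier
        cannot swing future nameserver selection too far (assumed formula)."""
        return old_rtt_ms * weight + measured_rtt_ms * (1.0 - weight)

    # An 800 ms spike against a stored 50 ms only moves the value to 275 ms.
    print(damped_rtt(50, 800))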
diff --git a/src/lib/nsas/nsasdef.mes b/src/lib/nsas/nsasdef.mes
deleted file mode 100644
index 0f32d09..0000000
--- a/src/lib/nsas/nsasdef.mes
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-$PREFIX NSAS_
-$NAMESPACE isc::nsas
-
-% INVRESPSTR queried for %1 but got invalid response
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-
-% INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-
-% LOOKUPCANCEL lookup for zone %1 has been cancelled
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-
-% LOOKUPZONE searching NSAS for nameservers for zone %1
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-
-% NSADDR asking resolver to obtain A and AAAA records for %1
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-
-% NSLKUPFAIL failed to lookup any %1 for %2
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-
-% NSLKUPSUCC found address %1 for %2
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-
-% SETRTT reporting RTT for %1 as %2; new value is now %3
-A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
diff --git a/src/lib/nsas/tests/Makefile.am b/src/lib/nsas/tests/Makefile.am
index e9235ba..420e897 100644
--- a/src/lib/nsas/tests/Makefile.am
+++ b/src/lib/nsas/tests/Makefile.am
@@ -43,8 +43,8 @@ run_unittests_SOURCES += zone_entry_unittest.cc
run_unittests_SOURCES += fetchable_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD)
# NOTE: we may have to clean up this hack later (see the note in configure.ac)
if NEED_LIBBOOST_THREAD
@@ -56,6 +56,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/nsas/tests/run_unittests.cc b/src/lib/nsas/tests/run_unittests.cc
index bc672d0..e469e03 100644
--- a/src/lib/nsas/tests/run_unittests.cc
+++ b/src/lib/nsas/tests/run_unittests.cc
@@ -12,24 +12,13 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <config.h>
-#include <stdlib.h>
-
-#include <string>
-#include <boost/lexical_cast.hpp>
-
#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
#include <log/logger_support.h>
-
-using namespace std;
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
-
isc::log::initLogger();
-
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/nsas/zone_entry.h b/src/lib/nsas/zone_entry.h
index f772784..482b89f 100644
--- a/src/lib/nsas/zone_entry.h
+++ b/src/lib/nsas/zone_entry.h
@@ -66,7 +66,7 @@ public:
* different objects.
* \param nameserver_table Hashtable of NameServerEntry objects for
* this zone
- * \param namesever_lru LRU for the nameserver entries
+ * \param nameserver_lru LRU for the nameserver entries
* \todo Move to cc file, include the lookup (if NSAS uses resolver for
* everything)
*/
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index f7eb333..5924294 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -1,6 +1,7 @@
SUBDIRS = isc
python_PYTHON = bind10_config.py
+pythondir = $(pyexecdir)
# Explicitly define DIST_COMMON so ${python_PYTHON} is not included
# as we don't want the generated file included in distributed tarfile.
@@ -10,3 +11,7 @@ DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
EXTRA_DIST = bind10_config.py.in
CLEANFILES = bind10_config.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index fe4adb5..69b17ed 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -27,6 +27,7 @@ def reload():
"@PACKAGE_NAME@",
"msgq_socket").replace("${prefix}",
"@prefix@")
+ PREFIX = "@prefix@"
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to the value of that variable, or, if defined,
@@ -41,9 +42,9 @@ def reload():
DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
else:
DATA_PATH = os.environ["B10_FROM_SOURCE"]
- PLUGIN_PATHS = [DATA_PATH + '/src/bin/cfgmgr/plugins']
+ PLUGIN_PATHS = [os.environ["B10_FROM_SOURCE"] +
+ '/src/bin/cfgmgr/plugins']
else:
- PREFIX = "@prefix@"
DATA_PATH = "@localstatedir@/@PACKAGE@".replace("${prefix}", PREFIX)
PLUGIN_PATHS = ["@prefix@/share/@PACKAGE@/config_plugins"]
# For testing the plugins so they can find their own spec files
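The hunk above changes how bind10_config computes PLUGIN_PATHS when running from source: the plugin path is now always derived from B10_FROM_SOURCE itself, even when B10_FROM_SOURCE_LOCALSTATEDIR overrides DATA_PATH. A simplified sketch of the resulting selection logic (the default prefix and package values here are illustrative):

    import os

    def plugin_paths(prefix="/usr/local", package="bind10"):
        """Simplified mirror of the PLUGIN_PATHS logic in bind10_config.reload()."""
        if "B10_FROM_SOURCE" in os.environ:
            # Derived from the source tree even if the localstatedir is overridden.
            return [os.environ["B10_FROM_SOURCE"] + '/src/bin/cfgmgr/plugins']
        return [prefix + '/share/' + package + '/config_plugins']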
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index 7a54909..a3e74c5 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,5 +1,11 @@
-SUBDIRS = datasrc cc config log net notify util testutils
+SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
+SUBDIRS += xfrin log_messages
python_PYTHON = __init__.py
pythondir = $(pyexecdir)/isc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 9204792..029f110 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,5 +1,7 @@
-import isc.datasrc
+# On some systems, it appears the dynamic linker gets
+# confused if the order is not right here.
+# There is probably a solution for this, but for now:
+# order is important here!
import isc.cc
import isc.config
-#import isc.dns
-import isc.log
+import isc.datasrc
diff --git a/src/lib/python/isc/acl/Makefile.am b/src/lib/python/isc/acl/Makefile.am
new file mode 100644
index 0000000..b1afa15
--- /dev/null
+++ b/src/lib/python/isc/acl/Makefile.am
@@ -0,0 +1,45 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+python_PYTHON = __init__.py dns.py
+pythondir = $(PYTHON_SITEPKG_DIR)/isc/acl
+
+pyexec_LTLIBRARIES = acl.la _dns.la
+pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/acl
+
+acl_la_SOURCES = acl.cc
+acl_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+acl_la_LDFLAGS = $(PYTHON_LDFLAGS)
+acl_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+_dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
+_dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
+_dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
+_dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+_dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+_dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+# Python prefers .so, while some OSes (specifically MacOS) use a different
+# suffix for dynamic objects. -module is necessary to work around this.
+acl_la_LDFLAGS += -module
+acl_la_LIBADD = $(top_builddir)/src/lib/acl/libacl.la
+acl_la_LIBADD += $(PYTHON_LIB)
+
+_dns_la_LDFLAGS += -module
+_dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
+_dns_la_LIBADD += $(PYTHON_LIB)
+
+EXTRA_DIST = acl.py _dns.py
+EXTRA_DIST += acl_inc.cc
+EXTRA_DIST += dnsacl_inc.cc dns_requestacl_inc.cc dns_requestcontext_inc.cc
+EXTRA_DIST += dns_requestloader_inc.cc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/acl/__init__.py b/src/lib/python/isc/acl/__init__.py
new file mode 100644
index 0000000..d9b2838
--- /dev/null
+++ b/src/lib/python/isc/acl/__init__.py
@@ -0,0 +1,11 @@
+"""
+Here are functions and classes for manipulating access control lists.
+"""
+
+# The DNS ACL loader would need the json module. Make sure it's imported
+# beforehand.
+import json
+
+# Other ACL modules depend heavily on the main acl submodule, so it's
+# explicitly imported here.
+import isc.acl.acl
diff --git a/src/lib/python/isc/acl/_dns.py b/src/lib/python/isc/acl/_dns.py
new file mode 100644
index 0000000..a645a7b
--- /dev/null
+++ b/src/lib/python/isc/acl/_dns.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; the .so version will be installed into the right
+# place at installation time.
+# This helper script only exists to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ bindingdir = os.path.join(base, 'isc/acl/.libs')
+ if os.path.exists(bindingdir):
+ sys.path.insert(0, bindingdir)
+
+from _dns import *
diff --git a/src/lib/python/isc/acl/acl.cc b/src/lib/python/isc/acl/acl.cc
new file mode 100644
index 0000000..6517a12
--- /dev/null
+++ b/src/lib/python/isc/acl/acl.cc
@@ -0,0 +1,80 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <acl/acl.h>
+
+using namespace isc::util::python;
+
+#include "acl_inc.cc"
+
+namespace {
+// Commonly used Python exception objects. Right now the acl module consists
+// of only one .cc file, so we hide them in an unnamed namespace. If and when
+// we extend this module with multiple .cc files, we should move them to
+// a named namespace, say isc::acl::python, and declare them in a separate
+// header file.
+PyObject* po_ACLError;
+PyObject* po_LoaderError;
+}
+
+namespace {
+PyModuleDef acl = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "isc.acl.acl",
+ acl_doc,
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+PyMODINIT_FUNC
+PyInit_acl(void) {
+ PyObject* mod = PyModule_Create(&acl);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ try {
+ po_ACLError = PyErr_NewException("isc.acl.Error", NULL, NULL);
+ PyObjectContainer(po_ACLError).installToModule(mod, "Error");
+
+ po_LoaderError = PyErr_NewException("isc.acl.LoaderError", NULL, NULL);
+ PyObjectContainer(po_LoaderError).installToModule(mod, "LoaderError");
+
+ // Install module constants. Note that we can let Py_BuildValue
+ // "steal" the references to these object (by specifying false to
+ // installToModule), because, unlike the exception cases above,
+ // we don't have corresponding C++ variables (see the note in
+ // pycppwrapper_util for more details).
+ PyObjectContainer(Py_BuildValue("I", isc::acl::ACCEPT)).
+ installToModule(mod, "ACCEPT", false);
+ PyObjectContainer(Py_BuildValue("I", isc::acl::REJECT)).
+ installToModule(mod, "REJECT", false);
+ PyObjectContainer(Py_BuildValue("I", isc::acl::DROP)).
+ installToModule(mod, "DROP", false);
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/acl/acl.py b/src/lib/python/isc/acl/acl.py
new file mode 100644
index 0000000..804d78b
--- /dev/null
+++ b/src/lib/python/isc/acl/acl.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; the .so version will be installed into the right
+# place at installation time.
+# This helper script only exists to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ bindingdir = os.path.join(base, 'isc/acl/.libs')
+ if os.path.exists(bindingdir):
+ sys.path.insert(0, bindingdir)
+
+from acl import *
diff --git a/src/lib/python/isc/acl/acl_inc.cc b/src/lib/python/isc/acl/acl_inc.cc
new file mode 100644
index 0000000..a9f7c9d
--- /dev/null
+++ b/src/lib/python/isc/acl/acl_inc.cc
@@ -0,0 +1,16 @@
+namespace {
+const char* const acl_doc = "\
+Implementation module for ACL operations\n\n\
+This module provides Python bindings for the C++ classes in the\n\
+isc::acl namespace.\n\
+\n\
+Integer constants:\n\
+\n\
+ACCEPT, REJECT, DROP -- Default actions an ACL could perform.\n\
+ These are the commonly used actions in specific ACLs.\n\
+    It is possible to specify any other values, as the ACL class itself\n\
+    does nothing with them; these three simply look reasonable and are\n\
+    provided for convenience. What exactly they mean is not specified\n\
+    here and is up to whoever uses them.\n\
+";
+} // unnamed namespace
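Once the isc.acl.acl extension is built and importable, the constants and exceptions installed by PyInit_acl() above can be used directly from Python. A minimal sketch (assuming the module is on the search path, e.g. via the .libs trick shown earlier):

    import isc.acl.acl as acl

    # The three predefined actions installed as module constants.
    print(acl.ACCEPT, acl.REJECT, acl.DROP)

    # The module-level exceptions behave like ordinary Python exception classes.
    try:
        raise acl.LoaderError("example: a malformed ACL specification")
    except acl.LoaderError as ex:
        print("caught:", ex)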
diff --git a/src/lib/python/isc/acl/dns.cc b/src/lib/python/isc/acl/dns.cc
new file mode 100644
index 0000000..eb3b57b
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <stdexcept>
+#include <boost/shared_ptr.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <cc/data.h>
+
+#include <acl/acl.h>
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestcontext_python.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestloader_python.h"
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::util::python;
+using namespace isc::data;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+#include "dnsacl_inc.cc"
+
+namespace {
+// This is a Python binding object corresponding to the singleton loader used
+// in the C++ version of the library.
+// We can define it as a pure object rather than through an accessor function,
+// because in Python we can ensure it has been created and initialized
+// in the module initializer by the time it's actually used.
+s_RequestLoader* po_REQUEST_LOADER;
+
+PyMethodDef methods[] = {
+ { NULL, NULL, 0, NULL }
+};
+
+PyModuleDef dnsacl = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "isc.acl._dns",
+ dnsacl_doc,
+ -1,
+ methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+PyObject*
+getACLException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* acl_module = PyImport_AddModule("isc.acl.acl");
+ if (acl_module != NULL) {
+ PyObject* acl_dict = PyModule_GetDict(acl_module);
+ if (acl_dict != NULL) {
+ ex_obj = PyDict_GetItemString(acl_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+}
+}
+}
+}
+
+PyMODINIT_FUNC
+PyInit__dns(void) {
+ PyObject* mod = PyModule_Create(&dnsacl);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_RequestContext(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+ if (!initModulePart_RequestACL(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+ if (!initModulePart_RequestLoader(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ // Module constants
+ try {
+ if (po_REQUEST_LOADER == NULL) {
+ po_REQUEST_LOADER = static_cast<s_RequestLoader*>(
+ requestloader_type.tp_alloc(&requestloader_type, 0));
+ }
+ if (po_REQUEST_LOADER != NULL) {
+ // We gain and keep our own reference to the singleton object
+ // for the same reason as that for exception objects (see comments
+ // in pycppwrapper_util for more details). Note also that we don't
+            // bother to release the reference even if an exception is thrown
+ // below (in fact, we cannot delete the singleton loader).
+ po_REQUEST_LOADER->cppobj = &getRequestLoader();
+ Py_INCREF(po_REQUEST_LOADER);
+ }
+ PyObjectContainer(po_REQUEST_LOADER).installToModule(mod,
+ "REQUEST_LOADER");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/acl/dns.h b/src/lib/python/isc/acl/dns.h
new file mode 100644
index 0000000..76849c5
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.h
@@ -0,0 +1,52 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_ACL_DNS_H
+#define __PYTHON_ACL_DNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.acl.acl loadable module.
+//
+// Since the acl module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access
+// C/C++ symbols defined in that module. So we get access to these objects
+// using the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.acl.acl has been loaded by the time
+// this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getACLException(const char* ex_name);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __PYTHON_ACL_DNS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns.py b/src/lib/python/isc/acl/dns.py
new file mode 100644
index 0000000..0733bc3
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""\
+This module provides Python bindings for the C++ classes in the
+isc::acl::dns namespace. Specifically, it defines Python interfaces for
+handling access control lists (ACLs) with DNS related contexts.
+The actual binding is implemented in an effectively hidden module,
+isc.acl._dns; this frontend module is a thin wrapper, provided so that
+the C++ binding code doesn't have to deal with complicated operations
+that could be done in a more straightforward way in native Python.
+
+For further details of the actual module, see the documentation of the
+_dns module.
+"""
+
+import pydnspp
+
+import isc.acl._dns
+from isc.acl._dns import *
+
+class RequestACL(isc.acl._dns.RequestACL):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestACL.
+
+    See the base class documentation for more details.
+ """
+ pass
+
+class RequestLoader(isc.acl._dns.RequestLoader):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestLoader.
+
+    See the base class documentation for more details.
+ """
+ pass
+
+class RequestContext(isc.acl._dns.RequestContext):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestContext.
+
+    See the base class documentation for more details.
+ """
+
+ def __init__(self, remote_address, tsig=None):
+ """Wrapper for the RequestContext constructor.
+
+ Internal implementation details that the users don't have to
+ worry about: To avoid dealing with pydnspp bindings in the C++ code,
+        this wrapper converts the TSIG record to its wire format in the form
+ of byte data, and has the binding re-construct the record from it.
+ """
+ tsig_wire = b''
+ if tsig is not None:
+ if not isinstance(tsig, pydnspp.TSIGRecord):
+ raise TypeError("tsig must be a TSIGRecord, not %s" %
+ tsig.__class__.__name__)
+ tsig_wire = tsig.to_wire(tsig_wire)
+ isc.acl._dns.RequestContext.__init__(self, remote_address, tsig_wire)
+
+ def __str__(self):
+ """Wrap __str__() to convert the module name."""
+ s = isc.acl._dns.RequestContext.__str__(self)
+ return s.replace('<isc.acl._dns', '<isc.acl.dns')
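A short usage sketch for the wrapper above (assuming the bindings are built and importable; the addresses are documentation examples):

    import isc.acl.dns

    # IPv4: an (address, port) tuple as used by the Python socket module.
    ctx = isc.acl.dns.RequestContext(('192.0.2.1', 53))
    print(ctx)   # __str__ above reports the module name as isc.acl.dns

    # A pydnspp.TSIGRecord may be passed as the optional second argument;
    # the wrapper converts it to wire format before handing it to _dns.
    # ctx = isc.acl.dns.RequestContext(('2001:db8::1', 53, 0, 0), tsig_record)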
diff --git a/src/lib/python/isc/acl/dns_requestacl_inc.cc b/src/lib/python/isc/acl/dns_requestacl_inc.cc
new file mode 100644
index 0000000..673fa23
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_inc.cc
@@ -0,0 +1,33 @@
+namespace {
+const char* const RequestACL_doc = "\
+The DNS Request ACL.\n\
+\n\
+It holds a bunch of ordered entries, each one consisting of a check for\n\
+a given DNS Request context and an action, which is one of ACCEPT,\n\
+REJECT, or DROP, as defined in the isc.acl.acl module.\n\
+The checks are tested in order and the first match counts.\n\
+\n\
+A RequestACL object cannot be constructed directly; an application\n\
+must use isc.acl.dns.load_request_acl() to create a RequestACL object.\n\
+\n\
+";
+
+const char* const RequestACL_execute_doc = "\
+execute(context) -> action \n\
+\n\
+The returned action is one of ACCEPT, REJECT or DROP as defined in\n\
+the isc.acl.acl module.\n\
+\n\
+This is the function that takes the ACL entries one by one, checks the\n\
+context against each entry's condition and, if it matches, returns the\n\
+action of the first matching entry, or the default action if nothing\n\
+matches.\n\
+\n\
+Parameters:\n\
+ context The thing that should be checked. It is directly passed\n\
+ to the checks.\n\
+\n\
+Return Value(s): The action for the ACL entry that first matches the\n\
+context.\n\
+";
+} // unnamed namespace
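To tie the pieces together, a hypothetical end-to-end check could look like the sketch below. It uses the load_request_acl() entry point mentioned in the docstring above; both that call and the JSON-like ACL specification format are assumptions for illustration and are not defined in this diff.

    import isc.acl.acl
    import isc.acl.dns

    # Hypothetical ACL: accept queries from 192.0.2.0/24, drop everything else.
    acl_spec = '[{"action": "ACCEPT", "from": "192.0.2.0/24"}, {"action": "DROP"}]'
    request_acl = isc.acl.dns.load_request_acl(acl_spec)   # assumed entry point

    context = isc.acl.dns.RequestContext(('192.0.2.10', 5300))
    action = request_acl.execute(context)
    if action == isc.acl.acl.ACCEPT:
        print("query accepted")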
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.cc b/src/lib/python/isc/acl/dns_requestacl_python.cc
new file mode 100644
index 0000000..1c38a30
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_python.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <acl/acl.h>
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestcontext_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::acl;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestACL
+//
+
+// Trivial constructor.
+s_RequestACL::s_RequestACL() {}
+
+// Import pydoc text
+#include "dns_requestacl_inc.cc"
+
+namespace {
+int
+RequestACL_init(PyObject*, PyObject*, PyObject*) {
+ PyErr_SetString(getACLException("Error"),
+ "RequestACL cannot be directly constructed");
+ return (-1);
+}
+
+void
+RequestACL_destroy(PyObject* po_self) {
+ s_RequestACL* const self = static_cast<s_RequestACL*>(po_self);
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+RequestACL_execute(PyObject* po_self, PyObject* args) {
+ s_RequestACL* const self = static_cast<s_RequestACL*>(po_self);
+
+ try {
+ const s_RequestContext* po_context;
+ if (PyArg_ParseTuple(args, "O!", &requestcontext_type, &po_context)) {
+ const BasicAction action =
+ self->cppobj->execute(*po_context->cppobj);
+ return (Py_BuildValue("I", action));
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to execute ACL: " + string(ex.what());
+ PyErr_SetString(getACLException("Error"), ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in executing ACL");
+ }
+
+ return (NULL);
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestACL_methods[] = {
+ { "execute", RequestACL_execute, METH_VARARGS, RequestACL_execute_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RequestACL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject requestacl_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestACL",
+ sizeof(s_RequestACL), // tp_basicsize
+ 0, // tp_itemsize
+ RequestACL_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestACL_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestACL_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestACL_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestACL(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestacl_type) < 0) {
+ return (false);
+ }
+ void* p = &requestacl_type;
+ if (PyModule_AddObject(mod, "RequestACL", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&requestacl_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.h b/src/lib/python/isc/acl/dns_requestacl_python.h
new file mode 100644
index 0000000..8f7ad8a
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_python.h
@@ -0,0 +1,53 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTACL_H
+#define __PYTHON_REQUESTACL_H 1
+
+#include <Python.h>
+
+#include <boost/shared_ptr.hpp>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestACL : public PyObject {
+public:
+ s_RequestACL();
+
+ // We don't have to use a shared pointer for its original purposes as
+ // the python object maintains reference counters itself. But the
+ // underlying C++ API only exposes a shared pointer for the ACL objects,
+ // so we store it in that form.
+ boost::shared_ptr<RequestACL> cppobj;
+};
+
+extern PyTypeObject requestacl_type;
+
+bool initModulePart_RequestACL(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTACL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns_requestcontext_inc.cc b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
new file mode 100644
index 0000000..f71bc59
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
@@ -0,0 +1,33 @@
+namespace {
+const char* const RequestContext_doc = "\
+DNS request to be checked.\n\
+\n\
+This plays the role of ACL context for the RequestACL object.\n\
+\n\
+Based on the minimalist philosophy, the initial implementation only\n\
+maintains the remote (source) IP address of the request and\n\
+(optionally) the TSIG record included in the request. We may add more\n\
+parameters of the request as we see the need for them. Possible\n\
+additional parameters are the local (destination) IP address, the\n\
+remote and local port numbers, various fields of the DNS request (e.g.\n\
+a particular header flag value).\n\
+\n\
+RequestContext(remote_address, tsig)\n\
+\n\
+ In this initial implementation, the constructor only takes a\n\
+ remote IP address in the form of a socket address as used in the\n\
+ Python socket module, and optionally a pydnspp.TSIGRecord object.\n\
+\n\
+ Exceptions:\n\
+ isc.acl.ACLError Normally shouldn't happen, but still possible\n\
+ for unexpected errors such as memory allocation\n\
+ failure or an invalid address text being passed.\n\
+\n\
+ Parameters:\n\
+ remote_address The remote IP address\n\
+ tsig The TSIG record included in the request message, if any.\n\
+ If the request doesn't include a TSIG, this will be None.\n\
+ If this parameter is omitted None will be assumed.\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.cc b/src/lib/python/isc/acl/dns_requestcontext_python.cc
new file mode 100644
index 0000000..7f33f59
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.cc
@@ -0,0 +1,382 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <sstream>
+#include <stdexcept>
+
+#include <boost/scoped_ptr.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+#include <dns/tsigrecord.h>
+
+#include <acl/dns.h>
+#include <acl/ip_check.h>
+
+#include "dns.h"
+#include "dns_requestcontext_python.h"
+
+using namespace std;
+using boost::scoped_ptr;
+using boost::lexical_cast;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::util::python;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+struct s_RequestContext::Data {
+ // The constructor.
+ Data(const char* const remote_addr, const unsigned short remote_port,
+ const char* tsig_data, const Py_ssize_t tsig_len)
+ {
+ createRemoteAddr(remote_addr, remote_port);
+ createTSIGRecord(tsig_data, tsig_len);
+ }
+
+ // A convenient type converter from sockaddr_storage to sockaddr
+ const struct sockaddr& getRemoteSockaddr() const {
+ const void* p = &remote_ss;
+ return (*static_cast<const struct sockaddr*>(p));
+ }
+
+ // The remote (source) IP address of the request. Note that it needs
+ // a reference to remote_ss. That's why the latter is stored within
+ // this structure.
+ scoped_ptr<IPAddress> remote_ipaddr;
+
+ // The effective length of remote_ss. It's necessary for getnameinfo()
+ // called from sockaddrToText (__str__ backend).
+ socklen_t remote_salen;
+
+ // The TSIG record included in the request, if any. If the request
+ // doesn't contain a TSIG, this will be NULL.
+ scoped_ptr<TSIGRecord> tsig_record;
+
+private:
+ // A helper method for the constructor that is responsible for constructing
+ // the remote address.
+ void createRemoteAddr(const char* const remote_addr,
+ const unsigned short remote_port)
+ {
+ struct addrinfo hints, *res;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_protocol = IPPROTO_UDP;
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+ const int error(getaddrinfo(remote_addr,
+ lexical_cast<string>(remote_port).c_str(),
+ &hints, &res));
+ if (error != 0) {
+ isc_throw(InvalidParameter, "Failed to convert [" << remote_addr
+ << "]:" << remote_port << ", " << gai_strerror(error));
+ }
+ assert(sizeof(remote_ss) > res->ai_addrlen);
+ memcpy(&remote_ss, res->ai_addr, res->ai_addrlen);
+ remote_salen = res->ai_addrlen;
+ freeaddrinfo(res);
+
+ remote_ipaddr.reset(new IPAddress(getRemoteSockaddr()));
+ }
+
+ // A helper method for the constructor that is responsible for constructing
+ // the request TSIG.
+ void createTSIGRecord(const char* tsig_data, const Py_ssize_t tsig_len) {
+ if (tsig_len == 0) {
+ return;
+ }
+
+ // Re-construct the TSIG record from the passed binary. This should
+ // normally succeed because we are generally expected to be called
+ // from the frontend .py, which converts a valid TSIGRecord to its
+ // wire format. If some evil or buggy python program directly calls
+ // us with bogus data, validation in libdns++ will trigger an
+ // exception, which will be caught and converted to a Python exception
+ // in RequestContext_init().
+ isc::util::InputBuffer b(tsig_data, tsig_len);
+ const Name key_name(b);
+ const RRType tsig_type(b.readUint16());
+ const RRClass tsig_class(b.readUint16());
+ const RRTTL ttl(b.readUint32());
+ const size_t rdlen(b.readUint16());
+ const ConstRdataPtr rdata = createRdata(tsig_type, tsig_class, b,
+ rdlen);
+ tsig_record.reset(new TSIGRecord(key_name, tsig_class, ttl,
+ *rdata, 0));
+ }
+
+private:
+ struct sockaddr_storage remote_ss;
+};
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestContext
+//
+
+// Trivial constructor.
+s_RequestContext::s_RequestContext() : cppobj(NULL), data_(NULL) {
+}
+
+// Import pydoc text
+#include "dns_requestcontext_inc.cc"
+
+namespace {
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestContext_methods[] = {
+ { NULL, NULL, 0, NULL }
+};
+
+int
+RequestContext_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
+
+ try {
+ // In this initial implementation, the constructor is simple: It
+ // takes two parameters. The first parameter should be a Python
+ // socket address object.
+ // For IPv4, it's ('address text', numeric_port); for IPv6,
+ // it's ('address text', num_port, num_flowid, num_zoneid).
+ // The second parameter is a wire-format TSIG record in the form of
+ // Python byte data. If the TSIG isn't included in the request,
+ // its length will be 0.
+ // Below, we parse the argument in the most straightforward way.
+ // As the constructor becomes more complicated, we should probably
+ // make it more structural (for example, we should first retrieve
+ // the python objects, and parse them recursively)
+
+ const char* remote_addr;
+ unsigned short remote_port;
+ unsigned int remote_flowinfo; // IPv6 only, unused here
+ unsigned int remote_zoneid; // IPv6 only, unused here
+ const char* tsig_data;
+ Py_ssize_t tsig_len;
+
+ if (PyArg_ParseTuple(args, "(sH)y#", &remote_addr, &remote_port,
+ &tsig_data, &tsig_len) ||
+ PyArg_ParseTuple(args, "(sHII)y#", &remote_addr, &remote_port,
+ &remote_flowinfo, &remote_zoneid,
+ &tsig_data, &tsig_len))
+ {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
+
+ auto_ptr<s_RequestContext::Data> dataptr(
+ new s_RequestContext::Data(remote_addr, remote_port,
+ tsig_data, tsig_len));
+ self->cppobj = new RequestContext(*dataptr->remote_ipaddr,
+ dataptr->tsig_record.get());
+ self->data_ = dataptr.release();
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct RequestContext object: " +
+ string(ex.what());
+ PyErr_SetString(getACLException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing RequestContext");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to RequestContext constructor");
+
+ return (-1);
+}
+
+void
+RequestContext_destroy(PyObject* po_self) {
+ s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
+
+ delete self->cppobj;
+ delete self->data_;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// A helper function for __str__()
+string
+sockaddrToText(const struct sockaddr& sa, socklen_t sa_len) {
+ char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
+ if (getnameinfo(&sa, sa_len, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf),
+ NI_NUMERICHOST | NI_NUMERICSERV)) {
+ // In this context this should never fail.
+ isc_throw(Unexpected, "Unexpected failure in getnameinfo");
+ }
+
+ return ("[" + string(hbuf) + "]:" + string(sbuf));
+}
+
+// for the __str__() method. This method is provided mainly for internal
+// testing.
+PyObject*
+RequestContext_str(PyObject* po_self) {
+ const s_RequestContext* const self =
+ static_cast<s_RequestContext*>(po_self);
+
+ try {
+ stringstream objss;
+ objss << "<" << requestcontext_type.tp_name << " object, "
+ << "remote_addr="
+ << sockaddrToText(self->data_->getRemoteSockaddr(),
+ self->data_->remote_salen);
+ if (self->data_->tsig_record) {
+ objss << ", key=" << self->data_->tsig_record->getName();
+ }
+ objss << ">";
+ return (Py_BuildValue("s", objss.str().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert RequestContext object to text: " +
+ string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting RequestContext object to text");
+ }
+ return (NULL);
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in Python and
+// parsing of PyObject* to s_RequestContext.
+// Most of the functions are not actually implemented and are NULL here.
+PyTypeObject requestcontext_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestContext",
+ sizeof(s_RequestContext), // tp_basicsize
+ 0, // tp_itemsize
+ RequestContext_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RequestContext_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestContext_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestContext_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestContext_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestContext(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestcontext_type) < 0) {
+ return (false);
+ }
+ void* p = &requestcontext_type;
+ if (PyModule_AddObject(mod, "RequestContext",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&requestcontext_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
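
For reference, the argument formats parsed in RequestContext_init() above
("(sH)y#" for IPv4, "(sHII)y#" for IPv6) correspond to the socket address
tuples used by the Python socket module, plus the optional TSIG data converted
by the Python frontend. A minimal usage sketch, based on the tests added later
in this branch (illustration only, not part of the patch itself):

    from pydnspp import Name, TSIG, TSIGRecord
    from isc.acl.dns import RequestContext

    # IPv4 and IPv6 socket address tuples, as the Python socket module
    # returns them (and as the (sH)/(sHII) formats above expect).
    ctx4 = RequestContext(('192.0.2.1', 53001))
    ctx6 = RequestContext(('2001:db8::1234', 53006, 0, 0))

    # Optionally attach a TSIG record; only the key name matters for ACLs.
    rdata = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 300 16 "
                 "2tra2tra2tra2tra2tra2g== 11621 0 0")
    ctx_signed = RequestContext(('192.0.2.1', 53001),
                                TSIGRecord(Name("key.example.com"), rdata))
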
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.h b/src/lib/python/isc/acl/dns_requestcontext_python.h
new file mode 100644
index 0000000..766133b
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTCONTEXT_H
+#define __PYTHON_REQUESTCONTEXT_H 1
+
+#include <Python.h>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestContext : public PyObject {
+public:
+ s_RequestContext();
+ RequestContext* cppobj;
+
+ // This object needs to maintain some source data to construct the
+ // underlying RequestContext object throughout its lifetime.
+ // These are "public" so that it can be accessed in the python wrapper
+ // implementation, but essentially they should be private, and the
+ // implementation details are hidden.
+ struct Data;
+ Data* data_;
+};
+
+extern PyTypeObject requestcontext_type;
+
+bool initModulePart_RequestContext(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTCONTEXT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns_requestloader_inc.cc b/src/lib/python/isc/acl/dns_requestloader_inc.cc
new file mode 100644
index 0000000..a911275
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_inc.cc
@@ -0,0 +1,87 @@
+namespace {
+
+// Note: this is derived from the generic Loader class of the C++
+// implementation, but is slightly different from the original.
+// Be careful when you make further merges from the C++ documentation.
+const char* const RequestLoader_doc = "\
+Loader of DNS Request ACLs.\n\
+\n\
+The goal of this class is to convert a JSON description of an ACL to an\n\
+object of the ACL class (including the checks inside it).\n\
+\n\
+To allow any kind of checks to exist in the application, creators are\n\
+registered for the names of the checks (this feature is not yet\n\
+available for the python API).\n\
+\n\
+An ACL definition looks like this: [\n\
+ {\n\
+ \"action\": \"ACCEPT\",\n\
+ \"match-type\": <parameter>\n\
+ },\n\
+ {\n\
+ \"action\": \"REJECT\",\n\
+ \"match-type\": <parameter>,\n\
+ \"another-match-type\": [<parameter1>, <parameter2>]\n\
+ },\n\
+ {\n\
+ \"action\": \"DROP\"\n\
+ }\n\
+ ]\n\
+ \n\
+\n\
+This is a list of elements. Each element must have an \"action\"\n\
+entry/keyword. That one specifies which action is returned if this\n\
+element matches (the value of the key is passed to the action loader;\n\
+see the constructor). The action is one of ACCEPT,\n\
+REJECT, or DROP, as defined in the isc.acl.acl module.\n\
+\n\
+The rest of the entries in the element are matches. The left side is the\n\
+name of the match type (for example \"from\" to match the source IP address).\n\
+The <parameter> is whatever is needed to describe the\n\
+match and depends on the match type, the loader passes it verbatim to\n\
+creator of that match type.\n\
+\n\
+There may be multiple match types in a single element. In such a case, all\n\
+of the matches must match for the element to take action (so, in the\n\
+second element, both \"match-type\" and \"another-match-type\" must be\n\
+satisfied). If there's no match in the element, the action is\n\
+taken/returned without conditions, every time (makes sense as the last\n\
+entry, as the ACL will never get past it).\n\
+\n\
+The second entry shows another thing - if there's a list as the value\n\
+for some match and the match itself is not expecting a list, it is\n\
+taken as an \"or\" - at least one of the choices in the\n\
+list must match. So, for the second entry, both \"match-type\" and\n\
+\"another-match-type\" must be satisfied, but the latter is\n\
+satisfied by either parameter1 or parameter2.\n\
+\n\
+Currently, a RequestLoader object cannot be constructed directly;\n\
+an application must use the singleton loader defined in the\n\
+isc.acl.dns module, i.e., isc.acl.dns.REQUEST_LOADER.\n\
+A future version of this implementation may be extended to give\n\
+applications full flexibility to create arbitrary loaders, at which\n\
+point this restriction may be removed.\n\
+";
+
+const char* const RequestLoader_load_doc = "\
+load(description) -> RequestACL\n\
+\n\
+Load a DNS (Request) ACL.\n\
+\n\
+This parses an ACL list, creates internal data for each rule\n\
+and returns a RequestACL object that contains all the given rules.\n\
+\n\
+Exceptions:\n\
+ LoaderError Load failed. The most likely cause of this is a syntax\n\
+ error in the description. Other internal errors such as\n\
+ memory allocation failure are also converted to this\n\
+ exception.\n\
+\n\
+Parameters:\n\
+ description String or Python representation of the JSON list of\n\
+ ACL. The Python representation is one accepted by the\n\
+ standard json module.\n\
+\n\
+Return Value(s): The newly created RequestACL object\n\
+";
+} // unnamed namespace
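
As a concrete illustration of the format described in RequestLoader_doc above
(a sketch only; it assumes the singleton loader exposed by the isc.acl.dns
frontend, as exercised by the tests later in this branch):

    from isc.acl.dns import REQUEST_LOADER

    # The same ACL given as a JSON string and as its Python representation;
    # load() accepts either form.
    acl = REQUEST_LOADER.load('[{"action": "ACCEPT", "from": "192.0.2.0/24"},'
                              ' {"action": "DROP"}]')
    acl = REQUEST_LOADER.load([{"action": "ACCEPT", "from": "192.0.2.0/24"},
                               {"action": "DROP"}])
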
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.cc b/src/lib/python/isc/acl/dns_requestloader_python.cc
new file mode 100644
index 0000000..ab421c5
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_python.cc
@@ -0,0 +1,270 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <boost/shared_ptr.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <cc/data.h>
+
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestloader_python.h"
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::util::python;
+using namespace isc::data;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestLoader
+//
+
+// Trivial constructor.
+s_RequestLoader::s_RequestLoader() : cppobj(NULL) {
+}
+
+// Import pydoc text
+#include "dns_requestloader_inc.cc"
+
+namespace {
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+int
+RequestLoader_init(PyObject*, PyObject*, PyObject*) {
+ PyErr_SetString(getACLException("Error"),
+ "RequestLoader cannot be directly constructed");
+ return (-1);
+}
+
+void
+RequestLoader_destroy(PyObject* po_self) {
+ s_RequestLoader* const self = static_cast<s_RequestLoader*>(po_self);
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// This C structure corresponds to a Python callable object for json.dumps().
+// This is initialized at the class initialization time (in
+// initModulePart_RequestLoader() below) and it's ensured to be non NULL and
+// valid in the rest of the class implementation.
+// Getting access to the json module this way and calling one of its functions
+// via PyObject_CallObject() may exceed the reasonably acceptable level for
+// straightforward bindings. But the alternative would be to write a Python
+// frontend for the entire module only for this conversion, which would also
+// be too much. So, right now, we implement everything within the binding
+// implementation. If future extensions require more such non trivial
+// wrappers, we should consider the frontend approach more seriously.
+PyObject* json_dumps_obj = NULL;
+
+PyObject*
+RequestLoader_load(PyObject* po_self, PyObject* args) {
+ s_RequestLoader* const self = static_cast<s_RequestLoader*>(po_self);
+
+ try {
+ PyObjectContainer c1, c2; // placeholder for temporary py objects
+ const char* acl_config;
+
+ // First, try string
+ int py_result = PyArg_ParseTuple(args, "s", &acl_config);
+ if (!py_result) {
+ PyErr_Clear(); // need to clear the error from ParseTuple
+
+ // If that fails, confirm the argument is a single Python object,
+ // and pass the argument to json.dumps() without conversion.
+ // Note that we should pass 'args', not 'json_obj' to
+ // PyObject_CallObject(), since this function expects its arguments
+ // in the form of a tuple, just like ParseTuple.
+ PyObject* json_obj;
+ if (PyArg_ParseTuple(args, "O", &json_obj)) {
+ c1.reset(PyObject_CallObject(json_dumps_obj, args));
+ c2.reset(Py_BuildValue("(O)", c1.get()));
+ py_result = PyArg_ParseTuple(c2.get(), "s", &acl_config);
+ }
+ }
+ if (py_result) {
+ shared_ptr<RequestACL> acl(
+ self->cppobj->load(Element::fromJSON(acl_config)));
+ s_RequestACL* py_acl = static_cast<s_RequestACL*>(
+ requestacl_type.tp_alloc(&requestacl_type, 0));
+ if (py_acl != NULL) {
+ py_acl->cppobj = acl;
+ }
+ return (py_acl);
+ }
+ } catch (const PyCPPWrapperException&) {
+ // If the wrapper utility throws, it's most likely because an invalid
+ // type of argument is passed (and the call to json.dumps() failed
+ // above), rather than a rare case of system errors such as memory
+ // allocation failure. So we fall through to the end of this function
+ // and raise a TypeError.
+ ;
+ } catch (const exception& ex) {
+ PyErr_SetString(getACLException("LoaderError"), ex.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (NULL);
+ }
+
+ PyErr_SetString(PyExc_TypeError, "RequestLoader.load() "
+ "expects str or python representation of JSON");
+ return (NULL);
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestLoader_methods[] = {
+ { "load", RequestLoader_load, METH_VARARGS, RequestLoader_load_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in Python and
+// parsing of PyObject* to s_RequestLoader.
+// Most of the functions are not actually implemented and are NULL here.
+PyTypeObject requestloader_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestLoader",
+ sizeof(s_RequestLoader), // tp_basicsize
+ 0, // tp_itemsize
+ RequestLoader_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestLoader_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestLoader_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestLoader_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestLoader(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestloader_type) < 0) {
+ return (false);
+ }
+ void* p = &requestloader_type;
+ if (PyModule_AddObject(mod, "RequestLoader",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+
+ // Get and hold our own reference to json.dumps() for later use.
+ // Normally it should succeed as __init__.py of the isc.acl package
+ // explicitly imports the json module, and the code below should be
+ // error free (e.g., it doesn't require memory allocation) under this
+ // condition.
+ // This could still fail with deviant or evil Python code, such as code
+ // that first imports json and then deletes the reference to it from
+ // sys.modules before importing the acl.dns module. The RequestLoader
+ // class could still work as long as it doesn't use the JSON decoder,
+ // but we'd rather refuse to import the module than allow the partially
+ // working class to keep running.
+ PyObject* json_module = PyImport_AddModule("json");
+ if (json_module != NULL) {
+ PyObject* json_dict = PyModule_GetDict(json_module);
+ if (json_dict != NULL) {
+ json_dumps_obj = PyDict_GetItemString(json_dict, "dumps");
+ }
+ }
+ if (json_dumps_obj != NULL) {
+ Py_INCREF(json_dumps_obj);
+ } else {
+ PyErr_SetString(PyExc_RuntimeError,
+ "isc.acl.dns.RequestLoader needs the json module, but "
+ "it's missing");
+ return (false);
+ }
+
+ Py_INCREF(&requestloader_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
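
The string-or-object handling in RequestLoader_load() above can be summarized
at the Python level roughly as follows (a sketch for illustration only; in the
binding the conversion is done in C++ through the cached json.dumps callable):

    import json

    def load_like_binding(loader, description):
        # A plain string is parsed as JSON directly; any other single object
        # is first serialized with json.dumps() and then parsed the same way.
        if isinstance(description, str):
            return loader.load(description)
        return loader.load(json.dumps(description))
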
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.h b/src/lib/python/isc/acl/dns_requestloader_python.h
new file mode 100644
index 0000000..9d0b63e
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_python.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTLOADER_H
+#define __PYTHON_REQUESTLOADER_H 1
+
+#include <Python.h>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestLoader : public PyObject {
+public:
+ s_RequestLoader();
+ RequestLoader* cppobj;
+};
+
+extern PyTypeObject requestloader_type;
+
+bool initModulePart_RequestLoader(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTLOADER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dnsacl_inc.cc b/src/lib/python/isc/acl/dnsacl_inc.cc
new file mode 100644
index 0000000..b2e7338
--- /dev/null
+++ b/src/lib/python/isc/acl/dnsacl_inc.cc
@@ -0,0 +1,17 @@
+namespace {
+const char* const dnsacl_doc = "\
+Implementation module for DNS ACL operations\n\n\
+This module provides Python bindings for the C++ classes in the\n\
+isc::acl::dns namespace. Specifically, it defines Python interfaces for\n\
+handling access control lists (ACLs) with DNS-related contexts.\n\
+These bindings are a close match to the C++ API, but they are not complete\n\
+(some parts are not needed) and some are done in more Python-like ways.\n\
+\n\
+Special objects:\n\
+\n\
+REQUEST_LOADER -- A singleton loader of ACLs. Applications are expected\n\
+ to use this object instead of creating their own loaders, because\n\
+ one is enough, it has the default checks registered, and it is a\n\
+ well-known one, so any plugins can register additional checks as well.\n\
+";
+} // unnamed namespace
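
Putting the pieces together, typical use of REQUEST_LOADER mirrors the tests
added below (illustration only; the names come from the isc.acl.acl and
isc.acl.dns modules used in those tests):

    from isc.acl.acl import ACCEPT
    from isc.acl.dns import REQUEST_LOADER, RequestContext

    acl = REQUEST_LOADER.load([{"action": "ACCEPT", "from": "192.0.2.1"},
                               {"action": "DROP"}])
    context = RequestContext(('192.0.2.1', 53001))
    assert acl.execute(context) == ACCEPT
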
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
new file mode 100644
index 0000000..e0a1895
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -0,0 +1,30 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = acl_test.py dns_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/acl/tests/acl_test.py b/src/lib/python/isc/acl/tests/acl_test.py
new file mode 100644
index 0000000..24a0c94
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/acl_test.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+from isc.acl.acl import *
+
+class ACLTest(unittest.TestCase):
+
+ def test_actions(self):
+ # These are simple tests just checking that the predefined actions have
+ # different values
+ self.assertTrue(ACCEPT != REJECT)
+ self.assertTrue(REJECT != DROP)
+ self.assertTrue(DROP != ACCEPT)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
new file mode 100644
index 0000000..7ee3023
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -0,0 +1,357 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import socket
+from pydnspp import *
+from isc.acl.acl import LoaderError, Error, ACCEPT, REJECT, DROP
+from isc.acl.dns import *
+
+def get_sockaddr(address, port):
+ '''This is a simple shortcut wrapper for getaddrinfo'''
+ ai = socket.getaddrinfo(address, port, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP, socket.AI_NUMERICHOST)[0]
+ return ai[4]
+
+def get_acl(prefix):
+ '''This is a simple shortcut for creating an ACL containing a single rule
+ that accepts addresses matching the given IP prefix (and rejects any
+ others by default)
+ '''
+ return REQUEST_LOADER.load('[{"action": "ACCEPT", "from": "' + \
+ prefix + '"}]')
+
+def get_acl_json(prefix):
+ '''Same as get_acl, but this function passes a Python representation of
+ JSON to the loader, not a string.'''
+ json = [{"action": "ACCEPT"}]
+ json[0]["from"] = prefix
+ return REQUEST_LOADER.load(json)
+
+# The following two are similar to the previous two, but use a TSIG key name
+# instead of IP prefix.
+def get_tsig_acl(key):
+ return REQUEST_LOADER.load('[{"action": "ACCEPT", "key": "' + \
+ key + '"}]')
+
+def get_tsig_acl_json(key):
+ json = [{"action": "ACCEPT"}]
+ json[0]["key"] = key
+ return REQUEST_LOADER.load(json)
+
+# Commonly used TSIG RDATA. For the purpose of ACL checks only the key name
+# matters; other parameters are simply borrowed from some other tests, which
+# can be anything for the purpose of the tests here.
+TSIG_RDATA = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 " + \
+ "300 16 2tra2tra2tra2tra2tra2g== " + \
+ "11621 0 0")
+
+def get_context(address, key_name=None):
+ '''This is a simple shortcut wrapper for creating a RequestContext
+ object with a given IP address and optionally a TSIG key name.
+ Port number doesn't matter in the test (as of the initial implementation),
+ so it's fixed for simplicity.
+ If key_name is not None, it internally creates a (faked) TSIG record
+ and constructs a context with that key. Note that only the key name
+ matters for the purpose of ACL checks.
+ '''
+ tsig_record = None
+ if key_name is not None:
+ tsig_record = TSIGRecord(Name(key_name), TSIG_RDATA)
+ return RequestContext(get_sockaddr(address, 53000), tsig_record)
+
+# These are commonly used RequestContext objects
+CONTEXT4 = get_context('192.0.2.1')
+CONTEXT6 = get_context('2001:db8::1')
+
+class RequestContextTest(unittest.TestCase):
+
+ def test_construct(self):
+ # Construct the context from IPv4/IPv6 addresses, check the object
+ # by printing it.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001>',
+ RequestContext(('192.0.2.1', 53001)).__str__())
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006>',
+ RequestContext(('2001:db8::1234', 53006,
+ 0, 0)).__str__())
+
+ # Construct the context from IP address and a TSIG record.
+ tsig_record = TSIGRecord(Name("key.example.com"), TSIG_RDATA)
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('192.0.2.1', 53001),
+ tsig_record).__str__())
+
+ # same with IPv6 address, just in case.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('2001:db8::1234', 53006,
+ 0, 0), tsig_record).__str__())
+
+ # Unusual case: port number overflows (this constructor allows that,
+ # although it should be rare anyway; the socket address should
+ # normally come from the Python socket module).
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:0>',
+ RequestContext(('192.0.2.1', 65536)).__str__())
+
+ # same test using socket.getaddrinfo() to ensure it accepts the sock
+ # address representation used in the Python socket module.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001>',
+ RequestContext(get_sockaddr('192.0.2.1',
+ 53001)).__str__())
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006>',
+ RequestContext(get_sockaddr('2001:db8::1234',
+ 53006)).__str__())
+
+ #
+ # Invalid parameters (in our expected usage this should not happen
+ # because the sockaddr would come from the Python socket module, but
+ # validation should still be performed correctly)
+ #
+ # not a tuple
+ self.assertRaises(TypeError, RequestContext, 1)
+ # invalid number of parameters
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0, 1)
+ # type error for TSIG
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), tsig=1)
+ # tuple is not in the form of sockaddr
+ self.assertRaises(TypeError, RequestContext, (0, 53))
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 'http'))
+ self.assertRaises(TypeError, RequestContext, ('::', 0, 'flow', 0))
+ # invalid address
+ self.assertRaises(Error, RequestContext, ('example.com', 5300))
+ self.assertRaises(Error, RequestContext, ('192.0.2.1.1', 5300))
+ self.assertRaises(Error, RequestContext, ('2001:db8:::1', 5300))
+
+class RequestACLTest(unittest.TestCase):
+
+ def test_direct_construct(self):
+ self.assertRaises(Error, RequestACL)
+
+ def test_request_loader(self):
+ # these shouldn't raise an exception
+ REQUEST_LOADER.load('[{"action": "DROP"}]')
+ REQUEST_LOADER.load([{"action": "DROP"}])
+ REQUEST_LOADER.load('[{"action": "DROP", "from": "192.0.2.1"}]')
+ REQUEST_LOADER.load([{"action": "DROP", "from": "192.0.2.1"}])
+
+ # Invalid types (note that arguments like '1' or '[]' are of a valid
+ # 'type', but are syntax errors at a higher level). So we need to use
+ # something that is not really JSON nor a string.
+ self.assertRaises(TypeError, REQUEST_LOADER.load, b'')
+
+ # Incorrect number of arguments
+ self.assertRaises(TypeError, REQUEST_LOADER.load,
+ '[{"action": "DROP"}]', 0)
+
+ def test_bad_acl_syntax(self):
+ # the following are derived from loader_test.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '{}');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, {});
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '42');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 42);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 'true');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, True);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 'null');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, None);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '"hello"');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, "hello");
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[42]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [42]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '["hello"]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, ["hello"]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[[]]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [[]]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[true]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [True]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[null]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [None]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[{}]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [{}]);
+
+ # the following are derived from dns_test.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "bad": "192.0.2.1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "bad": "192.0.2.1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": 4}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": 4}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": []}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": []}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": 1}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": 1}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": {}}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": {}}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": "bad"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": "bad"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": "bad..name"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": "bad..name"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": null}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": None}])
+
+ def test_bad_acl_ipsyntax(self):
+ # this test is derived from ip_check_unittest.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43/-1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43/-1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43//1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43//1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43/1/"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43/1/"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "/192.0.2.43/1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "/192.0.2.43/1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "2001:db8::/xxxx"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "2001:db8::/xxxx"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "2001:db8::/32/s"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "2001:db8::/32/s"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "1/"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "1/"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "/1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "/1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.0/33"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.0/33"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "::1/129"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "::1/129"}])
+
+ def test_execute(self):
+ # tests derived from dns_test.cc. We don't directly expose checks
+ # in the python wrapper, so we test it via execute().
+ self.assertEqual(ACCEPT, get_acl('192.0.2.1').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl_json('192.0.2.1').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.2.53').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.2.53').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl('192.0.2.0/24').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl_json('192.0.2.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.1.0/24').execute(CONTEXT4))
+
+ self.assertEqual(ACCEPT, get_acl('2001:db8::1').execute(CONTEXT6))
+ self.assertEqual(ACCEPT, get_acl_json('2001:db8::1').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('2001:db8::53').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl_json('2001:db8::53').execute(CONTEXT6))
+ self.assertEqual(ACCEPT, get_acl('2001:db8::/64').execute(CONTEXT6))
+ self.assertEqual(ACCEPT,
+ get_acl_json('2001:db8::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('2001:db8:1::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT,
+ get_acl_json('2001:db8:1::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('32.1.13.184').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl_json('32.1.13.184').execute(CONTEXT6))
+
+ # TSIG checks, derived from dns_test.cc
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'badkey.example.com')))
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'badkey.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT6))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT6))
+
+ # A bit more complicated example, derived from resolver_config_unittest
+ acl = REQUEST_LOADER.load('[ {"action": "ACCEPT", ' +
+ ' "from": "192.0.2.1"},' +
+ ' {"action": "REJECT",' +
+ ' "from": "192.0.2.0/24"},' +
+ ' {"action": "DROP",' +
+ ' "from": "2001:db8::1"},' +
+ '] }')
+ self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
+ self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
+ self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
+ self.assertEqual(REJECT, acl.execute(get_context('2001:db8::2')))
+
+ # same test using the JSON representation
+ acl = REQUEST_LOADER.load([{"action": "ACCEPT", "from": "192.0.2.1"},
+ {"action": "REJECT",
+ "from": "192.0.2.0/24"},
+ {"action": "DROP", "from": "2001:db8::1"}])
+ self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
+ self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
+ self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
+ self.assertEqual(REJECT, acl.execute(get_context('2001:db8::2')))
+
+ def test_bad_execute(self):
+ acl = get_acl('192.0.2.1')
+ # missing parameter
+ self.assertRaises(TypeError, acl.execute)
+ # too many parameters
+ self.assertRaises(TypeError, acl.execute, get_context('192.0.2.2'), 0)
+ # type mismatch
+ self.assertRaises(TypeError, acl.execute, 'bad parameter')
+
+class RequestLoaderTest(unittest.TestCase):
+ # Note: loading ACLs is tested in other test cases.
+
+ def test_construct(self):
+ # at least for now, we don't allow direct construction.
+ self.assertRaises(Error, RequestLoader)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
new file mode 100644
index 0000000..43a7605
--- /dev/null
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -0,0 +1,4 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py sockcreator.py
+pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/__init__.py b/src/lib/python/isc/bind10/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
new file mode 100644
index 0000000..2345034
--- /dev/null
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -0,0 +1,228 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+import os
+import copy
+import subprocess
+import isc.log
+from isc.log_messages.bind10_messages import *
+from libutil_io_python import recv_fd
+
+logger = isc.log.Logger("boss")
+
+"""
+Module that communicates with the privileged socket creator (b10-sockcreator).
+"""
+
+class CreatorError(Exception):
+ """
+ Exception for socket creator related errors.
+
+ It has two members, fatal and errno, which just hold the values
+ passed to the __init__ function.
+ """
+
+ def __init__(self, message, fatal, errno=None):
+ """
+ Creates the exception. The message argument is the usual string.
+ The fatal one tells if the error is fatal (e.g. the creator crashed)
+ and errno is the errno value returned from the socket creator, if
+ applicable.
+ """
+ Exception.__init__(self, message)
+ self.fatal = fatal
+ self.errno = errno
+
+class Parser:
+ """
+ This class knows the sockcreator language. It creates commands, sends them
+ and receives the answers and parses them.
+
+ It does not start the creator process; the communication channel must be
+ provided.
+
+ In theory, anything here can throw a fatal CreatorError exception, but that
+ happens only in cases like the creator process crashing. Any other
+ occasions are mentioned explicitly.
+ """
+
+ def __init__(self, creator_socket):
+ """
+ Creates the parser. The creator_socket is the socket to the socket creator
+ process that will be used for communication. However, the object must
+ have a read_fd() method to read the file descriptor. This slightly
+ unusual trick with modifying an object is used to make testing easier.
+
+ You can use WrappedSocket in production code to add the method to any
+ ordinary socket.
+ """
+ self.__socket = creator_socket
+ logger.info(BIND10_SOCKCREATOR_INIT)
+
+ def terminate(self):
+ """
+ Asks the creator process to terminate and waits for it to close the
+ socket. Does not return anything. Raises a CreatorError if there is
+ still data on the socket, if there is an error closing the socket,
+ or if the socket had already been closed.
+ """
+ if self.__socket is None:
+ raise CreatorError('Terminated already', True)
+ logger.info(BIND10_SOCKCREATOR_TERMINATE)
+ try:
+ self.__socket.sendall(b'T')
+ # Wait for an EOF - it will return empty data
+ eof = self.__socket.recv(1)
+ if len(eof) != 0:
+ raise CreatorError('Protocol error - data after terminated',
+ True)
+ self.__socket = None
+ except socket.error as se:
+ self.__socket = None
+ raise CreatorError(str(se), True)
+
+ def get_socket(self, address, port, socktype):
+ """
+ Asks the socket creator process to create a socket. Pass an address
+ (an isc.net.IPaddr object), a port number and a socket type (either
+ the string "UDP" or "TCP", or the constant socket.SOCK_DGRAM or
+ socket.SOCK_STREAM).
+
+ Blocks until it is provided by the socket creator process (which
+ should be fast, as it is on localhost) and returns the file descriptor
+ number. It raises a CreatorError exception if the creation fails.
+ """
+ if self.__socket is None:
+ raise CreatorError('Socket requested on terminated creator', True)
+ # First, assemble the request from parts
+ logger.info(BIND10_SOCKET_GET, address, port, socktype)
+ data = b'S'
+ if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
+ data += b'U'
+ elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
+ data += b'T'
+ else:
+ raise ValueError('Unknown socket type: ' + str(socktype))
+ if address.family == socket.AF_INET:
+ data += b'4'
+ elif address.family == socket.AF_INET6:
+ data += b'6'
+ else:
+ raise ValueError('Unknown address family in address')
+ data += struct.pack('!H', port)
+ data += address.addr
+ try:
+ # Send the request
+ self.__socket.sendall(data)
+ answer = self.__socket.recv(1)
+ if answer == b'S':
+ # Success!
+ result = self.__socket.read_fd()
+ logger.info(BIND10_SOCKET_CREATED, result)
+ return result
+ elif answer == b'E':
+ # There was an error, read the error as well
+ error = self.__socket.recv(1)
+ errno = struct.unpack('i',
+ self.__read_all(len(struct.pack('i',
+ 0))))
+ if error == b'S':
+ cause = 'socket'
+ elif error == b'B':
+ cause = 'bind'
+ else:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
+ raise CreatorError('Unknown error cause: ' + str(error), True)
+ logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
+ os.strerror(errno[0]))
+ raise CreatorError('Error creating socket on ' + cause, False,
+ errno[0])
+ else:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
+ raise CreatorError('Unknown response ' + str(answer), True)
+ except socket.error as se:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
+ raise CreatorError(str(se), True)
+
+ def __read_all(self, length):
+ """
+ Keeps reading until 'length' bytes have been read, or until EOF or an
+ error happens.
+
+ EOF is considered an error as well and raises a CreatorError.
+ """
+ result = b''
+ while len(result) < length:
+ data = self.__socket.recv(length - len(result))
+ if len(data) == 0:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_EOF)
+ raise CreatorError('Unexpected EOF', True)
+ result += data
+ return result
+
+class WrappedSocket:
+ """
+ This class wraps a socket and adds a read_fd method, so it can be used
+ for the Parser class conveniently. It simply copies all its guts into
+ itself and implements the method.
+ """
+ def __init__(self, socket):
+ # Copy whatever can be copied from the socket
+ for name in dir(socket):
+ if name not in ['__class__', '__weakref__']:
+ setattr(self, name, getattr(socket, name))
+ # Keep the socket, so we can prevent it from being garbage-collected
+ # and closed before we are removed ourselves
+ self.__orig_socket = socket
+
+ def read_fd(self):
+ """
+ Read the file descriptor from the socket.
+ """
+ return recv_fd(self.fileno())
+
+# FIXME: Any idea how to test this? Starting an external process doesn't sound
+# OK
+class Creator(Parser):
+ """
+ This starts the socket creator and allows asking for the sockets.
+ """
+ def __init__(self, path):
+ (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
+ # Popen does not like, for some reason, having the same socket for
+ # stdin as well as stdout, so we dup it before passing it there.
+ remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
+ socket.SOCK_STREAM)
+ env = copy.deepcopy(os.environ)
+ env['PATH'] = path
+ self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
+ stdin=remote.fileno(),
+ stdout=remote2.fileno())
+ remote.close()
+ remote2.close()
+ Parser.__init__(self, WrappedSocket(local))
+
+ def pid(self):
+ return self.__process.pid
+
+ def kill(self):
+ logger.warn(BIND10_SOCKCREATOR_KILL)
+ if self.__process is not None:
+ self.__process.kill()
+ self.__process = None
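
For reference, the request Parser.get_socket() assembles is a small binary
message. The sketch below builds the same bytes with the standard library only
(illustration only; socket.inet_pton() is assumed to produce the same packed
form as the IPAddr.addr attribute used above):

    import socket
    import struct

    def build_socket_request(addr_text, port, family=socket.AF_INET,
                             tcp=False):
        # 'S' = socket request, then 'U'/'T' for UDP/TCP, '4'/'6' for the
        # address family, the port in network byte order, and the raw address.
        data = b'S'
        data += b'T' if tcp else b'U'
        data += b'4' if family == socket.AF_INET else b'6'
        data += struct.pack('!H', port)
        data += socket.inet_pton(family, addr_text)
        return data

    # A UDP/IPv4 request for 192.0.2.1 port 53:
    assert build_socket_request('192.0.2.1', 53) == \
        b'SU4' + struct.pack('!H', 53) + socket.inet_pton(socket.AF_INET,
                                                          '192.0.2.1')
    # Termination is requested with a single b'T', after which the creator
    # closes its end of the socket (EOF).
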
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
new file mode 100644
index 0000000..df8ab30
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -0,0 +1,29 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+#PYTESTS = args_test.py bind10_test.py
+# NOTE: this has a generated test found in the builddir
+PYTESTS = sockcreator_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
new file mode 100644
index 0000000..4453184
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -0,0 +1,327 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This test file is generated .py.in -> .py just to be in the build dir,
+# same as the rest of the tests. Saves a lot of stuff in makefile.
+
+"""
+Tests for the bind10.sockcreator module.
+"""
+
+import unittest
+import struct
+import socket
+from isc.net.addr import IPAddr
+import isc.log
+from libutil_io_python import send_fd
+from isc.bind10.sockcreator import Parser, CreatorError, WrappedSocket
+
+class FakeCreator:
+ """
+ Class emulating the socket to the socket creator. It can be given expected
+ data to receive (and check) and responses to give to the Parser class
+ during testing.
+ """
+
+ class InvalidPlan(Exception):
+ """
+ Raised when someone wants to recv when sending is planned or vice
+ versa.
+ """
+ pass
+
+ class InvalidData(Exception):
+ """
+ Raised when the data passed to sendall are not the same as expected.
+ """
+ pass
+
+ def __init__(self, plan):
+ """
+ Create the object. The plan variable contains a list of expected actions,
+ in the form:
+
+ [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
+ ('f', 'File descriptor number to return from read_fd'), ('e',
+ None), ...]
+
+ It modifies the array as it goes.
+ """
+ self.__plan = plan
+
+ def __get_plan(self, expected):
+ if len(self.__plan) == 0:
+ raise self.InvalidPlan('Nothing more planned')
+ (kind, data) = self.__plan[0]
+ if kind == 'e':
+ self.__plan.pop(0)
+ raise socket.error('False socket error')
+ if kind != expected:
+ raise InvalidPlan('Planned ' + kind + ', but ' + expected +
+ ' requested')
+ return data
+
+ def recv(self, maxsize):
+ """
+ Emulate recv. Returns at most maxsize bytes from the current recv plan.
+ If there is data left over from a previous recv call, it is used first.
+
+ If no recv is planned, raises InvalidPlan.
+ """
+ data = self.__get_plan('r')
+ result, rest = data[:maxsize], data[maxsize:]
+ if len(rest) > 0:
+ self.__plan[0] = ('r', rest)
+ else:
+ self.__plan.pop(0)
+ return result
+
+ def read_fd(self):
+ """
+ Emulate the reading of a file descriptor. Returns one from the plan.
+
+ It raises InvalidPlan if no file descriptor read is planned now.
+ """
+ fd = self.__get_plan('f')
+ self.__plan.pop(0)
+ return fd
+
+ def sendall(self, data):
+ """
+ Checks that the data passed is correct according to the plan. It raises
+ InvalidData if the data differs, or InvalidPlan when sendall is not
+ expected.
+ """
+ planned = self.__get_plan('s')
+ dlen = len(data)
+ prefix, rest = planned[:dlen], planned[dlen:]
+ if prefix != data:
+ raise InvalidData('Expected "' + str(prefix)+ '", got "' +
+ str(data) + '"')
+ if len(rest) > 0:
+ self.__plan[0] = ('s', rest)
+ else:
+ self.__plan.pop(0)
+
+ def all_used(self):
+ """
+ Returns whether the whole plan was consumed.
+ """
+ return len(self.__plan) == 0
+
+class ParserTests(unittest.TestCase):
+ """
+ Testcases for the Parser class.
+
+ A lot of these tests could be done with
+ `with self.assertRaises(CreatorError) as cm`. But some versions of Python
+ get the scoping wrong and it doesn't work, so we use the primitive
+ try-except way instead.
+ """
+ def __terminate(self):
+ creator = FakeCreator([('s', b'T'), ('r', b'')])
+ parser = Parser(creator)
+ self.assertEqual(None, parser.terminate())
+ self.assertTrue(creator.all_used())
+ return parser
+
+ def test_terminate(self):
+ """
+ Test that the terminate command is correct and that it waits for the
+ EOF when reading.
+ """
+ self.__terminate()
+
+ def __terminate_raises(self, parser):
+ """
+ Check that terminate() raises a fatal exception.
+ """
+ try:
+ parser.terminate()
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_terminate_error1(self):
+ """
+ Test it reports an exception when there's an error terminating the creator.
+ This one raises an error when receiving the EOF.
+ """
+ creator = FakeCreator([('s', b'T'), ('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error2(self):
+ """
+ Test it reports an exception when there's an error terminating the creator.
+ This one raises an error when sending data.
+ """
+ creator = FakeCreator([('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error3(self):
+ """
+ Test it reports an exception when there's an error terminating the creator.
+ This one gets extra data when the creator should have terminated.
+ """
+ creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_twice(self):
+ """
+ Test we can't terminate twice.
+ """
+ parser = self.__terminate()
+ self.__terminate_raises(parser)
+
+ def test_crash(self):
+ """
+ Tests that the parser correctly raises an exception when the creator
+ crashes unexpectedly.
+ """
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_error(self):
+ """
+ Tests that the parser correctly raises a non-fatal exception when
+ the socket cannot be created.
+ """
+ # We split the int to see if it can cope with data coming in
+ # different packets
+ intpart = struct.pack('@i', 42)
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
+ intpart[:1]), ('r', intpart[1:])])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertFalse(ce.fatal)
+ self.assertEqual(42, ce.errno)
+
+ def __error(self, plan):
+ creator = FakeCreator(plan)
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ self.assertTrue(ce.fatal)
+
+ def test_error_send(self):
+ self.__error([('e', None)])
+
+ def test_error_recv(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
+
+ def test_error_read_fd(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
+
+ def __create(self, addr, socktype, encoded):
+ creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
+ parser = Parser(creator)
+ self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
+
+ def test_create1(self):
+ self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
+
+ def test_create2(self):
+ self.__create('2001:db8::', socket.SOCK_STREAM,
+ b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
+
+ def test_create_terminated(self):
+ """
+ Test we can't request sockets after the parser was terminated.
+ """
+ parser = self.__terminate()
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_invalid_socktype(self):
+ """
+ Test that an invalid socket type is rejected.
+ """
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ IPAddr('0.0.0.0'), 42, 'RAW')
+
+ def test_invalid_family(self):
+ """
+ Test it rejects an invalid address family.
+ """
+ # Note: this produces bad logger output, since this address
+ # cannot be converted to a string, so the original message with
+ # placeholders is output. This should not happen in practice, so
+ # it is harmless.
+ addr = IPAddr('0.0.0.0')
+ addr.family = 42
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ addr, 42, socket.SOCK_DGRAM)
+
+class WrapTests(unittest.TestCase):
+ """
+ Tests for the wrap_socket function.
+ """
+ def test_wrap(self):
+ # We construct two pairs of sockets. The receiving side of one pair will
+ # be wrapped. Then we send one socket of the other pair through this pair
+ # and check that the received one can be used as a socket.
+
+ # The transport socket
+ (t1, t2) = socket.socketpair()
+ # The payload socket
+ (p1, p2) = socket.socketpair()
+
+ t2 = WrappedSocket(t2)
+
+ # Transfer the descriptor
+ send_fd(t1.fileno(), p1.fileno())
+ p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
+
+ # Now, pass some data through the socket
+ p1.send(b'A')
+ data = p2.recv(1)
+ self.assertEqual(b'A', data)
+
+ # Test the wrapping didn't hurt the socket's usual methods
+ t1.send(b'B')
+ data = t2.recv(1)
+ self.assertEqual(b'B', data)
+ t2.send(b'C')
+ data = t1.recv(1)
+ self.assertEqual(b'C', data)
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
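For reference, the request bytes used in __create() above appear to follow the
layout 'S' <protocol letter> <family digit> <16-bit port, network byte order>
<raw address bytes>. A minimal sketch, assuming that layout (inferred from the
test constants, not from any socket creator documentation):

    import socket
    import struct

    def encode_request(addr_text, port, proto_letter):
        # proto_letter: 'U' for UDP, 'T' for TCP/stream (assumed from the tests)
        family = socket.AF_INET6 if ':' in addr_text else socket.AF_INET
        family_digit = b'6' if family == socket.AF_INET6 else b'4'
        packed_addr = socket.inet_pton(family, addr_text)
        return (b'S' + proto_letter.encode() + family_digit +
                struct.pack('>H', port) + packed_addr)

    # Matches the constant used in test_create1 above:
    assert encode_request('192.0.2.0', 42, 'U') == b'SU4\x00\x2a\xc0\x00\x02\x00'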
diff --git a/src/lib/python/isc/cc/Makefile.am b/src/lib/python/isc/cc/Makefile.am
index a2246db..b0ba3b2 100644
--- a/src/lib/python/isc/cc/Makefile.am
+++ b/src/lib/python/isc/cc/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
python_PYTHON = __init__.py data.py session.py message.py
pythondir = $(pyexecdir)/isc/cc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/cc/data.py b/src/lib/python/isc/cc/data.py
index ce1bba0..76ef942 100644
--- a/src/lib/python/isc/cc/data.py
+++ b/src/lib/python/isc/cc/data.py
@@ -22,8 +22,22 @@
import json
-class DataNotFoundError(Exception): pass
-class DataTypeError(Exception): pass
+class DataNotFoundError(Exception):
+ """Raised if an identifier does not exist according to a spec file,
+ or if an item is addressed that is not in the current (or default)
+ config (such as a nonexistent list or map element)"""
+ pass
+
+class DataAlreadyPresentError(Exception):
+ """Raised if there is an attemt to add an element to a list or a
+ map that is already present in that list or map (i.e. if 'add'
+ is used when it should be 'set')"""
+ pass
+
+class DataTypeError(Exception):
+ """Raised if there is an attempt to set an element that is of a
+ different type than the type specified in the specification."""
+ pass
def remove_identical(a, b):
"""Removes the values from dict a that are the same as in dict b.
diff --git a/src/lib/python/isc/cc/message.py b/src/lib/python/isc/cc/message.py
index 3601c41..3ebcc43 100644
--- a/src/lib/python/isc/cc/message.py
+++ b/src/lib/python/isc/cc/message.py
@@ -35,7 +35,7 @@ def from_wire(data):
Raises an AttributeError if the given object has no decode()
method (which should return a string).
'''
- return json.loads(data.decode('utf8'))
+ return json.loads(data.decode('utf8'), strict=False)
if __name__ == "__main__":
import doctest
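The reason for strict=False, shown standalone: by default json.loads() rejects
raw control characters such as '\n' inside strings, which is exactly what the
new msg3 test case later in this diff exercises.

    import json

    raw = b'{"aaa": [1, 1.1, true, false, "string\n" ]}'
    try:
        json.loads(raw.decode('utf8'))        # strict mode rejects the control character
    except ValueError:
        pass
    doc = json.loads(raw.decode('utf8'), strict=False)   # accepted
    assert doc["aaa"][-1] == "string\n"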
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index fb7dd06..f6b6265 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -93,6 +93,19 @@ class Session:
self._socket.send(msg)
def recvmsg(self, nonblock = True, seq = None):
+ """Reads a message. If nonblock is true, and there is no
+ message to read, it returns (None, None).
+ If seq is not None, it should be a value as returned by
+ group_sendmsg(), in which case only the response to
+ that message is returned, and others will be queued until
+ the next call to this method.
+ If seq is None, only messages that are *not* responses
+ will be returned, and responses will be queued.
+ The queue is checked for relevant messages before data
+ is read from the socket.
+ Raises a SessionError if there is a JSON decode problem in
+ the message that is read, or if the session has been closed
+ prior to the call of recvmsg()"""
with self._lock:
if len(self._queue) > 0:
i = 0;
@@ -109,16 +122,22 @@ class Session:
if data and len(data) > 2:
header_length = struct.unpack('>H', data[0:2])[0]
data_length = len(data) - 2 - header_length
- if data_length > 0:
- env = isc.cc.message.from_wire(data[2:header_length+2])
- msg = isc.cc.message.from_wire(data[header_length + 2:])
- if (seq == None and "reply" not in env) or (seq != None and "reply" in env and seq == env["reply"]):
- return env, msg
+ try:
+ if data_length > 0:
+ env = isc.cc.message.from_wire(data[2:header_length+2])
+ msg = isc.cc.message.from_wire(data[header_length + 2:])
+ if (seq == None and "reply" not in env) or (seq != None and "reply" in env and seq == env["reply"]):
+ return env, msg
+ else:
+ self._queue.append((env,msg))
+ return self.recvmsg(nonblock, seq)
else:
- self._queue.append((env,msg))
- return self.recvmsg(nonblock, seq)
- else:
- return isc.cc.message.from_wire(data[2:header_length+2]), None
+ return isc.cc.message.from_wire(data[2:header_length+2]), None
+ except ValueError as ve:
+ # TODO: when we have logging here, add a debug
+ # message printing the data that we were unable
+ # to parse as JSON
+ raise SessionError(ve)
return None, None
def _receive_bytes(self, size):
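A sketch of the calling convention the new recvmsg() docstring describes. It
assumes a running msgq; the group name and message content are placeholders:

    from isc.cc import Session, SessionError

    session = Session()
    session.group_subscribe("Example")

    # Send a message and wait only for the matching reply; unrelated
    # messages are queued for later recvmsg() calls.
    seq = session.group_sendmsg({"command": ["ping"]}, "Example")
    try:
        env, msg = session.recvmsg(nonblock=False, seq=seq)
    except SessionError:
        # raised e.g. when the received data cannot be decoded as JSON
        # (the try/except added above)
        env, msg = None, None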
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index dc19758..4c2acc0 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -6,6 +6,13 @@ EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += sendcmd.py
EXTRA_DIST += test_session.py
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -15,7 +22,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH) \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/cc/tests/message_test.py b/src/lib/python/isc/cc/tests/message_test.py
index 2024201..c417068 100644
--- a/src/lib/python/isc/cc/tests/message_test.py
+++ b/src/lib/python/isc/cc/tests/message_test.py
@@ -31,6 +31,10 @@ class MessageTest(unittest.TestCase):
self.msg2_str = "{\"aaa\": [1, 1.1, true, false, null]}";
self.msg2_wire = self.msg2_str.encode()
+ self.msg3 = { "aaa": [ 1, 1.1, True, False, "string\n" ] }
+ self.msg3_str = "{\"aaa\": [1, 1.1, true, false, \"string\n\" ]}";
+ self.msg3_wire = self.msg3_str.encode()
+
def test_encode_json(self):
self.assertEqual(self.msg1_wire, isc.cc.message.to_wire(self.msg1))
self.assertEqual(self.msg2_wire, isc.cc.message.to_wire(self.msg2))
@@ -40,6 +44,7 @@ class MessageTest(unittest.TestCase):
def test_decode_json(self):
self.assertEqual(self.msg1, isc.cc.message.from_wire(self.msg1_wire))
self.assertEqual(self.msg2, isc.cc.message.from_wire(self.msg2_wire))
+ self.assertEqual(self.msg3, isc.cc.message.from_wire(self.msg3_wire))
self.assertRaises(AttributeError, isc.cc.message.from_wire, 1)
self.assertRaises(ValueError, isc.cc.message.from_wire, b'\x001')
diff --git a/src/lib/python/isc/cc/tests/session_test.py b/src/lib/python/isc/cc/tests/session_test.py
index fe35a6c..772ed0c 100644
--- a/src/lib/python/isc/cc/tests/session_test.py
+++ b/src/lib/python/isc/cc/tests/session_test.py
@@ -274,6 +274,16 @@ class testSession(unittest.TestCase):
self.assertEqual({"hello": "b"}, msg)
self.assertFalse(sess.has_queued_msgs())
+ def test_recv_bad_msg(self):
+ sess = MySession()
+ self.assertFalse(sess.has_queued_msgs())
+ sess._socket.addrecv({'to': 'someone' }, {'hello': 'b'})
+ sess._socket.addrecv({'to': 'someone', 'reply': 1}, {'hello': 'a'})
+ # mangle the bytes a bit
+ sess._socket.recvqueue[5] = sess._socket.recvqueue[5] - 2
+ sess._socket.recvqueue = sess._socket.recvqueue[:-2]
+ self.assertRaises(SessionError, sess.recvmsg, True, 1)
+
def test_next_sequence(self):
sess = MySession()
self.assertEqual(sess._sequence, 1)
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index 916a522..ef696fb 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -1,5 +1,31 @@
SUBDIRS = . tests
python_PYTHON = __init__.py ccsession.py cfgmgr.py config_data.py module_spec.py
-
pythondir = $(pyexecdir)/isc/config
+
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+BUILT_SOURCES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
+
+CLEANDIRS = __pycache__
+
+EXTRA_DIST = cfgmgr_messages.mes config_messages.mes
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py : cfgmgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cfgmgr_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py : config_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/config_messages.mes
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 226c6ba..2d998ce 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -39,6 +39,13 @@
from isc.cc import Session
from isc.config.config_data import ConfigData, MultiConfigData, BIND10_CONFIG_DATA_VERSION
import isc
+from isc.util.file import path_search
+import bind10_config
+from isc.log import log_config_update
+import json
+from isc.log_messages.config_messages import *
+
+logger = isc.log.Logger("config")
class ModuleCCSessionError(Exception): pass
@@ -84,6 +91,7 @@ COMMAND_CONFIG_UPDATE = "config_update"
COMMAND_MODULE_SPECIFICATION_UPDATE = "module_specification_update"
COMMAND_GET_COMMANDS_SPEC = "get_commands_spec"
+COMMAND_GET_STATISTICS_SPEC = "get_statistics_spec"
COMMAND_GET_CONFIG = "get_config"
COMMAND_SET_CONFIG = "set_config"
COMMAND_GET_MODULE_SPEC = "get_module_spec"
@@ -116,6 +124,15 @@ def create_command(command_name, params = None):
msg = { 'command': cmd }
return msg
+def default_logconfig_handler(new_config, config_data):
+ errors = []
+
+ if config_data.get_module_spec().validate_config(False, new_config, errors):
+ isc.log.log_config_update(json.dumps(new_config),
+ json.dumps(config_data.get_module_spec().get_full_spec()))
+ else:
+ logger.error(CONFIG_LOG_CONFIG_ERRORS, errors)
+
class ModuleCCSession(ConfigData):
"""This class maintains a connection to the command channel, as
well as configuration options for modules. The module provides
@@ -126,14 +143,37 @@ class ModuleCCSession(ConfigData):
callbacks are called when 'check_command' is called on the
ModuleCCSession"""
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
+ def __init__(self, spec_file_name, config_handler, command_handler,
+ cc_session=None, handle_logging_config=True,
+ socket_file = None):
"""Initialize a ModuleCCSession. This does *NOT* send the
specification and request the configuration yet. Use start()
for that once the ModuleCCSession has been initialized.
- specfile_name is the path to the specification file
+
+ specfile_name is the path to the specification file.
+
config_handler and command_handler are callback functions,
see set_config_handler and set_command_handler for more
- information on their signatures."""
+ information on their signatures.
+
+ cc_session can be used to pass in an existing CCSession,
+ if it is None, one will be set up. This is mainly intended
+ for testing purposes.
+
+ handle_logging_config: if True, the module session will
+ automatically handle logging configuration for the module;
+ it will read the system-wide Logging configuration and call
+ the logger manager to apply it. It will also inform the
+ logger manager when the logging configuration gets updated.
+ The module does not need to do anything except initializing
+ its loggers and providing log messages. Defaults to True.
+
+ socket_file: If cc_session is None, this optional argument
+ specifies which socket file to use to connect to msgq. It
+ will be overridden by the environment variable
+ BIND10_MSGQ_SOCKET_FILE. If None, and no environment variable
+ is set, it will use the system default.
+ """
module_spec = isc.config.module_spec_from_file(spec_file_name)
ConfigData.__init__(self, module_spec)
@@ -143,12 +183,17 @@ class ModuleCCSession(ConfigData):
self.set_command_handler(command_handler)
if not cc_session:
- self._session = Session()
+ self._session = Session(socket_file)
else:
self._session = cc_session
self._session.group_subscribe(self._module_name, "*")
self._remote_module_configs = {}
+ self._remote_module_callbacks = {}
+
+ if handle_logging_config:
+ self.add_remote_config(path_search('logging.spec', bind10_config.PLUGIN_PATHS),
+ default_logconfig_handler)
def __del__(self):
# If the CC Session obejct has been closed, it returns
@@ -218,6 +263,9 @@ class ModuleCCSession(ConfigData):
newc = self._remote_module_configs[module_name].get_local_config()
isc.cc.data.merge(newc, new_config)
self._remote_module_configs[module_name].set_local_config(newc)
+ if self._remote_module_callbacks[module_name] != None:
+ self._remote_module_callbacks[module_name](new_config,
+ self._remote_module_configs[module_name])
# For other modules, we're not supposed to answer
return
@@ -260,7 +308,7 @@ class ModuleCCSession(ConfigData):
and return an answer created with create_answer()"""
self._command_handler = command_handler
- def add_remote_config(self, spec_file_name):
+ def add_remote_config(self, spec_file_name, config_update_callback = None):
"""Gives access to the configuration of a different module.
These remote module options can at this moment only be
accessed through get_remote_config_value(). This function
@@ -273,7 +321,7 @@ class ModuleCCSession(ConfigData):
module_spec = isc.config.module_spec_from_file(spec_file_name)
module_cfg = ConfigData(module_spec)
module_name = module_spec.get_module_name()
- self._session.group_subscribe(module_name);
+ self._session.group_subscribe(module_name)
# Get the current config for that module now
seq = self._session.group_sendmsg(create_command(COMMAND_GET_CONFIG, { "module_name": module_name }), "ConfigManager")
@@ -288,10 +336,13 @@ class ModuleCCSession(ConfigData):
rcode, value = parse_answer(answer)
if rcode == 0:
if value != None and module_spec.validate_config(False, value):
- module_cfg.set_local_config(value);
+ module_cfg.set_local_config(value)
+ if config_update_callback is not None:
+ config_update_callback(value, module_cfg)
# all done, add it
self._remote_module_configs[module_name] = module_cfg
+ self._remote_module_callbacks[module_name] = config_update_callback
return module_name
def remove_remote_config(self, module_name):
@@ -299,6 +350,7 @@ class ModuleCCSession(ConfigData):
if module_name in self._remote_module_configs:
self._session.group_unsubscribe(module_name)
del self._remote_module_configs[module_name]
+ del self._remote_module_callbacks[module_name]
def get_remote_config_value(self, module_name, identifier):
"""Returns the current setting for the given identifier at the
@@ -329,13 +381,20 @@ class ModuleCCSession(ConfigData):
if answer:
rcode, value = parse_answer(answer)
if rcode == 0:
- if value != None and self.get_module_spec().validate_config(False, value):
- self.set_local_config(value);
- if self._config_handler:
- self._config_handler(value)
+ errors = []
+ if value != None:
+ if self.get_module_spec().validate_config(False,
+ value,
+ errors):
+ self.set_local_config(value)
+ if self._config_handler:
+ self._config_handler(value)
+ else:
+ raise ModuleCCSessionError(
+ "Wrong data in configuration: " +
+ " ".join(errors))
else:
- # log error
- print("[" + self._module_name + "] Error requesting configuration: " + value)
+ logger.error(CONFIG_GET_FAILED, value)
else:
raise ModuleCCSessionError("No answer from configuration manager")
except isc.cc.SessionTimeout:
@@ -364,8 +423,8 @@ class UIModuleCCSession(MultiConfigData):
self.set_specification(isc.config.ModuleSpec(specs[module]))
def update_specs_and_config(self):
- self.request_specifications();
- self.request_current_config();
+ self.request_specifications()
+ self.request_current_config()
def request_current_config(self):
"""Requests the current configuration from the configuration
@@ -375,65 +434,144 @@ class UIModuleCCSession(MultiConfigData):
raise ModuleCCSessionError("Bad config version")
self._set_current_config(config)
-
- def add_value(self, identifier, value_str = None):
- """Add a value to a configuration list. Raises a DataTypeError
- if the value does not conform to the list_item_spec field
- of the module config data specification. If value_str is
- not given, we add the default as specified by the .spec
- file."""
- module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
-
+ def _add_value_to_list(self, identifier, value, module_spec):
cur_list, status = self.get_value(identifier)
if not cur_list:
cur_list = []
- # Hmm. Do we need to check for duplicates?
- value = None
- if value_str is not None:
- value = isc.cc.data.parse_value_str(value_str)
- else:
+ if value is None:
if "item_default" in module_spec["list_item_spec"]:
value = module_spec["list_item_spec"]["item_default"]
if value is None:
- raise isc.cc.data.DataNotFoundError("No value given and no default for " + str(identifier))
-
+ raise isc.cc.data.DataNotFoundError(
+ "No value given and no default for " + str(identifier))
+
if value not in cur_list:
cur_list.append(value)
self.set_value(identifier, cur_list)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
+
+ def _add_value_to_named_set(self, identifier, value, item_value):
+ if type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " +
+ identifier +
+ " must be a string")
+ # fail on both None and empty string
+ if not value:
+ raise isc.cc.data.DataNotFoundError(
+ "Need a name to add a new item to named_set " +
+ str(identifier))
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value not in cur_map:
+ cur_map[value] = item_value
+ self.set_value(identifier, cur_map)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
- def remove_value(self, identifier, value_str):
- """Remove a value from a configuration list. The value string
- must be a string representation of the full item. Raises
- a DataTypeError if the value at the identifier is not a list,
- or if the given value_str does not match the list_item_spec
- """
+ def add_value(self, identifier, value_str = None, set_value_str = None):
+ """Add a value to a configuration list. Raises a DataTypeError
+ if the value does not conform to the list_item_spec field
+ of the module config data specification. If value_str is
+ not given, we add the default as specified by the .spec
+ file. Raises a DataNotFoundError if the given identifier
+ is not specified in the specification as a map or list.
+ Raises a DataAlreadyPresentError if the specified element
+ already exists."""
module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ # the specified element must be a list or a named_set
+ if 'list_item_spec' in module_spec:
+ value = None
+ # in lists, we might get the value with spaces, making it
+ # the third argument. In that case we interpret both as
+ # one big string meant as the value
+ if value_str is not None:
+ if set_value_str is not None:
+ value_str += set_value_str
+ value = isc.cc.data.parse_value_str(value_str)
+ self._add_value_to_list(identifier, value, module_spec)
+ elif 'named_set_item_spec' in module_spec:
+ item_name = None
+ item_value = None
+ if value_str is not None:
+ item_name = isc.cc.data.parse_value_str(value_str)
+ if set_value_str is not None:
+ item_value = isc.cc.data.parse_value_str(set_value_str)
+ else:
+ if 'item_default' in module_spec['named_set_item_spec']:
+ item_value = module_spec['named_set_item_spec']['item_default']
+ self._add_value_to_named_set(identifier, item_name,
+ item_value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named set")
- if value_str is None:
- # we are directly removing an list index
+ def _remove_value_from_list(self, identifier, value):
+ if value is None:
+ # we are directly removing a list index
id, list_indices = isc.cc.data.split_identifier_list_indices(identifier)
if list_indices is None:
- raise DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
+ raise isc.cc.data.DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
else:
self.set_value(identifier, None)
else:
- value = isc.cc.data.parse_value_str(value_str)
- isc.config.config_data.check_type(module_spec, [value])
cur_list, status = self.get_value(identifier)
- #if not cur_list:
- # cur_list = isc.cc.data.find_no_exc(self.config.data, identifier)
if not cur_list:
cur_list = []
- if value in cur_list:
+ elif value in cur_list:
cur_list.remove(value)
self.set_value(identifier, cur_list)
+ def _remove_value_from_named_set(self, identifier, value):
+ if value is None:
+ raise isc.cc.data.DataNotFoundError("Need a name to remove an item from named_set " + str(identifier))
+ elif type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " + identifier + " must be a string")
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value in cur_map:
+ del cur_map[value]
+ self.set_value(identifier, cur_map)
+ else:
+ raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
+
+ def remove_value(self, identifier, value_str):
+ """Remove a value from a configuration list or named set.
+ The value string must be a string representation of the full
+ item. Raises a DataTypeError if the value at the identifier
+ is not a list, or if the given value_str does not match the
+ list_item_spec """
+ module_spec = self.find_spec_part(identifier)
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ value = None
+ if value_str is not None:
+ value = isc.cc.data.parse_value_str(value_str)
+
+ if 'list_item_spec' in module_spec:
+ if value is not None:
+ isc.config.config_data.check_type(module_spec['list_item_spec'], value)
+ self._remove_value_from_list(identifier, value)
+ elif 'named_set_item_spec' in module_spec:
+ self._remove_value_from_named_set(identifier, value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named_set")
+
+
+
def commit(self):
"""Commit all local changes, send them through b10-cmdctl to
the configuration manager"""
@@ -447,7 +585,6 @@ class UIModuleCCSession(MultiConfigData):
self.request_current_config()
self.clear_local_changes()
elif "error" in answer:
- print("Error: " + answer["error"])
- print("Configuration not committed")
+ raise ModuleCCSessionError("Error: " + str(answer["error"]) + "\n" + "Configuration not committed")
else:
raise ModuleCCSessionError("Unknown format of answer in commit(): " + str(answer))
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 88a93e1..4d568be 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -28,7 +28,13 @@ import tempfile
import json
import errno
from isc.cc import data
-from isc.config import ccsession, config_data
+from isc.config import ccsession, config_data, module_spec
+from isc.util.file import path_search
+import bind10_config
+import isc.log
+from isc.log_messages.cfgmgr_messages import *
+
+logger = isc.log.Logger("cfgmgr")
class ConfigManagerDataReadError(Exception):
"""This exception is thrown when there is an error while reading
@@ -89,7 +95,7 @@ class ConfigManagerData:
elif file_config['version'] == 1:
# only format change, no other changes necessary
file_config['version'] = 2
- print("[b10-cfgmgr] Updating configuration database version from 1 to 2")
+ logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, 1, 2)
config.data = file_config
else:
if config_data.BIND10_CONFIG_DATA_VERSION > file_config['version']:
@@ -111,12 +117,13 @@ class ConfigManagerData:
if file:
file.close();
return config
-
+
def write_to_file(self, output_file_name = None):
"""Writes the current configuration data to a file. If
output_file_name is not specified, the file used in
read_from_file is used."""
filename = None
+
try:
file = tempfile.NamedTemporaryFile(mode='w',
prefix="b10-config.db.",
@@ -131,12 +138,9 @@ class ConfigManagerData:
else:
os.rename(filename, self.db_filename)
except IOError as ioe:
- # TODO: log this (level critical)
- print("[b10-cfgmgr] Unable to write configuration file; configuration not stored: " + str(ioe))
- # TODO: debug option to keep file?
+ logger.error(CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION, ioe)
except OSError as ose:
- # TODO: log this (level critical)
- print("[b10-cfgmgr] Unable to write configuration file; configuration not stored: " + str(ose))
+ logger.error(CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION, ose)
try:
if filename and os.path.exists(filename):
os.remove(filename)
@@ -182,10 +186,24 @@ class ConfigManager:
self.cc.group_subscribe("ConfigManager")
self.cc.group_subscribe("Boss", "ConfigManager")
self.running = False
+ # As a core module, CfgMgr is different than other modules,
+ # as it does not use a ModuleCCSession, and hence needs
+ # to handle logging config on its own
+ self.log_config_data = config_data.ConfigData(
+ isc.config.module_spec_from_file(
+ path_search('logging.spec',
+ bind10_config.PLUGIN_PATHS)))
+ # store the logging 'module' name for easier reference
+ self.log_module_name = self.log_config_data.get_module_spec().get_module_name()
+
+ def check_logging_config(self, config):
+ if self.log_module_name in config:
+ ccsession.default_logconfig_handler(config[self.log_module_name],
+ self.log_config_data)
def notify_boss(self):
"""Notifies the Boss module that the Config Manager is running"""
- self.cc.group_sendmsg({"running": "configmanager"}, "Boss")
+ self.cc.group_sendmsg({"running": "ConfigManager"}, "Boss")
def set_module_spec(self, spec):
"""Adds a ModuleSpec"""
@@ -214,7 +232,7 @@ class ConfigManager:
is returned"""
if module_name:
if module_name in self.module_specs:
- return self.module_specs[module_name]
+ return self.module_specs[module_name].get_full_spec()
else:
# TODO: log error?
return {}
@@ -250,17 +268,31 @@ class ConfigManager:
commands[module_name] = self.module_specs[module_name].get_commands_spec()
return commands
+ def get_statistics_spec(self, name = None):
+ """Returns a dict containing 'module_name': statistics_spec for
+ all modules. If name is specified, only that module will
+ be included"""
+ statistics = {}
+ if name:
+ if name in self.module_specs:
+ statistics[name] = self.module_specs[name].get_statistics_spec()
+ else:
+ for module_name in self.module_specs.keys():
+ statistics[module_name] = self.module_specs[module_name].get_statistics_spec()
+ return statistics
+
def read_config(self):
"""Read the current configuration from the file specificied at init()"""
try:
self.config = ConfigManagerData.read_from_file(self.data_path,
self.\
database_filename)
+ self.check_logging_config(self.config.data)
except ConfigManagerDataEmpty:
# ok, just start with an empty config
self.config = ConfigManagerData(self.data_path,
self.database_filename)
-
+
def write_config(self):
"""Write the current configuration to the file specificied at init()"""
self.config.write_to_file()
@@ -272,7 +304,12 @@ class ConfigManager:
if type(cmd) == dict:
if 'module_name' in cmd and cmd['module_name'] != '':
module_name = cmd['module_name']
- answer = ccsession.create_answer(0, self.get_module_spec(module_name))
+ spec = self.get_module_spec(cmd['module_name'])
+ if type(spec) != type({}):
+ # this is a ModuleSpec object. Extract the
+ # internal spec.
+ spec = spec.get_full_spec()
+ answer = ccsession.create_answer(0, spec)
else:
answer = ccsession.create_answer(1, "Bad module_name in get_module_spec command")
else:
@@ -357,6 +394,9 @@ class ConfigManager:
answer, env = self.cc.group_recvmsg(False, seq)
except isc.cc.SessionTimeout:
answer = ccsession.create_answer(1, "Timeout waiting for answer from " + module_name)
+ except isc.cc.SessionError as se:
+ logger.error(CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE, module_name, se)
+ answer = ccsession.create_answer(1, "Unable to parse response from " + module_name + ": " + str(se))
if answer:
rcode, val = ccsession.parse_answer(answer)
if rcode == 0:
@@ -383,6 +423,8 @@ class ConfigManager:
got_error = True
err_list.append(val)
if not got_error:
+ # if Logging config is in there, update our config as well
+ self.check_logging_config(cmd)
self.write_config()
return ccsession.create_answer(0)
else:
@@ -404,7 +446,7 @@ class ConfigManager:
answer = ccsession.create_answer(1, "Wrong number of arguments")
if not answer:
answer = ccsession.create_answer(1, "No answer message from " + cmd[0])
-
+
return answer
def _handle_module_spec(self, spec):
@@ -414,7 +456,7 @@ class ConfigManager:
# todo: error checking (like keyerrors)
answer = {}
self.set_module_spec(spec)
-
+
# We should make one general 'spec update for module' that
# passes both specification and commands at once
spec_update = ccsession.create_command(ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
@@ -429,6 +471,8 @@ class ConfigManager:
if cmd:
if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
answer = ccsession.create_answer(0, self.get_commands_spec())
+ elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
+ answer = ccsession.create_answer(0, self.get_statistics_spec())
elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
answer = self._handle_get_module_spec(arg)
elif cmd == ccsession.COMMAND_GET_CONFIG:
@@ -436,8 +480,6 @@ class ConfigManager:
elif cmd == ccsession.COMMAND_SET_CONFIG:
answer = self._handle_set_config(arg)
elif cmd == ccsession.COMMAND_SHUTDOWN:
- # TODO: logging
- #print("[b10-cfgmgr] Received shutdown command")
self.running = False
answer = ccsession.create_answer(0)
elif cmd == ccsession.COMMAND_MODULE_SPEC:
@@ -450,7 +492,7 @@ class ConfigManager:
else:
answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
return answer
-
+
def run(self):
"""Runs the configuration manager."""
self.running = True
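A sketch of how another component could use the new get_statistics_spec
command handled above (running msgq and config manager assumed; this is not
code from the patch):

    from isc.cc import Session
    from isc.config import ccsession

    session = Session()
    seq = session.group_sendmsg(
        ccsession.create_command(ccsession.COMMAND_GET_STATISTICS_SPEC),
        "ConfigManager")
    answer, env = session.group_recvmsg(False, seq)
    rcode, stats_specs = ccsession.parse_answer(answer)
    if rcode == 0:
        # a dict of {module_name: statistics_spec}, per get_statistics_spec()
        for module_name, spec in stats_specs.items():
            print(module_name, spec)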
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
new file mode 100644
index 0000000..61a63ed
--- /dev/null
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -0,0 +1,57 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+
+% CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+
+% CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+
+% CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+
+% CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+
+% CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+
+% CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+
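Each '% MSGID text' entry above is compiled (by the message compiler rule added
to Makefile.am earlier in this diff) into a Python constant in
isc.log_messages.cfgmgr_messages, which is then passed to the logger together
with the placeholder arguments. A short usage sketch, assuming the generated
module is on the Python path:

    import isc.log
    from isc.log_messages.cfgmgr_messages import \
        CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION

    isc.log.init("bind10")
    logger = isc.log.Logger("cfgmgr")
    # %1 in the message definition is filled in from the extra argument
    logger.error(CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION, "disk full")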
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index cee1d34..b2cf048 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -108,6 +108,54 @@ def convert_type(spec_part, value):
except TypeError as err:
raise isc.cc.data.DataTypeError(str(err))
+def _get_map_or_list(spec_part):
+ """Returns the list or map specification if this is a list or a
+ map specification part. If not, returns the given spec_part
+ itself"""
+ if "map_item_spec" in spec_part:
+ return spec_part["map_item_spec"]
+ elif "list_item_spec" in spec_part:
+ return spec_part["list_item_spec"]
+ else:
+ return spec_part
+
+def _find_spec_part_single(cur_spec, id_part):
+ """Find the spec part for the given (partial) name. This partial
+ name does not contain separators ('/'), and the specification
+ part should be a direct child of the given specification part.
+ id_part may contain list selectors, which will be ignored.
+ Returns the child part.
+ Raises DataNotFoundError if it was not found."""
+ # strip list selector part
+ # don't need it for the spec part, so just drop it
+ id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
+
+ # The specification we want a sub-part for should be either a
+ # list or a map, which is internally represented by a dict with
+ # an element 'map_item_spec', a dict with an element 'list_item_spec',
+ # or a list (when it is the 'main' config_data element of a module).
+ if type(cur_spec) == dict and 'map_item_spec' in cur_spec.keys():
+ for cur_spec_item in cur_spec['map_item_spec']:
+ if cur_spec_item['item_name'] == id:
+ return cur_spec_item
+ # not found
+ raise isc.cc.data.DataNotFoundError(id + " not found")
+ elif type(cur_spec) == dict and 'list_item_spec' in cur_spec.keys():
+ if cur_spec['item_name'] == id:
+ return cur_spec['list_item_spec']
+ # not found
+ raise isc.cc.data.DataNotFoundError(id + " not found")
+ elif type(cur_spec) == dict and 'named_set_item_spec' in cur_spec.keys():
+ return cur_spec['named_set_item_spec']
+ elif type(cur_spec) == list:
+ for cur_spec_item in cur_spec:
+ if cur_spec_item['item_name'] == id:
+ return cur_spec_item
+ # not found
+ raise isc.cc.data.DataNotFoundError(id + " not found")
+ else:
+ raise isc.cc.data.DataNotFoundError("Not a correct config specification")
+
def find_spec_part(element, identifier):
"""find the data definition for the given identifier
returns either a map with 'item_name' etc, or a list of those"""
@@ -117,38 +165,15 @@ def find_spec_part(element, identifier):
id_parts[:] = (value for value in id_parts if value != "")
cur_el = element
- for id_part in id_parts:
- # strip list selector part
- # don't need it for the spec part, so just drop it
- id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
- # is this part still needed? (see below)
- if type(cur_el) == dict and 'map_item_spec' in cur_el.keys():
- found = False
- for cur_el_item in cur_el['map_item_spec']:
- if cur_el_item['item_name'] == id:
- cur_el = cur_el_item
- found = True
- if not found:
- raise isc.cc.data.DataNotFoundError(id + " not found")
- elif type(cur_el) == dict and 'list_item_spec' in cur_el.keys():
- cur_el = cur_el['list_item_spec']
- elif type(cur_el) == list:
- found = False
- for cur_el_item in cur_el:
- if cur_el_item['item_name'] == id:
- cur_el = cur_el_item
- # if we need to go further, we may need to 'skip' a step here
- # but not if we're done
- if id_parts[-1] != id_part and type(cur_el) == dict:
- if "map_item_spec" in cur_el:
- cur_el = cur_el["map_item_spec"]
- elif "list_item_spec" in cur_el:
- cur_el = cur_el["list_item_spec"]
- found = True
- if not found:
- raise isc.cc.data.DataNotFoundError(id + " not found")
- else:
- raise isc.cc.data.DataNotFoundError("Not a correct config specification")
+ # up to the last element, if the result is a map or a list,
+ # we want its subspecification (i.e. list_item_spec or
+ # map_item_spec). For the last element in the identifier we
+ # always want the 'full' spec of the item
+ for id_part in id_parts[:-1]:
+ cur_el = _find_spec_part_single(cur_el, id_part)
+ cur_el = _get_map_or_list(cur_el)
+
+ cur_el = _find_spec_part_single(cur_el, id_parts[-1])
return cur_el
def spec_name_list(spec, prefix="", recurse=False):
@@ -168,11 +193,14 @@ def spec_name_list(spec, prefix="", recurse=False):
result.extend(spec_name_list(map_el['map_item_spec'], prefix + map_el['item_name'], recurse))
else:
result.append(prefix + name)
+ elif 'named_set_item_spec' in spec:
+ # we added a '/' above, but in this one case we don't want it
+ result.append(prefix[:-1])
else:
for name in spec:
result.append(prefix + name + "/")
if recurse:
- result.extend(spec_name_list(spec[name],name, recurse))
+ result.extend(spec_name_list(spec[name], name, recurse))
elif type(spec) == list:
for list_el in spec:
if 'item_name' in list_el:
@@ -184,7 +212,7 @@ def spec_name_list(spec, prefix="", recurse=False):
else:
raise ConfigDataError("Bad specification")
else:
- raise ConfigDataError("Bad specication")
+ raise ConfigDataError("Bad specification")
return result
class ConfigData:
@@ -213,6 +241,15 @@ class ConfigData:
return spec['item_default'], True
return None, False
+ def get_default_value(self, identifier):
+ """Returns the default from the specification, or None if there
+ is no default"""
+ spec = find_spec_part(self.specification.get_config_spec(), identifier)
+ if spec and 'item_default' in spec:
+ return spec['item_default']
+ else:
+ return None
+
def get_module_spec(self):
"""Returns the ModuleSpec object associated with this ConfigData"""
return self.specification
@@ -223,7 +260,7 @@ class ConfigData:
def get_local_config(self):
"""Returns the non-default config values in a dict"""
- return self.data;
+ return self.data
def get_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the full identifiers of
@@ -380,7 +417,39 @@ class MultiConfigData:
item_id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
id_list = module + "/" + id_prefix + "/" + item_id
id_prefix += "/" + id_part
- if list_indices is not None:
+ part_spec = find_spec_part(self._specifications[module].get_config_spec(), id_prefix)
+ if part_spec['item_type'] == 'named_set':
+ # For named sets, the identifier is partly defined
+ # by which values are actually present, and not
+ # purely by the specification.
+ # So if there is a part of the identifier left,
+ # we need to look up the value, then see if that
+ # contains the next part of the identifier we got
+ if len(id_parts) == 0:
+ if 'item_default' in part_spec:
+ return part_spec['item_default']
+ else:
+ return None
+ id_part = id_parts.pop(0)
+
+ named_set_value, type = self.get_value(id_list)
+ if id_part in named_set_value:
+ if len(id_parts) > 0:
+ # we are looking for the *default* value,
+ # so if it is not present in here, we need to
+ # look up the one from the spec
+ rest_of_id = "/".join(id_parts)
+ result = isc.cc.data.find_no_exc(named_set_value[id_part], rest_of_id)
+ if result is None:
+ spec_part = self.find_spec_part(identifier)
+ if 'item_default' in spec_part:
+ return spec_part['item_default']
+ return result
+ else:
+ return named_set_value[id_part]
+ else:
+ return None
+ elif list_indices is not None:
# there's actually two kinds of default here for
# lists; they can have a default value (like an
# empty list), but their elements can also have
@@ -417,7 +486,12 @@ class MultiConfigData:
spec = find_spec_part(self._specifications[module].get_config_spec(), id)
if 'item_default' in spec:
- return spec['item_default']
+ # one special case, named_set: its default is returned
+ # as-is, just like for any other type
+ return spec['item_default']
else:
return None
@@ -441,7 +515,7 @@ class MultiConfigData:
return value, self.CURRENT
if default:
value = self.get_default_value(identifier)
- if value != None:
+ if value is not None:
return value, self.DEFAULT
return None, self.NONE
@@ -461,7 +535,7 @@ class MultiConfigData:
spec_part_list = spec_part['list_item_spec']
list_value, status = self.get_value(identifier)
if list_value is None:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
if type(list_value) != list:
# the identifier specified a single element
@@ -477,12 +551,38 @@ class MultiConfigData:
for i in range(len(list_value)):
self._append_value_item(result, spec_part_list, "%s[%d]" % (identifier, i), all)
elif item_type == "map":
+ value, status = self.get_value(identifier)
# just show the specific contents of a map, we are
# almost never interested in just its name
spec_part_map = spec_part['map_item_spec']
self._append_value_item(result, spec_part_map, identifier, all)
+ elif item_type == "named_set":
+ value, status = self.get_value(identifier)
+
+ # show just the one entry, when either the map is empty,
+ # or when this element is not requested specifically
+ if len(value.keys()) == 0:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ {}, status)
+ result.append(entry)
+ elif not first and not all:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ None, status)
+ result.append(entry)
+ else:
+ spec_part_named_set = spec_part['named_set_item_spec']
+ for entry in value:
+ self._append_value_item(result,
+ spec_part_named_set,
+ identifier + "/" + entry,
+ all)
else:
value, status = self.get_value(identifier)
+ if status == self.NONE and not spec_part['item_optional']:
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
entry = _create_value_map_entry(identifier,
item_type,
value, status)
@@ -537,7 +637,7 @@ class MultiConfigData:
spec_part = spec_part['list_item_spec']
check_type(spec_part, value)
else:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
# Since we do not support list diffs (yet?), we need to
# copy the currently set list of items to _local_changes
@@ -547,15 +647,54 @@ class MultiConfigData:
cur_id_part = '/'
for id_part in id_parts:
id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
+ cur_value, status = self.get_value(cur_id_part + id)
+ # Check if the value was there in the first place
+ # If we are at the final element, we do not care whether we found
+ # it, since if we have reached this point and it did not exist,
+ # it was apparently an optional value without a default.
+ if status == MultiConfigData.NONE and cur_id_part != "/" and\
+ cur_id_part + id != identifier:
+ raise isc.cc.data.DataNotFoundError(id_part +
+ " not found in " +
+ cur_id_part)
if list_indices is not None:
- cur_list, status = self.get_value(cur_id_part + id)
+ # And check if we don't set something outside of any
+ # list
+ cur_list = cur_value
+ for list_index in list_indices:
+ if list_index >= len(cur_list):
+ raise isc.cc.data.DataNotFoundError("No item " +
+ str(list_index) + " in " + id_part)
+ else:
+ cur_list = cur_list[list_index]
if status != MultiConfigData.LOCAL:
isc.cc.data.set(self._local_changes,
cur_id_part + id,
- cur_list)
+ cur_value)
cur_id_part = cur_id_part + id_part + "/"
isc.cc.data.set(self._local_changes, identifier, value)
-
+
+ def _get_list_items(self, item_name):
+ """This method is used in get_config_item_list, to add list
+ indices and named_set names to the completion list. If
+ the given item_name is for a list or named_set, it'll
+ return a list of those (appended to item_name), otherwise
+ the list will only contain the item_name itself."""
+ spec_part = self.find_spec_part(item_name)
+ if 'item_type' in spec_part and \
+ spec_part['item_type'] == 'named_set':
+ subslash = ""
+ if spec_part['named_set_item_spec']['item_type'] == 'map' or\
+ spec_part['named_set_item_spec']['item_type'] == 'named_set':
+ subslash = "/"
+ values, status = self.get_value(item_name)
+ if len(values) > 0:
+ return [ item_name + "/" + v + subslash for v in values.keys() ]
+ else:
+ return [ item_name ]
+ else:
+ return [ item_name ]
+
def get_config_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the item_names of
the child items at the given identifier. If no identifier is
@@ -566,7 +705,11 @@ class MultiConfigData:
if identifier.startswith("/"):
identifier = identifier[1:]
spec = self.find_spec_part(identifier)
- return spec_name_list(spec, identifier + "/", recurse)
+ spec_list = spec_name_list(spec, identifier + "/", recurse)
+ result_list = []
+ for spec_name in spec_list:
+ result_list.extend(self._get_list_items(spec_name))
+ return result_list
else:
if recurse:
id_list = []
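A hypothetical spec fragment illustrating the new "named_set" item type handled
above: the map keys are not fixed by the specification, so the same
named_set_item_spec applies to whatever name is used, and completion
(get_config_item_list) lists the names that are currently set.

    from isc.config.config_data import find_spec_part

    named_set_spec = {
        "item_name": "zones",
        "item_type": "named_set",
        "item_optional": False,
        "item_default": {},
        "named_set_item_spec": {
            "item_name": "zone_info",
            "item_type": "map",
            "item_optional": False,
            "item_default": {},
            "map_item_spec": [
                {"item_name": "class",
                 "item_type": "string",
                 "item_optional": False,
                 "item_default": "IN"}
            ]
        }
    }

    # Any name under the named_set resolves to named_set_item_spec:
    assert (find_spec_part([named_set_spec], "zones/example.org") ==
            named_set_spec["named_set_item_spec"])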
diff --git a/src/lib/python/isc/config/config_messages.mes b/src/lib/python/isc/config/config_messages.mes
new file mode 100644
index 0000000..c52efb4
--- /dev/null
+++ b/src/lib/python/isc/config/config_messages.mes
@@ -0,0 +1,33 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the config_messages python module.
+
+# since these messages are for the python config library, care must
+# be taken that names do not conflict with the messages from the c++
+# config library. A checker script should verify that, but we do not
+# have that at this moment. So when adding a message, make sure that
+# the name is not already used in src/lib/config/config_messages.mes
+
+% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+
+% CONFIG_GET_FAILED error getting configuration from cfgmgr: %1
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 6c90677..b79f928 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -23,6 +23,7 @@
import json
import sys
+import time
import isc.cc.data
@@ -87,11 +88,11 @@ class ModuleSpec:
validate only a part of a configuration tree (like a list of
non-default values)"""
data_def = self.get_config_spec()
- if data_def:
+ if data_def is not None:
return _validate_spec_list(data_def, full, data, errors)
else:
# no spec, always bad
- if errors != None:
+ if errors is not None:
errors.append("No config_data specification")
return False
@@ -117,6 +118,26 @@ class ModuleSpec:
return False
+ def validate_statistics(self, full, stat, errors = None):
+ """Check whether the given piece of data conforms to this
+ data definition. If so, it returns True. If not, it will
+ return false. If errors is given, and is an array, a string
+ describing the error will be appended to it. The current
+ version stops as soon as there is one error so this list
+ will not be exhaustive. If 'full' is true, it also errors on
+ non-optional missing values. Set this to False if you want to
+ validate only a part of a statistics tree (like a list of
+ non-default values). It also checks 'item_format' for
+ time-related values (date-time, date and time)."""
+ stat_spec = self.get_statistics_spec()
+ if stat_spec is not None:
+ return _validate_spec_list(stat_spec, full, stat, errors)
+ else:
+ # no spec, always bad
+ if errors is not None:
+ errors.append("No statistics specification")
+ return False
+
def get_module_name(self):
"""Returns a string containing the name of the module as
specified by the specification given at __init__()"""
@@ -152,6 +173,14 @@ class ModuleSpec:
else:
return None
+ def get_statistics_spec(self):
+ """Returns a dict representation of the statistics part of the
+ specification, or None if there is none."""
+ if 'statistics' in self._module_spec:
+ return self._module_spec['statistics']
+ else:
+ return None
+
def __str__(self):
"""Returns a string representation of the full specification"""
return self._module_spec.__str__()
@@ -160,8 +189,9 @@ def _check(module_spec):
"""Checks the full specification. This is a dict that contains the
element "module_spec", which is in itself a dict that
must contain at least a "module_name" (string) and optionally
- a "config_data" and a "commands" element, both of which are lists
- of dicts. Raises a ModuleSpecError if there is a problem."""
+ a "config_data", a "commands" and a "statistics" element, all
+ of which are lists of dicts. Raises a ModuleSpecError if there
+ is a problem."""
if type(module_spec) != dict:
raise ModuleSpecError("data specification not a dict")
if "module_name" not in module_spec:
@@ -173,6 +203,8 @@ def _check(module_spec):
_check_config_spec(module_spec["config_data"])
if "commands" in module_spec:
_check_command_spec(module_spec["commands"])
+ if "statistics" in module_spec:
+ _check_statistics_spec(module_spec["statistics"])
def _check_config_spec(config_data):
# config data is a list of items represented by dicts that contain
@@ -229,7 +261,7 @@ def _check_item_spec(config_item):
item_type = config_item["item_type"]
if type(item_type) != str:
raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "named_set", "any"]:
raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
if "item_optional" in config_item:
if type(config_item["item_optional"]) != bool:
@@ -263,39 +295,96 @@ def _check_item_spec(config_item):
if type(map_item) != dict:
raise ModuleSpecError("map_item_spec element is not a dict")
_check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+def _check_statistics_spec(statistics):
+ # statistics is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the statistics part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(statistics) != list:
+ raise ModuleSpecError("statistics is of type " + str(type(statistics))
+ + ", not a list of items")
+ for stat_item in statistics:
+ _check_item_spec(stat_item)
+ # Additionally checks if there are 'item_title' and
+ # 'item_description'
+ for item in [ 'item_title', 'item_description' ]:
+ if item not in stat_item:
+ raise ModuleSpecError("no " + item + " in statistics item")
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+ is is correct."""
+ # TODO: should be added other format types if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ # reverse check
+ return value == time.strftime(
+ time_formats[fmt],
+ time.strptime(value, time_formats[fmt]))
+ except (ValueError, TypeError):
+ break
+ return False
def _validate_type(spec, value, errors):
"""Returns true if the value is of the correct type given the
specification"""
data_type = spec['item_type']
if data_type == "integer" and type(value) != int:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be an integer")
return False
elif data_type == "real" and type(value) != float:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a real")
return False
elif data_type == "boolean" and type(value) != bool:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a boolean")
return False
elif data_type == "string" and type(value) != str:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a string")
return False
elif data_type == "list" and type(value) != list:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a list")
return False
elif data_type == "map" and type(value) != dict:
+ if errors is not None:
+ errors.append(str(value) + " should be a map")
+ return False
+ elif data_type == "named_set" and type(value) != dict:
if errors != None:
errors.append(str(value) + " should be a map")
return False
else:
return True
+def _validate_format(spec, value, errors):
+ """Returns true if the value is of the correct format given the
+ specification. And also return true if no 'item_format'"""
+ if "item_format" in spec:
+ item_format = spec['item_format']
+ if not _check_format(value, item_format):
+ if errors is not None:
+ errors.append("format type of " + str(value)
+ + " should be " + item_format)
+ return False
+ return True
+
def _validate_item(spec, full, data, errors):
if not _validate_type(spec, data, errors):
return False
@@ -304,12 +393,24 @@ def _validate_item(spec, full, data, errors):
for data_el in data:
if not _validate_type(list_spec, data_el, errors):
return False
+ if not _validate_format(list_spec, data_el, errors):
+ return False
if list_spec['item_type'] == "map":
if not _validate_item(list_spec, full, data_el, errors):
return False
elif type(data) == dict:
- if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
- return False
+ if 'map_item_spec' in spec:
+ if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
+ return False
+ else:
+ named_set_spec = spec['named_set_item_spec']
+ for data_el in data.values():
+ if not _validate_type(named_set_spec, data_el, errors):
+ return False
+ if not _validate_item(named_set_spec, full, data_el, errors):
+ return False
+ elif not _validate_format(spec, data, errors):
+ return False
return True
def _validate_spec(spec, full, data, errors):
@@ -321,7 +422,7 @@ def _validate_spec(spec, full, data, errors):
elif item_name in data:
return _validate_item(spec, full, data[item_name], errors)
elif full and not item_optional:
- if errors != None:
+ if errors is not None:
errors.append("non-optional item " + item_name + " missing")
return False
else:
@@ -345,8 +446,8 @@ def _validate_spec_list(module_spec, full, data, errors):
for spec_item in module_spec:
if spec_item["item_name"] == item_name:
found = True
- if not found:
- if errors != None:
+ if not found and item_name != "version":
+ if errors is not None:
errors.append("unknown item " + item_name)
validated = False
return validated
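
To see the "reverse check" used by _check_format() in isolation, here is a small standalone sketch (standard library only, mirroring the format table added above); it shows, for instance, why a 'date-time' value must carry the trailing 'Z':

# Standalone illustration of the strptime/strftime round-trip check.
import time

TIME_FORMATS = {'date-time': "%Y-%m-%dT%H:%M:%SZ",
                'date':      "%Y-%m-%d",
                'time':      "%H:%M:%S"}

def check_format(value, format_name):
    fmt = TIME_FORMATS.get(format_name)
    if fmt is None:
        return False
    try:
        # parse and re-render; only an exact round trip is accepted
        return value == time.strftime(fmt, time.strptime(value, fmt))
    except (ValueError, TypeError):
        return False

if __name__ == '__main__':
    print(check_format('2011-05-27T19:42:57Z', 'date-time'))  # True
    print(check_format('2011-05-27T19:42:57', 'date-time'))   # False, no trailing Z
    print(check_format('99:99:99', 'time'))                   # False
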
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 622b23c..6670ee7 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -4,6 +4,13 @@ PYTESTS += module_spec_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += unittest_fakesession.py
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -13,8 +20,15 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
+ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index 4edc559..8d616e2 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -22,6 +22,8 @@ import os
from isc.config.ccsession import *
from isc.config.config_data import BIND10_CONFIG_DATA_VERSION
from unittest_fakesession import FakeModuleCCSession, WouldBlockForever
+import bind10_config
+import isc.log
class TestHelperFunctions(unittest.TestCase):
def test_parse_answer(self):
@@ -106,8 +108,11 @@ class TestModuleCCSession(unittest.TestCase):
def spec_file(self, file):
return self.data_path + os.sep + file
- def create_session(self, spec_file_name, config_handler = None, command_handler = None, cc_session = None):
- return ModuleCCSession(self.spec_file(spec_file_name), config_handler, command_handler, cc_session)
+ def create_session(self, spec_file_name, config_handler = None,
+ command_handler = None, cc_session = None):
+ return ModuleCCSession(self.spec_file(spec_file_name),
+ config_handler, command_handler,
+ cc_session, False)
def test_init(self):
fake_session = FakeModuleCCSession()
@@ -220,6 +225,31 @@ class TestModuleCCSession(unittest.TestCase):
self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
fake_session.get_message('ConfigManager', None))
+ def test_start5(self):
+ fake_session = FakeModuleCCSession()
+ mccs = self.create_session("spec2.spec", None, None, fake_session)
+ mccs.set_config_handler(self.my_config_handler_ok)
+ self.assertEqual(len(fake_session.message_queue), 0)
+ fake_session.group_sendmsg(None, 'Spec2')
+ fake_session.group_sendmsg(None, 'Spec2')
+ self.assertRaises(ModuleCCSessionError, mccs.start)
+ self.assertEqual(len(fake_session.message_queue), 2)
+ self.assertEqual({'command': ['module_spec', mccs.specification._module_spec]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+
+ self.assertEqual(len(fake_session.message_queue), 0)
+ fake_session.group_sendmsg({'result': [ 0 ]}, "Spec2")
+ fake_session.group_sendmsg({'result': [ 0, {"Wrong": True} ]}, "Spec2")
+ self.assertRaises(ModuleCCSessionError, mccs.start)
+ self.assertEqual(len(fake_session.message_queue), 2)
+
+ self.assertEqual({'command': ['module_spec', mccs.specification._module_spec]},
+ fake_session.get_message('ConfigManager', None))
+ self.assertEqual({'command': ['get_config', {'module_name': 'Spec2'}]},
+ fake_session.get_message('ConfigManager', None))
+
def test_get_socket(self):
fake_session = FakeModuleCCSession()
mccs = self.create_session("spec1.spec", None, None, fake_session)
@@ -579,7 +609,43 @@ class TestModuleCCSession(unittest.TestCase):
self.assertEqual(len(fake_session.message_queue), 1)
mccs.check_command()
self.assertEqual(len(fake_session.message_queue), 0)
-
+
+ def test_logconfig_handler(self):
+ # test whether default_logconfig_handler reacts nicely to
+ # bad data. We assume the actual logger output is tested
+ # elsewhere
+ self.assertRaises(TypeError, default_logconfig_handler);
+ self.assertRaises(TypeError, default_logconfig_handler, 1);
+
+ spec = isc.config.module_spec_from_file(
+ path_search('logging.spec', bind10_config.PLUGIN_PATHS))
+ config_data = ConfigData(spec)
+
+ self.assertRaises(TypeError, default_logconfig_handler, 1, config_data)
+
+ default_logconfig_handler({}, config_data)
+
+ # Wrong data should not raise, but simply not be accepted
+ # This would log a lot of errors, so we may want to suppress that later
+ default_logconfig_handler({ "bad_data": "indeed" }, config_data)
+ default_logconfig_handler({ "bad_data": 1}, config_data)
+ default_logconfig_handler({ "bad_data": 1123 }, config_data)
+ default_logconfig_handler({ "bad_data": True }, config_data)
+ default_logconfig_handler({ "bad_data": False }, config_data)
+ default_logconfig_handler({ "bad_data": 1.1 }, config_data)
+ default_logconfig_handler({ "bad_data": [] }, config_data)
+ default_logconfig_handler({ "bad_data": [[],[],[[1, 3, False, "foo" ]]] },
+ config_data)
+ default_logconfig_handler({ "bad_data": [ 1, 2, { "b": { "c": "d" } } ] },
+ config_data)
+
+ # Try a correct config
+ log_conf = {"loggers":
+ [{"name": "b10-xfrout", "output_options":
+ [{"output": "/tmp/bind10.log",
+ "destination": "file",
+ "flush": True}]}]}
+ default_logconfig_handler(log_conf, config_data)
class fakeData:
def decode(self):
@@ -629,6 +695,12 @@ class TestUIModuleCCSession(unittest.TestCase):
fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
return UIModuleCCSession(fake_conn)
+ def create_uccs_named_set(self, fake_conn):
+ module_spec = isc.config.module_spec_from_file(self.spec_file("spec32.spec"))
+ fake_conn.set_get_answer('/module_spec', { module_spec.get_module_name(): module_spec.get_full_spec()})
+ fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
+ return UIModuleCCSession(fake_conn)
+
def test_init(self):
fake_conn = fakeUIConn()
fake_conn.set_get_answer('/module_spec', {})
@@ -649,12 +721,14 @@ class TestUIModuleCCSession(unittest.TestCase):
def test_add_remove_value(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
+
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "Spec2/item1", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "Spec2/item1", "a")
+
self.assertEqual({}, uccs._local_changes)
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['a', 'b', 'foo']}}, uccs._local_changes)
@@ -664,10 +738,87 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs.remove_value("Spec2/item5", "foo")
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
- uccs.add_value("Spec2/item5", "foo")
+ self.assertRaises(isc.cc.data.DataAlreadyPresentError,
+ uccs.add_value, "Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "Spec2/item5[123]", None)
uccs.remove_value("Spec2/item5[0]", None)
self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
+ uccs.add_value("Spec2/item5", None);
+ self.assertEqual({'Spec2': {'item5': ['']}}, uccs._local_changes)
+ # Intending to empty a list element, but forget specifying the index.
+ self.assertRaises(isc.cc.data.DataTypeError,
+ uccs.remove_value, "Spec2/item5", None)
+
+ def test_add_remove_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2}, value)
+
+ # make sure that removing from default actually removes it
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+ self.assertEqual(uccs.LOCAL, status)
+
+ # ok, put it back now
+ uccs.add_value("/Spec32/named_set_item", "a")
+ uccs.set_value("/Spec32/named_set_item/a", 1)
+
+ uccs.add_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
+
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ uccs.remove_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+
+ uccs.set_value("/Spec32/named_set_item/c", 5)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({"b": 2, "c": 5}, value)
+
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.set_value,
+ "/Spec32/named_set_item/no_such_item/a",
+ 4)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "/Spec32/named_set_item",
+ "no_such_item")
+
+ def test_set_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({}, value)
+ self.assertEqual(status, uccs.DEFAULT)
+
+ # Try setting a value that is optional but has no default
+ uccs.add_value("/Spec32/named_set_item2", "new1")
+ uccs.set_value("/Spec32/named_set_item2/new1/first", 3)
+ # Different method to add a new element
+ uccs.set_value("/Spec32/named_set_item2/new2", { "second": 4 })
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3 }, "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ uccs.set_value("/Spec32/named_set_item2/new1/second", "foo")
+
+ value, status = uccs.get_value("/Spec32/named_set_item2")
+ self.assertEqual({ "new1": {"first": 3, "second": "foo" },
+ "new2": {"second": 4}},
+ value)
+ self.assertEqual(status, uccs.LOCAL)
+
+ # make sure using a bad name still fails
+ self.assertRaises(isc.cc.data.DataNotFoundError, uccs.set_value,
+ "/Spec32/named_set_item2/doesnotexist/first", 3)
+
+
def test_commit(self):
fake_conn = fakeUIConn()
@@ -677,5 +828,6 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs.commit()
if __name__ == '__main__':
+ isc.log.init("bind10")
unittest.main()
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index b06db31..589a398 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -37,7 +37,7 @@ class TestConfigManagerData(unittest.TestCase):
It shouldn't append the data path to it.
"""
abs_path = self.data_path + os.sep + "b10-config-imaginary.db"
- data = ConfigManagerData(os.getcwd(), abs_path)
+ data = ConfigManagerData(self.data_path, abs_path)
self.assertEqual(abs_path, data.db_filename)
self.assertEqual(self.data_path, data.data_path)
@@ -88,7 +88,7 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(cfd1, cfd2)
cfd2.data['test'] = { 'a': [ 1, 2, 3]}
self.assertNotEqual(cfd1, cfd2)
-
+
class TestConfigManager(unittest.TestCase):
@@ -128,7 +128,7 @@ class TestConfigManager(unittest.TestCase):
msg = self.fake_session.get_message("Boss", None)
self.assert_(msg)
# this one is actually wrong, but 'current status quo'
- self.assertEqual(msg, {"running": "configmanager"})
+ self.assertEqual(msg, {"running": "ConfigManager"})
def test_set_module_spec(self):
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
@@ -176,7 +176,9 @@ class TestConfigManager(unittest.TestCase):
self.cm.set_module_spec(module_spec)
self.assert_(module_spec.get_module_name() in self.cm.module_specs)
module_spec2 = self.cm.get_module_spec(module_spec.get_module_name())
- self.assertEqual(module_spec, module_spec2)
+ self.assertEqual(module_spec.get_full_spec(), module_spec2)
+
+ self.assertEqual({}, self.cm.get_module_spec("nosuchmodule"))
def test_get_config_spec(self):
config_spec = self.cm.get_config_spec()
@@ -196,8 +198,8 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
config_spec = self.cm.get_config_spec('Spec2')
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
-
-
+
+
def test_get_commands_spec(self):
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, {})
@@ -217,6 +219,25 @@ class TestConfigManager(unittest.TestCase):
commands_spec = self.cm.get_commands_spec('Spec2')
self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
+ def test_get_statistics_spec(self):
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, {})
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, { 'Spec1': None })
+ self.cm.remove_module_spec('Spec1')
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+ statistics_spec = self.cm.get_statistics_spec('Spec2')
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+
def test_read_config(self):
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
self.cm.read_config()
@@ -229,7 +250,7 @@ class TestConfigManager(unittest.TestCase):
def test_write_config(self):
# tested in ConfigManagerData tests
pass
-
+
def _handle_msg_helper(self, msg, expected_answer):
answer = self.cm.handle_msg(msg)
self.assertEqual(expected_answer, answer)
@@ -239,6 +260,7 @@ class TestConfigManager(unittest.TestCase):
self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
#self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
@@ -316,14 +338,18 @@ class TestConfigManager(unittest.TestCase):
# self.fake_session.get_message(self.name, None))
#self.assertEqual({'version': 1, 'TestModule': {'test': 124}}, self.cm.config.data)
#
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["module_spec", self.spec.get_full_spec()]
},
{'result': [0]})
self._handle_msg_helper({ "command": [ "module_spec", { 'foo': 1 } ] },
{'result': [1, 'Error in data definition: no module_name in module_spec']})
self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_full_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_module_spec",
+ { "module_name" : "Spec2" } ] },
+ { 'result': [ 0, self.spec.get_full_spec() ] })
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
# re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
#self.assertEqual(len(self.fake_session.message_queue), 2)
# the name here is actually wrong (and hardcoded), but needed in the current version
@@ -333,7 +359,7 @@ class TestConfigManager(unittest.TestCase):
#self.assertEqual({'commands_update': [ self.name, self.commands ] },
# self.fake_session.get_message("Cmdctl", None))
- self._handle_msg_helper({ "command":
+ self._handle_msg_helper({ "command":
["shutdown"]
},
{'result': [0]})
@@ -445,6 +471,7 @@ class TestConfigManager(unittest.TestCase):
def test_run(self):
self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
+ self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
self.cm.run()
pass
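
For reference, a statistics item that would satisfy the new get_statistics_spec/_check_statistics_spec path might look like the following; this is a hypothetical entry written for illustration, not the content of the spec files used by these tests:

# Hypothetical statistics item spec; field names follow the checks in
# module_spec.py (_check_item_spec requires the usual item_* keys,
# _check_statistics_spec additionally requires item_title and
# item_description, and item_format is verified by _check_format()).
boot_time_item = {
    "item_name": "boot_time",
    "item_type": "string",
    "item_optional": False,
    "item_default": "1970-01-01T00:00:00Z",
    "item_format": "date-time",
    "item_title": "Boot time",
    "item_description": "A date-time when the module starts",
}

statistics_spec = [boot_time_item]
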
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index 923e0b6..bede625 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -236,6 +236,28 @@ class TestConfigData(unittest.TestCase):
value, default = self.cd.get_value("item6/value2")
self.assertEqual(None, value)
self.assertEqual(False, default)
+ self.assertRaises(isc.cc.data.DataNotFoundError, self.cd.get_value, "item6/no_such_item")
+
+ def test_get_default_value(self):
+ self.assertEqual(1, self.cd.get_default_value("item1"))
+ self.assertEqual('default', self.cd.get_default_value("item6/value1"))
+ self.assertEqual(None, self.cd.get_default_value("item6/value2"))
+
+ # set some local values to something else, and see if we
+ # still get the default
+ self.cd.set_local_config({"item1": 2, "item6": { "value1": "asdf" } })
+
+ self.assertEqual((2, False), self.cd.get_value("item1"))
+ self.assertEqual(1, self.cd.get_default_value("item1"))
+ self.assertEqual(('asdf', False), self.cd.get_value("item6/value1"))
+ self.assertEqual('default', self.cd.get_default_value("item6/value1"))
+
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ self.cd.get_default_value,
+ "does_not_exist/value1")
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ self.cd.get_default_value,
+ "item6/doesnotexist")
def test_set_local_config(self):
self.cd.set_local_config({"item1": 2})
@@ -308,9 +330,38 @@ class TestMultiConfigData(unittest.TestCase):
spec_part = self.mcd.find_spec_part("Spec2/item1")
self.assertEqual({'item_name': 'item1', 'item_type': 'integer', 'item_optional': False, 'item_default': 1, }, spec_part)
+ def test_find_spec_part_nested(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec30.spec")
+ self.mcd.set_specification(module_spec)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/final_element")
+ self.assertEqual({'item_name': 'final_element', 'item_type': 'string', 'item_default': 'hello', 'item_optional': False}, spec_part)
+ spec_part = self.mcd.find_spec_part("/BAD_NAME/first_list_items[0]/second_list_items[1]/final_element")
+ self.assertEqual(None, spec_part)
+
+ def test_find_spec_part_nested2(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec31.spec")
+ self.mcd.set_specification(module_spec)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/map_element/list1[1]/list2[2]")
+ self.assertEqual({"item_name": "number", "item_type": "integer", "item_optional": False, "item_default": 1}, spec_part)
+
+ spec_part = self.mcd.find_spec_part("/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/map_element/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/map_element/list1[1]/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+ spec_part = self.mcd.find_spec_part("/lists/first_list_items[0]/second_list_items[1]/map_element/list1[1]/list2[1]/DOESNOTEXIST")
+ self.assertEqual(None, spec_part)
+
def test_get_current_config(self):
cf = { 'module1': { 'item1': 2, 'item2': True } }
- self.mcd._set_current_config(cf);
+ self.mcd._set_current_config(cf)
self.assertEqual(cf, self.mcd.get_current_config())
def test_get_local_changes(self):
@@ -371,6 +422,17 @@ class TestMultiConfigData(unittest.TestCase):
value = self.mcd.get_default_value("Spec2/no_such_item/asdf")
self.assertEqual(None, value)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ value = self.mcd.get_default_value("Spec32/named_set_item")
+ self.assertEqual({ 'a': 1, 'b': 2}, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/a")
+ self.assertEqual(1, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/b")
+ self.assertEqual(2, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/no_such_item")
+ self.assertEqual(None, value)
+
def test_get_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -494,6 +556,29 @@ class TestMultiConfigData(unittest.TestCase):
maps = self.mcd.get_value_maps("/Spec22/value9")
self.assertEqual(expected, maps)
+ def test_get_value_maps_named_set(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ maps = self.mcd.get_value_maps()
+ self.assertEqual([{'default': False, 'type': 'module',
+ 'name': 'Spec32', 'value': None,
+ 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False},
+ {'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/a")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/b")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+
def test_set_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -532,6 +617,24 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list("Spec2", True)
self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
+ def test_get_config_item_list_named_set(self):
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual([], config_items)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, False)
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, True)
+ self.assertEqual(['Spec32/named_set_item', 'Spec32/named_set_item2'], config_items)
+ self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
+ config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
+ self.assertEqual(['Spec32/named_set_item/aaaa',
+ 'Spec32/named_set_item/aabb',
+ 'Spec32/named_set_item/bbbb',
+ ], config_items)
+
if __name__ == '__main__':
unittest.main()
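
For context, the named_set entries exercised above combine 'item_type': 'named_set' with a 'named_set_item_spec' describing every member of the set. A hypothetical entry of roughly the shape these tests expect (values guessed from the assertions, not copied from spec32.spec):

# Hypothetical named_set item definition, loosely modeled on what the
# spec32 tests exercise; the real spec32.spec may differ in detail.
named_set_item = {
    "item_name": "named_set_item",
    "item_type": "named_set",
    "item_optional": False,
    "item_default": {"a": 1, "b": 2},
    # one spec describing every member of the set, whatever its key is
    "named_set_item_spec": {
        "item_name": "named_set_element",
        "item_type": "integer",
        "item_optional": False,
        "item_default": 3,
    },
}
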
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index a4dcdec..fc53d23 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -81,6 +81,11 @@ class TestModuleSpec(unittest.TestCase):
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec20.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec21.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec26.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec34.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec35.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec36.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec37.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec38.spec")
def validate_data(self, specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
@@ -98,6 +103,9 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(True, self.validate_data("spec22.spec", "data22_6.data"))
self.assertEqual(True, self.validate_data("spec22.spec", "data22_7.data"))
self.assertEqual(False, self.validate_data("spec22.spec", "data22_8.data"))
+ self.assertEqual(True, self.validate_data("spec32.spec", "data32_1.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_2.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_3.data"))
def validate_command_params(self, specfile_name, datafile_name, cmd_name):
dd = self.read_spec_file(specfile_name);
@@ -120,6 +128,17 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd1'))
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd2'))
+ def test_statistics_validation(self):
+ def _validate_stat(specfile_name, datafile_name):
+ dd = self.read_spec_file(specfile_name);
+ data_file = open(self.spec_file(datafile_name))
+ data_str = data_file.read()
+ data = isc.cc.data.parse_value_str(data_str)
+ return dd.validate_statistics(True, data, [])
+ self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None));
+ self.assertTrue(_validate_stat("spec33.spec", "data33_1.data"))
+ self.assertFalse(_validate_stat("spec33.spec", "data33_2.data"))
+
def test_init(self):
self.assertRaises(ModuleSpecError, ModuleSpec, 1)
module_spec = isc.config.module_spec_from_file(self.spec_file("spec1.spec"), False)
@@ -266,6 +285,80 @@ class TestModuleSpec(unittest.TestCase):
}
)
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date-time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27T19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ def test_check_format(self):
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'date-time'))
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27', 'date'))
+ self.assertTrue(isc.config.module_spec._check_format('19:42:57', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99T99:99:99Z', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99', 'date'))
+ self.assertFalse(isc.config.module_spec._check_format('99:99:99', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, None))
+ # wrong date-time-type format not ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57', 'date-time'))
+ # wrong date-type format ending with "T"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T', 'date'))
+ # wrong time-type format ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57Z', 'time'))
+
def test_validate_type(self):
errors = []
self.assertEqual(True, isc.config.module_spec._validate_type({ 'item_type': 'integer' }, 1, errors))
@@ -303,6 +396,25 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, isc.config.module_spec._validate_type({ 'item_type': 'map' }, 1, errors))
self.assertEqual(['1 should be a map'], errors)
+ def test_validate_format(self):
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "2011-05-27T19:42:57Z", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", errors))
+ self.assertEqual(['format type of a should be date-time'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "2011-05-27", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", errors))
+ self.assertEqual(['format type of a should be date'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "19:42:57", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", errors))
+ self.assertEqual(['format type of a should be time'], errors)
+
def test_validate_spec(self):
spec = { 'item_name': "an_item",
'item_type': "string",
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 5b9dafb..a5b4ca3 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -1,5 +1,38 @@
SUBDIRS = . tests
+# old data, should be removed in the near future once conversion is done
+pythondir = $(pyexecdir)/isc/datasrc
python_PYTHON = __init__.py master.py sqlite3_ds.py
-pythondir = $(pyexecdir)/isc/datasrc
+
+# new data
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += $(SQLITE_CFLAGS)
+
+python_LTLIBRARIES = datasrc.la
+datasrc_la_SOURCES = datasrc.cc datasrc.h
+datasrc_la_SOURCES += client_python.cc client_python.h
+datasrc_la_SOURCES += iterator_python.cc iterator_python.h
+datasrc_la_SOURCES += finder_python.cc finder_python.h
+datasrc_la_SOURCES += updater_python.cc updater_python.h
+
+datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
+datasrc_la_LDFLAGS += -module
+datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
+datasrc_la_LIBADD += $(PYTHON_LIB)
+
+EXTRA_DIST = client_inc.cc
+EXTRA_DIST += finder_inc.cc
+EXTRA_DIST += iterator_inc.cc
+EXTRA_DIST += updater_inc.cc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0e1e481..7ebd918 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,2 +1,35 @@
-from isc.datasrc.master import *
+import sys
+import os
+
+# The datasource factory loader uses dlopen, as does python
+# for its modules. Some dynamic linkers do not play nice if
+# modules are not loaded with RTLD_GLOBAL, a symptom of which
+# is that exceptions are not recognized by type. So to make
+# sure this doesn't happen, we temporarily set RTLD_GLOBAL
+# during the loading of the datasource wrappers.
+import ctypes
+flags = sys.getdlopenflags()
+sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL)
+
+# this setup is a temporary workaround to deal with the problem of
+# having both 'normal' python modules and a wrapper module
+# Once all programs use the new interface, we should remove the
+# old, and the setup can be made similar to that of the log wrappers.
+intree = False
+for base in sys.path[:]:
+ datasrc_libdir = os.path.join(base, 'isc/datasrc/.libs')
+ if os.path.exists(datasrc_libdir):
+ sys.path.insert(0, datasrc_libdir)
+ intree = True
+
+if intree:
+ from datasrc import *
+else:
+ from isc.datasrc.datasrc import *
+
+# revert to the default dlopen flags
+sys.setdlopenflags(flags)
+
from isc.datasrc.sqlite3_ds import *
+from isc.datasrc.master import *
+
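
Stripped of the in-tree/installed path handling, the dlopen-flag dance above follows a generic pattern that may be reused for other wrapper modules; a minimal sketch (the imported module name is a placeholder):

# Generic RTLD_GLOBAL import pattern (sketch only).
import sys
import ctypes

saved_flags = sys.getdlopenflags()
try:
    sys.setdlopenflags(saved_flags | ctypes.RTLD_GLOBAL)
    try:
        # placeholder name; an ImportError here is expected outside BIND 10
        import some_wrapper_module
    except ImportError:
        pass
finally:
    # always restore the previous flags for subsequent imports
    sys.setdlopenflags(saved_flags)
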
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
new file mode 100644
index 0000000..b81f48d
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -0,0 +1,170 @@
+namespace {
+
+const char* const DataSourceClient_doc = "\
+The base class of data source clients.\n\
+\n\
+This is the python wrapper for the abstract base class that defines\n\
+the common interface for various types of data source clients. A data\n\
+source client is a top level access point to a data source, allowing \n\
+various operations on the data source such as lookups, traversing or \n\
+updates.\n\
+This class serves as both the factory and the main interface to those \n\
+classes.\n\
+\n\
+The constructor takes two arguments; a type (string), and\n\
+configuration data for a datasource client of that type. The configuration\n\
+data is currently passed as a JSON in string form, and its contents depend\n\
+on the type of datasource from the first argument. For instance, a\n\
+datasource of type \"sqlite3\" takes the config \n\
+{ \"database_file\": \"/var/example.org\" }\n\
+We may in the future add support for passing structured configuration\n\
+data directly, but right now it is limited to a JSON-formatted string.\n\
+\n\
+The client class itself has limited focus and delegates \n\
+the responsibility for these specific operations to other (c++) classes;\n\
+in general methods of this class act as factories of these other classes.\n\
+\n\
+- InMemoryClient: A client of a conceptual data source that stores all\n\
+ necessary data in memory for faster lookups\n\
+- DatabaseClient: A client that uses a real database backend (such as\n\
+ an SQL database). It would internally hold a connection to the\n\
+ underlying database system.\n\
+\n\
+It is intentional that, unlike their base class, the names of these\n\
+derived classes don't contain \"DataSource\". It's also noteworthy\n\
+that the naming of the base class is somewhat redundant because the\n\
+namespace datasrc would indicate that it's related to a data source.\n\
+The redundant naming comes from the observation that namespaces are\n\
+often omitted with using directives, in which case \"Client\" would be\n\
+too generic. On the other hand, concrete derived classes are generally\n\
+not expected to be referenced directly from other modules and\n\
+applications, so we'll give them more concise names such as\n\
+InMemoryClient. A single DataSourceClient object is expected to handle\n\
+only a single RR class even if the underlying data source contains\n\
+records for multiple RR classes. Likewise, (when we support views) a\n\
+DataSourceClient object is expected to handle only a single view.\n\
+\n\
+If the application uses multiple threads, each thread will need to\n\
+create and use a separate DataSourceClient. This is because some\n\
+database backends don't allow multiple threads to share the same\n\
+connection to the database.\n\
+\n\
+For a client using an in memory backend, this may result in having\n\
+multiple copies of the same data in memory, increasing the memory\n\
+footprint substantially. Depending on how to support multiple CPU\n\
+cores for concurrent lookups on the same single data source (which is\n\
+not fully fixed yet, and for which multiple threads may be used), this\n\
+design may have to be revisited. This class (and therefore its derived\n\
+classes) are not copyable. This is because the derived classes would\n\
+generally contain attributes that are not easy to copy (such as a\n\
+large size of in memory data or a network connection to a database\n\
+server). In order to avoid a surprising disruption with a naive copy\n\
+it's prohibited explicitly. For the expected usage of the client\n\
+classes the restriction should be acceptable.\n\
+\n\
+Todo: This class is still not complete. It will need more factory\n\
+methods, e.g. for (re)loading a zone.\n\
+";
+
+const char* const DataSourceClient_findZone_doc = "\
+find_zone(name) -> (code, ZoneFinder)\n\
+\n\
+Returns a ZoneFinder for a zone that best matches the given name.\n\
+\n\
+code: The result code of the operation (integer).\n\
+- DataSourceClient.SUCCESS: A zone that gives an exact match is found\n\
+- DataSourceClient.PARTIALMATCH: A zone whose origin is a super domain of name\n\
+ is found (but there is no exact match)\n\
+- DataSourceClient.NOTFOUND: For all other cases.\n\
+ZoneFinder: ZoneFinder object for the found zone if one is found;\n\
+otherwise None.\n\
+\n\
+Any internal error will be raised as an isc.datasrc.Error exception\n\
+\n\
+Parameters:\n\
+ name A domain name for which the search is performed.\n\
+\n\
+Return Value(s): A tuple containing a result value and a ZoneFinder object or\n\
+None\n\
+";
+
+const char* const DataSourceClient_getIterator_doc = "\
+get_iterator(name) -> ZoneIterator\n\
+\n\
+Returns an iterator to the given zone.\n\
+\n\
+This allows for traversing the whole zone. The returned object can\n\
+provide the RRsets one by one.\n\
+\n\
+This throws isc.datasrc.Error when the zone does not exist in the\n\
+datasource, or when an internal error occurs.\n\
+\n\
+The default implementation throws isc.datasrc.NotImplemented. This allows for\n\
+easy and fast deployment of minimal custom data sources, where the\n\
+user/implementor doesn't have to care about anything else but the\n\
+actual queries. Also, in some cases, it isn't possible to traverse the\n\
+zone from a logical point of view (e.g. dynamically generated zone data).\n\
+\n\
+It is not specified whether a concrete implementation of this method\n\
+can throw anything else.\n\
+\n\
+Parameters:\n\
+ isc.dns.Name The name of the zone apex to be traversed. It doesn't do\n\
+ a nearest match as find_zone does.\n\
+\n\
+Return Value(s): Pointer to the iterator.\n\
+";
+
+const char* const DataSourceClient_getUpdater_doc = "\
+get_updater(name, replace) -> ZoneUpdater\n\
+\n\
+Return an updater to make updates to a specific zone.\n\
+\n\
+The RR class of the zone is the one that the client is expected to\n\
+handle (see the detailed description of this class).\n\
+\n\
+If the specified zone is not found via the client, a None object will\n\
+be returned; in other words a completely new zone cannot be created\n\
+using an updater. It must be created beforehand (even if it's an empty\n\
+placeholder) in a way specific to the underlying data source.\n\
+\n\
+Conceptually, the updater will trigger a separate transaction for\n\
+subsequent updates to the zone within the context of the updater (the\n\
+actual implementation of the \"transaction\" may vary for the specific\n\
+underlying data source). Until commit() is performed on the updater,\n\
+the intermediate updates won't affect the results of other methods\n\
+(and the result of the object's methods created by other factory\n\
+methods). Likewise, if the updater is destructed without performing\n\
+commit(), the intermediate updates will be effectively canceled and\n\
+will never affect other methods.\n\
+\n\
+If the underlying data source allows concurrent updates, this method\n\
+can be called multiple times while the previously returned updater(s)\n\
+are still active. In this case each updater triggers a different\n\
+\"transaction\". Normally it would be for different zones for such a\n\
+case as handling multiple incoming AXFR streams concurrently, but this\n\
+interface does not even prohibit an attempt of getting more than one\n\
+updater for the same zone, as long as the underlying data source\n\
+allows such an operation (and any conflict resolution is left to the\n\
+specific implementation).\n\
+\n\
+If replace is true, any existing RRs of the zone will be deleted on\n\
+successful completion of updates (after commit() on the updater); if\n\
+it's false, the existing RRs will be intact unless explicitly deleted\n\
+by delete_rrset() on the updater.\n\
+\n\
+A data source can be \"read only\" or can prohibit partial updates. In\n\
+such cases this method will result in an isc.datasrc.NotImplemented\n\
+exception (unconditionally, or when replace is false).\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The underlying data source does not support\n\
+ updates.\n\
+ isc.datasrc.Error Internal error in the underlying data source.\n\
+\n\
+Parameters:\n\
+ name The zone name to be updated\n\
+ replace Whether to delete existing RRs before making updates\n\
+\n\
+";
+} // unnamed namespace
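
Taken together, these docstrings suggest a usage pattern along the following lines; this is only a sketch based on the documented signatures, with a made-up database path and no error handling:

# Rough usage sketch based on the docstrings above; the database file is
# an assumption, and the zone must already exist in it.
import isc.datasrc
import isc.dns  # the Name class, as referenced in the docstrings above

client = isc.datasrc.DataSourceClient(
    "sqlite3", '{ "database_file": "/tmp/example.org.sqlite3" }')

# find_zone() returns a (result code, ZoneFinder-or-None) tuple
code, finder = client.find_zone(isc.dns.Name("example.org"))
if code == isc.datasrc.DataSourceClient.SUCCESS:
    # get_iterator() walks the whole zone; get_updater() starts a
    # transaction-like update (replace=False keeps existing RRs)
    iterator = client.get_iterator(isc.dns.Name("example.org"))
    updater = client.get_updater(isc.dns.Name("example.org"), False)
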
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
new file mode 100644
index 0000000..caebd25
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -0,0 +1,277 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/factory.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "client_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_DataSourceClient : public PyObject {
+public:
+ s_DataSourceClient() : cppobj(NULL) {};
+ DataSourceClientContainer* cppobj;
+};
+
+PyObject*
+DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
+ try {
+ DataSourceClient::FindResult find_result(
+ self->cppobj->getInstance().findZone(PyName_ToName(name)));
+
+ result::Result r = find_result.code;
+ ZoneFinderPtr zfp = find_result.zone_finder;
+ // Use N instead of O so refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createZoneFinderObject(zfp, po_self)));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createZoneIteratorObject(
+ self->cppobj->getInstance().getIterator(PyName_ToName(name_obj)),
+ po_self));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ PyObject *replace_obj;
+ if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
+ PyBool_Check(replace_obj)) {
+ bool replace = (replace_obj != Py_False);
+ try {
+ ZoneUpdaterPtr updater =
+ self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
+ replace);
+ if (!updater) {
+ return (Py_None);
+ }
+ return (createZoneUpdaterObject(updater, po_self));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef DataSourceClient_methods[] = {
+ { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
+ METH_VARARGS, DataSourceClient_findZone_doc },
+ { "get_iterator",
+ reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator_doc },
+ { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+ char* ds_type_str;
+ char* ds_config_str;
+ try {
+        // Turn the given argument into a config Element, then simply call
+        // the factory class to do its magic.
+
+        // For now, ds_config must be a JSON string.
+ if (PyArg_ParseTuple(args, "ss", &ds_type_str, &ds_config_str)) {
+ isc::data::ConstElementPtr ds_config =
+ isc::data::Element::fromJSON(ds_config_str);
+ self->cppobj = new DataSourceClientContainer(ds_type_str,
+ ds_config);
+ return (0);
+ } else {
+ return (-1);
+ }
+ } catch (const isc::data::JSONError& je) {
+ const string ex_what = "JSON parse error in data source configuration "
+ "data for type " +
+ string(ds_type_str) + ":" + je.what();
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (const DataSourceError& dse) {
+ const string ex_what = "Failed to create DataSourceClient of type " +
+ string(ds_type_str) + ":" + dse.what();
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct DataSourceClient object: " +
+ string(ex.what());
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing DataSourceClient");
+ return (-1);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to DataSourceClient constructor");
+
+ return (-1);
+}
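For illustration, a rough sketch of how this constructor is meant to be
driven from the Python side, assuming the "sqlite3" backend and a
"database_file" configuration key like the tests further down use (the
path here is hypothetical):

    import isc.datasrc

    # the second argument must, for now, be a JSON string
    config = '{ "database_file": "/tmp/example.com.sqlite3" }'
    try:
        client = isc.datasrc.DataSourceClient("sqlite3", config)
    except isc.datasrc.Error as e:
        # raised for unknown data source types, malformed JSON, or a
        # configuration the backend rejects
        print("failed to create data source client:", e)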
+
+void
+DataSourceClient_destroy(s_DataSourceClient* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_DataSourceClient
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject datasourceclient_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.DataSourceClient",
+ sizeof(s_DataSourceClient), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ DataSourceClient_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ DataSourceClient_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/client_python.h b/src/lib/python/isc/datasrc/client_python.h
new file mode 100644
index 0000000..b20fb6b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_CLIENT_H
+#define __PYTHON_DATASRC_CLIENT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject datasourceclient_type;
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
new file mode 100644
index 0000000..6ab29d8
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -0,0 +1,256 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+
+#include <util/python/pycppwrapper_util.h>
+#include <dns/python/pydnspp_common.h>
+
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyObject*
+getDataSourceException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* datasrc_module = PyImport_AddModule("isc.datasrc");
+ if (datasrc_module != NULL) {
+ PyObject* datasrc_dict = PyModule_GetDict(datasrc_module);
+ if (datasrc_dict != NULL) {
+ ex_obj = PyDict_GetItemString(datasrc_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+
+} // end namespace python
+} // end namespace datasrc
+} // end namespace isc
+
+namespace {
+
+bool
+initModulePart_DataSourceClient(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&datasourceclient_type) < 0) {
+ return (false);
+ }
+ void* dscp = &datasourceclient_type;
+ if (PyModule_AddObject(mod, "DataSourceClient", static_cast<PyObject*>(dscp)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&datasourceclient_type);
+
+ try {
+ installClassVariable(datasourceclient_type, "SUCCESS",
+ Py_BuildValue("I", result::SUCCESS));
+ installClassVariable(datasourceclient_type, "EXIST",
+ Py_BuildValue("I", result::EXIST));
+ installClassVariable(datasourceclient_type, "NOTFOUND",
+ Py_BuildValue("I", result::NOTFOUND));
+ installClassVariable(datasourceclient_type, "PARTIALMATCH",
+ Py_BuildValue("I", result::PARTIALMATCH));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in DataSourceClient initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in DataSourceClient initialization");
+ return (false);
+ }
+
+ return (true);
+}
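The constants installed above end up as class attributes on the Python side,
so callers can compare the first element of the find_zone() result tuple
against them; a small sketch (the client object is assumed to have been
created as in the earlier example):

    import isc.dns
    from isc.datasrc import DataSourceClient

    result, finder = client.find_zone(isc.dns.Name("www.example.com"))
    if result == DataSourceClient.SUCCESS:
        print("exact match for the requested zone name")
    elif result == DataSourceClient.PARTIALMATCH:
        print("found an enclosing zone instead")
    elif result == DataSourceClient.NOTFOUND:
        print("no matching zone in this data source")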
+
+bool
+initModulePart_ZoneFinder(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zonefinder_type) < 0) {
+ return (false);
+ }
+ void* zip = &zonefinder_type;
+ if (PyModule_AddObject(mod, "ZoneFinder", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zonefinder_type);
+
+ try {
+ installClassVariable(zonefinder_type, "SUCCESS",
+ Py_BuildValue("I", ZoneFinder::SUCCESS));
+ installClassVariable(zonefinder_type, "DELEGATION",
+ Py_BuildValue("I", ZoneFinder::DELEGATION));
+ installClassVariable(zonefinder_type, "NXDOMAIN",
+ Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+ installClassVariable(zonefinder_type, "NXRRSET",
+ Py_BuildValue("I", ZoneFinder::NXRRSET));
+ installClassVariable(zonefinder_type, "CNAME",
+ Py_BuildValue("I", ZoneFinder::CNAME));
+ installClassVariable(zonefinder_type, "DNAME",
+ Py_BuildValue("I", ZoneFinder::DNAME));
+ installClassVariable(zonefinder_type, "WILDCARD",
+ Py_BuildValue("I", ZoneFinder::WILDCARD));
+ installClassVariable(zonefinder_type, "WILDCARD_NXRRSET",
+ Py_BuildValue("I", ZoneFinder::WILDCARD_NXRRSET));
+ installClassVariable(zonefinder_type, "WILDCARD_CNAME",
+ Py_BuildValue("I", ZoneFinder::WILDCARD_CNAME));
+
+ installClassVariable(zonefinder_type, "FIND_DEFAULT",
+ Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+ installClassVariable(zonefinder_type, "FIND_GLUE_OK",
+ Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+ installClassVariable(zonefinder_type, "FIND_DNSSEC",
+ Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+ installClassVariable(zonefinder_type, "NO_WILDCARD",
+ Py_BuildValue("I", ZoneFinder::NO_WILDCARD));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in ZoneFinder initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in ZoneFinder initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneIterator(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneiterator_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneiterator_type;
+ if (PyModule_AddObject(mod, "ZoneIterator", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneiterator_type);
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneUpdater(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneupdater_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneupdater_type;
+ if (PyModule_AddObject(mod, "ZoneUpdater", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneupdater_type);
+
+ return (true);
+}
+
+
+PyObject* po_DataSourceError;
+PyObject* po_NotImplemented;
+
+PyModuleDef iscDataSrc = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "datasrc",
+ "Python bindings for the classes in the isc::datasrc namespace.\n\n"
    "These bindings are a close match to the C++ API, but they are not complete "
+ "(some parts are not needed) and some are done in more python-like ways.",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_datasrc(void) {
+ PyObject* mod = PyModule_Create(&iscDataSrc);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_DataSourceClient(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneFinder(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneIterator(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneUpdater(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ try {
+ po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
+ NULL);
+ PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
+ NULL, NULL);
+ PyObjectContainer(po_NotImplemented).installToModule(mod,
+ "NotImplemented");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
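The Error and NotImplemented objects installed above are the exceptions the
bindings raise back into Python; a small sketch of catching them around a
call that may fail (client and zone name are placeholders):

    import isc.datasrc
    import isc.dns

    try:
        iterator = client.get_iterator(isc.dns.Name("example.org"))
    except isc.datasrc.Error as e:
        print("data source error:", e)
    except isc.datasrc.NotImplemented as e:
        print("operation not supported by this backend:", e)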
diff --git a/src/lib/python/isc/datasrc/datasrc.h b/src/lib/python/isc/datasrc/datasrc.h
new file mode 100644
index 0000000..d82881b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.h
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_H
+#define __PYTHON_DATASRC_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.datasrc.datasrc loadable module.
+//
+// Since the datasrc module is a different binary image and is loaded
+// separately from the dns module, it would be very tricky to directly
+// access C/C++ symbols defined in that module. So we get access to these
+// objects via the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.datasrc has been loaded by the
+// time this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getDataSourceException(const char* ex_name);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
+#endif // __PYTHON_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
new file mode 100644
index 0000000..4a00e78
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -0,0 +1,133 @@
+namespace {
+const char* const ZoneFinder_doc = "\
+The base class to search a zone for RRsets.\n\
+\n\
+The ZoneFinder class is a wrapper for the c++ base class for representing an\n\
+object that performs DNS lookups in a specific zone accessible via a\n\
+data source. In general, different types of data sources (in-memory,\n\
+database-based, etc) define their own derived c++ classes of ZoneFinder,\n\
+implementing ways to retrieve the required data through the common\n\
+interfaces declared in the base class. Each concrete ZoneFinder object\n\
+is therefore (conceptually) associated with a specific zone of one\n\
+specific data source instance.\n\
+\n\
+The origin name and the RR class of the associated zone are available\n\
+via the get_origin() and get_class() methods, respectively.\n\
+\n\
+The most important method of this class is find(), which performs the\n\
+lookup for a given domain and type. See the description of the method\n\
+for details.\n\
+\n\
+It's not clear whether we should request that a zone finder form a\n\
+\"transaction\", that is, whether to ensure the finder is not\n\
+susceptible to changes made by someone other than the creator of the\n\
+finder. If we don't request that, for example, two lookups for the\n\
+same name and type can yield different results if other threads or\n\
+programs update the zone between the lookups. We should revisit this\n\
+point as we gain more experience.\n\
+\n\
+";
+
+const char* const ZoneFinder_getOrigin_doc = "\
+get_origin() -> isc.dns.Name\n\
+\n\
+Return the origin name of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_getClass_doc = "\
+get_class() -> isc.dns.RRClass\n\
+\n\
+Return the RR class of the zone.\n\
+\n\
+";
+
+// Main changes from the C++ doxygen version:
+// - Return type: use tuple instead of the dedicated FindResult type
+// - NULL->None
+// - exceptions
+const char* const ZoneFinder_find_doc = "\
+find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
+\n\
+Search the zone for a given pair of domain name and RR type.\n\
+\n\
+Each derived version of this method searches the underlying backend\n\
+for the data that best matches the given name and type. This method is\n\
+expected to be \"intelligent\", and identifies the best possible\n\
+answer for the search key. Specifically,\n\
+\n\
+- If the search name belongs under a zone cut, it returns the code of\n\
+ DELEGATION and the NS RRset at the zone cut.\n\
+- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
+ if DNSSEC is requested, the NSEC RRset that proves the non-\n\
+ existence.\n\
+- If there is a matching name but no RRset of the search type, it\n\
+ returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
+ RRset for that name.\n\
+- If there is a CNAME RR of the searched name but there is no RR of\n\
+ the searched type of the name (so this type is different from\n\
+ CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
+ the searched RR type is CNAME, it is considered a successful match,\n\
+ and the code of SUCCESS will be returned.\n\
+- If the search name matches a delegation point of DNAME, it returns\n\
+ the code of DNAME and that DNAME RR.\n\
+- If the target isn't None, all RRsets under the domain are inserted\n\
+  there and SUCCESS (or NXDOMAIN, in case of an empty domain) is returned\n\
+  instead of normal processing. This is intended to handle ANY queries.\n\
+\n\
+Note: This behavior is controversial as we discussed in\n\
+https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
+should revisit the interface before we heavily rely on it.\n\
+\n\
+The options parameter specifies customized behavior of the search.\n\
+The semantics are as follows (the options form a bit-field and may be\n\
+OR'ed together):\n\
+\n\
+- FIND_GLUE_OK Allow search under a zone cut. By default the search\n\
+ will stop once it encounters a zone cut. If this option is specified\n\
+ it remembers information about the highest zone cut and continues\n\
+ the search until it finds an exact match for the given name or it\n\
+ detects there is no exact match. If an exact match is found, RRsets\n\
+ for that name are searched just like the normal case; otherwise, if\n\
+ the search has encountered a zone cut, DELEGATION with the\n\
+ information of the highest zone cut will be returned.\n\
+- FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are\n\
+ returned with the answer. It is allowed for the data source to\n\
+ include them even when not requested.\n\
+- NO_WILDCARD Do not try wildcard matching. This option is of no use\n\
+ for normal lookups; it's intended to be used to get a DNSSEC proof\n\
+  of the non-existence of any matching wildcard or non-existence of an\n\
+ exact match when a wildcard match is found.\n\
+\n\
+\n\
+This method raises an isc.datasrc.Error exception if there is an\n\
+internal error in the datasource.\n\
+\n\
+Parameters:\n\
+ name The domain name to be searched for.\n\
+ type The RR type to be searched for.\n\
+ target If target is not None, insert all RRs under the domain\n\
+ into it.\n\
+ options The search options.\n\
+\n\
+Return Value(s): A tuple of a result code (integer) and an RRset object\n\
+enclosing the search result (see above).\n\
+";
+
+const char* const ZoneFinder_find_previous_name_doc = "\
+find_previous_name(isc.dns.Name) -> isc.dns.Name\n\
+\n\
+Gets the previous name in DNSSEC order. This can be used to find the\n\
+correct NSEC records for proving the nonexistence of domains.\n\
+\n\
+This method does not include under-zone-cut data (glue data).\n\
+\n\
+Raises isc.datasrc.NotImplemented if the data source backend doesn't\n\
+support DNSSEC or there is no previous name in the zone (NSEC records\n\
+might be missing in the DB, or the queried name is less than or equal\n\
+to the apex).\n\
+\n\
+Raises isc.datasrc.Error for low-level or internal data source errors\n\
+(such as a broken database connection or corrupt data in the database).\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
new file mode 100644
index 0000000..6585049
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -0,0 +1,286 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "finder_python.h"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// This is the shared code for the find() call in the finder and the updater.
+// It is intentionally not available through any header, nor in our standard
+// namespace, as it is not supposed to be called from anywhere but the finder
+// and updater code.
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
+ if (finder == NULL) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Internal error in find() wrapper; finder object NULL");
+ return (NULL);
+ }
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ finder->find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+ } else {
+ return (Py_BuildValue("IO", r, Py_None));
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+} // end namespace isc_datasrc_internal
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneFinder : public PyObject {
+public:
+ s_ZoneFinder() : cppobj(ZoneFinderPtr()), base_obj(NULL) {};
+ ZoneFinderPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+    // we INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor.
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
+
+// General creation and destruction
+int
+ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneFinder cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneFinder_destroy(s_ZoneFinder* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneFinder_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_find(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
+}
+
+PyObject*
+ZoneFinder_findPreviousName(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ PyObject* name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createNameObject(
+ self->cppobj->findPreviousName(PyName_ToName(name_obj))));
+ } catch (const isc::NotImplemented& nie) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ nie.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneFinder_methods[] = {
+ { "get_origin", ZoneFinder_getOrigin, METH_NOARGS,
+ ZoneFinder_getOrigin_doc },
+ { "get_class", ZoneFinder_getClass, METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", ZoneFinder_find, METH_VARARGS, ZoneFinder_find_doc },
+ { "find_previous_name", ZoneFinder_findPreviousName, METH_VARARGS,
+ ZoneFinder_find_previous_name_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+PyTypeObject zonefinder_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneFinder",
+ sizeof(s_ZoneFinder), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneFinder_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneFinder_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source, PyObject* base_obj) {
+ s_ZoneFinder* py_zf = static_cast<s_ZoneFinder*>(
+ zonefinder_type.tp_alloc(&zonefinder_type, 0));
+ if (py_zf != NULL) {
+ py_zf->cppobj = source;
+ py_zf->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (py_zf);
+}
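The base_obj handling above is what allows a ZoneFinder returned to Python
to keep its DataSourceClient alive; a sketch of the intended effect (client
construction as in the earlier, hypothetical examples):

    import isc.datasrc
    import isc.dns

    def get_finder(config):
        client = isc.datasrc.DataSourceClient("sqlite3", config)
        result, finder = client.find_zone(isc.dns.Name("example.com"))
        # 'client' goes out of scope here, but the finder holds a
        # reference to it via base_obj, so the underlying objects are
        # not destroyed while the finder is still in use
        return finder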
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
new file mode 100644
index 0000000..23bc457
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -0,0 +1,44 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_FINDER_H
+#define __PYTHON_DATASRC_FINDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+
+namespace python {
+
+extern PyTypeObject zonefinder_type;
+
+/// \brief Create a ZoneFinder python object
+///
+/// \param source The zone finder pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneFinder depends on
+/// Its refcount is increased, and will be decreased when
+///                 this zone finder is destroyed, making sure that the
+/// base object is never destroyed before this zonefinder.
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source,
+ PyObject* base_obj = NULL);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
new file mode 100644
index 0000000..087200a
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -0,0 +1,67 @@
+namespace {
+
+const char* const ZoneIterator_doc = "\
+Read-only iterator to a zone.\n\
+\n\
+You can get an instance of the ZoneIterator from\n\
+DataSourceClient.get_iterator() method. The actual concrete\n\
+c++ implementation will be different depending on the actual data source\n\
+used. This is the abstract interface.\n\
+\n\
+There's no way to restart the iteration from the beginning or to go back.\n\
+\n\
+The ZoneIterator is a python iterator, and can be iterated over directly.\n\
+";
+
+const char* const ZoneIterator_getNextRRset_doc = "\
+get_next_rrset() -> isc.dns.RRset\n\
+\n\
+Get next RRset from the zone.\n\
+\n\
+This returns the next RRset in the zone.\n\
+\n\
+No particular order is guaranteed.\n\
+\n\
+While this can potentially raise any exception (including standard\n\
+allocation errors), that should be rare.\n\
+\n\
+Returns the next RRset, or None when the iteration reaches the end of\n\
+the zone.\n\
+\n\
+Raises an isc.datasrc.Error exception if it is called again after\n\
+returning None.\n\
+";
+
+// Modifications:
+// - ConstRRset->RRset
+// - NULL->None
+// - removed notes about derived classes (which doesn't apply for python)
+const char* const ZoneIterator_getSOA_doc = "\
+get_soa() -> isc.dns.RRset\n\
+\n\
+Return the SOA record of the zone in the iterator context.\n\
+\n\
+This method returns the zone's SOA record (if any, and a valid zone\n\
+should have it) in the form of an RRset object. This SOA is identical\n\
+to that (again, if any) contained in the sequence of RRsets returned\n\
+by the iterator. In that sense this method is redundant, but is\n\
+provided as a convenient utility for the application of the iterator;\n\
+the application may need to know the SOA serial or the SOA RR itself\n\
+for the purpose of protocol handling or skipping the expensive\n\
+iteration processing.\n\
+\n\
+If the zone doesn't have an SOA (which is broken, but some data source\n\
+may allow that situation), this method returns None. Also, in the\n\
+normal and valid case, the SOA should have exactly one RDATA, but this\n\
+API does not guarantee it as some data source may accept such an\n\
+abnormal condition. It's up to the caller whether to check the number\n\
+of RDATA and how to react to the unexpected case.\n\
+\n\
+Exceptions:\n\
+ None\n\
+\n\
+Return Value(s): An SOA RRset object that would be\n\
+returned from the iteration. It will be None if the zone doesn't have\n\
+an SOA.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
new file mode 100644
index 0000000..9e6900c
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -0,0 +1,242 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "iterator_python.h"
+
+#include "iterator_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneIterator : public PyObject {
+public:
+ s_ZoneIterator() : cppobj(ZoneIteratorPtr()), base_obj(NULL) {};
+ ZoneIteratorPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+    // we INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor.
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneIterator, ZoneIterator>
+ ZoneIteratorContainer;
+
+// General creation and destruction
+int
+ZoneIterator_init(s_ZoneIterator* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneIterator cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneIterator_destroy(s_ZoneIterator* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneIterator_getNextRRset(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ if (!self->cppobj) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "get_next_rrset() called past end of iterator");
+ return (NULL);
+ }
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextRRset();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown when we call getNextRRset() when we are
+ // already done iterating ('iterating past end')
+ // We could also simply return None again
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneIterator_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneIterator_next(PyObject* self) {
+ PyObject *result = ZoneIterator_getNextRRset(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyObject*
+ZoneIterator_getSOA(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getSOA();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+        // Known library exceptions are mapped to the module-level
+        // isc.datasrc.Error exception
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyMethodDef ZoneIterator_methods[] = {
+ { "get_next_rrset", ZoneIterator_getNextRRset, METH_NOARGS,
+ ZoneIterator_getNextRRset_doc },
+ { "get_soa", ZoneIterator_getSOA, METH_NOARGS, ZoneIterator_getSOA_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneiterator_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneIterator",
+ sizeof(s_ZoneIterator), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneIterator_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneIterator_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneIterator_iter, // tp_iter
+ ZoneIterator_next, // tp_iternext
+ ZoneIterator_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneIterator_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+ PyObject* base_obj)
+{
+ s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
+ zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ py_zi->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
new file mode 100644
index 0000000..7c1b0eb
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_ITERATOR_H
+#define __PYTHON_DATASRC_ITERATOR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject zoneiterator_type;
+
+/// \brief Create a ZoneIterator python object
+///
+/// \param source The zone iterator pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneIterator depends on
+/// Its refcount is increased, and will be decreased when
+/// this zone iterator is destroyed, making sure that the
+/// base object is never destroyed before this zone iterator.
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+ PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index 77b0828..fd63741 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -33,44 +33,63 @@ def create(cur):
Arguments:
cur - sqlite3 cursor.
"""
- cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
- cur.execute("INSERT INTO schema_version VALUES (1)")
- cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
- name STRING NOT NULL COLLATE NOCASE,
- rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
- dnssec BOOLEAN NOT NULL DEFAULT 0)""")
- cur.execute("CREATE INDEX zones_byname ON zones (name)")
- cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rname STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- sigtype STRING COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX records_byname ON records (name)")
- cur.execute("CREATE INDEX records_byrname ON records (rname)")
- cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- hash STRING NOT NULL COLLATE NOCASE,
- owner STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
-
-def open(dbfile):
+    # We are creating the database because it apparently had not been set
+    # up at the time we tried to read from it. However, another process may
+    # have had the same idea, resulting in a potential race condition.
+    # Therefore, we obtain an exclusive lock before we create anything.
+    # Once we have it, we check *again* whether the database has been
+    # initialized; if not, we do so.
+
+ # If the database is perpetually locked, it'll time out automatically
+ # and we just let it fail.
+ cur.execute("BEGIN EXCLUSIVE TRANSACTION")
+ try:
+ cur.execute("SELECT version FROM schema_version")
+ row = cur.fetchone()
+ except sqlite3.OperationalError:
+ cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
+ cur.execute("INSERT INTO schema_version VALUES (1)")
+ cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
+ name STRING NOT NULL COLLATE NOCASE,
+ rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+ dnssec BOOLEAN NOT NULL DEFAULT 0)""")
+ cur.execute("CREATE INDEX zones_byname ON zones (name)")
+ cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rname STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ sigtype STRING COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX records_byname ON records (name)")
+ cur.execute("CREATE INDEX records_byrname ON records (rname)")
+ cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ hash STRING NOT NULL COLLATE NOCASE,
+ owner STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+ row = [1]
+ cur.execute("COMMIT TRANSACTION")
+ return row
+
+def open(dbfile, connect_timeout=5.0):
""" Open a database, if the database is not yet set up, call create
to do so. It may raise Sqlite3DSError if failed to open sqlite3
database file or find bad database schema version in the database.
Arguments:
dbfile - the filename for the sqlite3 database.
+ connect_timeout - timeout for opening the database or acquiring locks
+ defaults to sqlite3 module's default of 5.0 seconds
Return sqlite3 connection, sqlite3 cursor.
"""
try:
- conn = sqlite3.connect(dbfile)
+ conn = sqlite3.connect(dbfile, timeout=connect_timeout)
cur = conn.cursor()
except Exception as e:
fail = "Failed to open " + dbfile + ": " + e.args[0]
@@ -80,10 +99,13 @@ def open(dbfile):
try:
cur.execute("SELECT version FROM schema_version")
row = cur.fetchone()
- except:
- create(cur)
- conn.commit()
- row = [1]
+ except sqlite3.OperationalError:
+ # temporarily disable automatic transactions so
+ # we can do our own
+ iso_lvl = conn.isolation_level
+ conn.isolation_level = None
+ row = create(cur)
+ conn.isolation_level = iso_lvl
if row == None or row[0] != 1:
raise Sqlite3DSError("Bad database schema version")
@@ -235,13 +257,13 @@ def load(dbfile, zone, reader):
zone += '.'
conn, cur = open(dbfile)
- old_zone_id = get_zoneid(zone, cur)
+ try:
+ old_zone_id = get_zoneid(zone, cur)
- temp = str(random.randrange(100000))
- cur.execute("INSERT INTO zones (name, rdclass) VALUES (?, 'IN')", [temp])
- new_zone_id = cur.lastrowid
+ temp = str(random.randrange(100000))
+ cur.execute("INSERT INTO zones (name, rdclass) VALUES (?, 'IN')", [temp])
+ new_zone_id = cur.lastrowid
- try:
for name, ttl, rdclass, rdtype, rdata in reader():
sigtype = ''
if rdtype.lower() == 'rrsig':
@@ -266,20 +288,20 @@ def load(dbfile, zone, reader):
VALUES (?, ?, ?, ?, ?, ?)""",
[new_zone_id, name, reverse_name(name), ttl,
rdtype, rdata])
+
+ if old_zone_id:
+ cur.execute("DELETE FROM zones WHERE id=?", [old_zone_id])
+ cur.execute("UPDATE zones SET name=? WHERE id=?", [zone, new_zone_id])
+ conn.commit()
+ cur.execute("DELETE FROM records WHERE zone_id=?", [old_zone_id])
+ cur.execute("DELETE FROM nsec3 WHERE zone_id=?", [old_zone_id])
+ conn.commit()
+ else:
+ cur.execute("UPDATE zones SET name=? WHERE id=?", [zone, new_zone_id])
+ conn.commit()
except Exception as e:
fail = "Error while loading " + zone + ": " + e.args[0]
raise Sqlite3DSError(fail)
-
- if old_zone_id:
- cur.execute("DELETE FROM zones WHERE id=?", [old_zone_id])
- cur.execute("UPDATE zones SET name=? WHERE id=?", [zone, new_zone_id])
- conn.commit()
- cur.execute("DELETE FROM records WHERE zone_id=?", [old_zone_id])
- cur.execute("DELETE FROM nsec3 WHERE zone_id=?", [old_zone_id])
- conn.commit()
- else:
- cur.execute("UPDATE zones SET name=? WHERE id=?", [zone, new_zone_id])
- conn.commit()
-
- cur.close()
- conn.close()
+ finally:
+ cur.close()
+ conn.close()
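With the new connect_timeout parameter, a caller that expects contention on
the database file can simply allow a longer wait for locks; a sketch (the
database path is hypothetical):

    from isc.datasrc import sqlite3_ds

    # wait up to 30 seconds for locks instead of sqlite3's default of 5.0
    conn, cur = sqlite3_ds.open("/tmp/zones.sqlite3", connect_timeout=30.0)
    try:
        cur.execute("SELECT name FROM zones")
        for (name,) in cur.fetchall():
            print(name)
    finally:
        cur.close()
        conn.close()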
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 4f87cc9..411b5cc 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,9 +1,24 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = master_test.py sqlite3_ds_test.py
+# old tests, TODO remove or change to use new API?
+#PYTESTS = master_test.py sqlite3_ds_test.py
+PYTESTS = datasrc_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
+CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+# We always add one: the location of the data source modules.
+# We may want to add an API method for this to the ds factory, but that is
+# out of scope for this ticket.
+LIBRARY_PATH_PLACEHOLDER = $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
@@ -14,7 +29,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
+ TESTDATA_WRITE_PATH=$(abs_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
new file mode 100644
index 0000000..c649f6e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -0,0 +1,542 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import isc.datasrc
+from isc.datasrc import ZoneFinder
+import isc.dns
+import unittest
+import os
+import shutil
+import sys
+import json
+
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+
+READ_ZONE_DB_CONFIG = "{ \"database_file\": \"" + READ_ZONE_DB_FILE + "\" }"
+WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
+
+def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
+ rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ if rdatas is not None:
+ for rdata in rdatas:
+ rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
+ rrset_list.append(rrset_to_add)
+
+# Helper function; we have no direct RRset comparison at the moment.
+def rrsets_equal(a, b):
+ # no accessor for sigs either (so this only checks name, class, type, ttl,
+ # and rdata)
+ # also, because of the fake data in rrsigs, if the type is rrsig, the
+ # rdata is not checked
+ return a.get_name() == b.get_name() and\
+ a.get_class() == b.get_class() and\
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and\
+ (a.get_type() == isc.dns.RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# returns true if rrset is in expected_rrsets
+# will remove the rrset from expected_rrsets if found
+def check_for_rrset(expected_rrsets, rrset):
+ for cur_rrset in expected_rrsets[:]:
+ if rrsets_equal(cur_rrset, rrset):
+ expected_rrsets.remove(cur_rrset)
+ return True
+ return False
+
+class DataSrcClient(unittest.TestCase):
+
+ def test_constructors(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+
+ self.assertRaises(TypeError, isc.datasrc.DataSourceClient, 1, "{}")
+ self.assertRaises(TypeError, isc.datasrc.DataSourceClient, "sqlite3", 1)
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "foo", "{}")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3", "")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3", "{}")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "sqlite3",
+ "{ \"foo\": 1 }")
+ self.assertRaises(isc.datasrc.Error,
+ isc.datasrc.DataSourceClient, "memory",
+ "{ \"foo\": 1 }")
+
+ def test_iterate(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+        # For RRSIGs, the TTLs are currently modified. This test should
+        # start failing when we fix that.
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+
+        # We do not know the order in which the RRsets are returned by the
+        # iterator, but we do want to check all of them; the approach is
+        # described below.
+
+ # RRset is (atm) an unorderable type, and within an rrset, the
+ # rdatas and rrsigs may also be in random order. In theory the
+ # rrsets themselves can be returned in any order.
+ #
+ # So we create a second list with all rrsets we expect, and for each
+ # rrset we get from the iterator, see if it is in that list, and
+ # remove it.
+ #
+ # When the iterator is empty, we check no rrsets are left in the
+ # list of expected ones
+ expected_rrset_list = []
+
+ name = isc.dns.Name("sql1.example.com")
+ rrclass = isc.dns.RRClass.IN()
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
+ "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
+ "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
+ "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
+ "5fs0dE/xLztL/CzZ",
+ "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
+ "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
+ "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
+ "taX3lRRPLYWpxRcDPEjysXT3Lh0vfL5D+CIO1yKw/q7C+v6+/kYAxc2l"+
+ "fbNE3HpklSuF+dyX4nXxWgzbcFuLz5Bwfq6ZJ9RYe/kNkA0uMWNa1KkG"+
+ "eRh8gg22kgD/KT5hPTnpezUWLvoY5Qc7IB3T0y4n2JIwiF2ZrZYVrWgD"+
+ "jRWAzGsxJiJyjd6w2k0="
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns01.example.com.",
+ "dns02.example.com.",
+ "dns03.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
+ ])
+ # For RRSIGS, we can't add the fake data through the API, so we
+ # simply pass no rdata at all (which is skipped by the check later)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ [
+ "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ [
+ "192.0.2.100"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "sql1.example.com. A RRSIG NSEC"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+
+ # rrs is an iterator, but also has direct get_next_rrset(), use
+ # the latter one here
+ rrset_to_check = rrs.get_next_rrset()
+ while rrset_to_check is not None:
+ self.assertTrue(check_for_rrset(expected_rrset_list,
+ rrset_to_check),
+ "Unexpected rrset returned by iterator:\n" +
+ rrset_to_check.to_text())
+ rrset_to_check = rrs.get_next_rrset()
+
+ # Now check there are none left
+ self.assertEqual(0, len(expected_rrset_list),
+ "RRset(s) not returned by iterator: " +
+ str([rrset.to_text() for rrset in expected_rrset_list ]
+ ))
+
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"))
+ # this zone contains more than 80 RRs, grouped into 55 RRsets; just
+ # count the RRsets here (a full check was already done on the smaller
+ # zone above)
+ self.assertEqual(55, len(list(rrets)))
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+
+ def test_iterator_soa(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+ iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+ expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
+ isc.dns.RRClass.IN(),
+ isc.dns.RRType.SOA(),
+ isc.dns.RRTTL(3600))
+ expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
+ isc.dns.RRClass.IN(),
+ "master.example.com. " +
+ "admin.example.com. 678 " +
+ "3600 1800 2419200 7200"))
+ self.assertTrue(rrsets_equal(expected_soa, iterator.get_soa()))
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
+
+ def test_findoptions(self):
+ '''A simple test to confirm no option is specified by default.
+
+ '''
+ self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.FIND_GLUE_OK)
+ self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.FIND_DNSSEC)
+ self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.NO_WILDCARD)
+
+ def test_findresults(self):
+ '''A simple test to confirm result codes are (defined and) different
+ for some combinations.
+
+ '''
+ self.assertNotEqual(ZoneFinder.SUCCESS, ZoneFinder.DELEGATION)
+ self.assertNotEqual(ZoneFinder.DELEGATION, ZoneFinder.NXDOMAIN)
+ self.assertNotEqual(ZoneFinder.NXDOMAIN, ZoneFinder.NXRRSET)
+ self.assertNotEqual(ZoneFinder.NXRRSET, ZoneFinder.CNAME)
+ self.assertNotEqual(ZoneFinder.CNAME, ZoneFinder.DNAME)
+ self.assertNotEqual(ZoneFinder.DNAME, ZoneFinder.WILDCARD)
+ self.assertNotEqual(ZoneFinder.WILDCARD, ZoneFinder.WILDCARD_CNAME)
+ self.assertNotEqual(ZoneFinder.WILDCARD_CNAME,
+ ZoneFinder.WILDCARD_NXRRSET)
+
+ def test_find(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.DELEGATION, result)
+ self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns02.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns03.example.com.\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXRRSET, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.CNAME, result)
+ self.assertEqual(
+ "cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.WILDCARD, result)
+ self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.WILDCARD_NXRRSET, result)
+ self.assertEqual(None, rrset)
+
+ self.assertRaises(TypeError, finder.find,
+ "foo",
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ "foo",
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ "foo")
+
+ def test_find_previous(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+
+ prev = finder.find_previous_name(isc.dns.Name("bbb.example.com"))
+ self.assertEqual("example.com.", prev.to_text())
+
+ prev = finder.find_previous_name(isc.dns.Name("zzz.example.com"))
+ self.assertEqual("www.example.com.", prev.to_text())
+
+ prev = finder.find_previous_name(prev)
+ self.assertEqual("*.wild.example.com.", prev.to_text())
+
+ self.assertRaises(isc.datasrc.NotImplemented,
+ finder.find_previous_name,
+ isc.dns.Name("com"))
+
+class DataSrcUpdater(unittest.TestCase):
+
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+
+ def test_update_delete_commit(self):
+
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ updater.commit()
+ # second commit should raise exception
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ # the record should be gone now in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # now add it again
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.add_rrset(rrset_to_delete)
+ updater.commit()
+
+ # second commit should raise an exception
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ def test_two_modules(self):
+ # load two modules and check that they don't interfere
+ mem_cfg = { "type": "memory", "class": "IN", "zones": [] }
+ dsc_mem = isc.datasrc.DataSourceClient("memory", json.dumps(mem_cfg))
+ dsc_sql = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+ # check if exceptions are working
+ self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+ "memory", "{}")
+ self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+ "sqlite3", "{}")
+
+ # see if a lookup succeeds in sqlite3 ds
+ result, finder = dsc_sql.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ # see if a lookup fails in mem ds
+ result, finder = dsc_mem.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.NXDOMAIN, result)
+
+
+ def test_update_delete_abort(self):
+ # Similar to test_update_delete_commit above, but instead of committing
+ # we destroy the updater to verify that the uncommitted changes are
+ # rolled back
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # destroy the updater, which should make it roll back
+ updater = None
+
+ # the record should still be available in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ def test_update_for_no_zone(self):
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ self.assertEqual(None,
+ dsc.get_updater(isc.dns.Name("notexistent.example"),
+ True))
+
+ def test_client_reference(self):
+ # Temporarily create various objects using factory methods of the
+ # client. The created objects won't be stored anywhere and will be
+ # released immediately. Their creation shouldn't affect the reference
+ # count of the base client.
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ orig_ref = sys.getrefcount(dsc)
+
+ dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ dsc.get_iterator(isc.dns.Name("example.com."))
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+ def test_iterate_over_empty_zone(self):
+ # empty the test zone first
+ dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.commit()
+
+ # Check the iterator behavior for the empty zone.
+ iterator = dsc.get_iterator(isc.dns.Name("example.com."))
+ self.assertEqual(None, iterator.get_soa())
+ self.assertEqual(None, iterator.get_next_rrset())
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
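As a quick orientation for the tests above, here is a minimal sketch of the
updater workflow that DataSrcUpdater.test_update_delete_commit exercises; it
assumes the same sqlite3 backend and the WRITE_ZONE_DB_CONFIG string defined
at the top of the test file, and is not itself part of the commit.

import isc.datasrc
import isc.dns

client = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
result, finder = client.find_zone(isc.dns.Name("example.com"))
result, rrset = finder.find(isc.dns.Name("www.example.com"),
                            isc.dns.RRType.A(), None, finder.FIND_DEFAULT)

updater = client.get_updater(isc.dns.Name("example.com"), True)
rrset.remove_rrsig()         # delete_rrset() rejects RRsets with an RRSIG attached
updater.delete_rrset(rrset)  # for now only visible through updater.find()
updater.commit()             # after this, ordinary finders see the change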
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 013c7d7..10c61cf 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -17,8 +17,32 @@ from isc.datasrc import sqlite3_ds
import os
import socket
import unittest
+import sqlite3
TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
+
+def example_reader():
+ my_zone = [
+ ("example.com.", "3600", "IN", "SOA", "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200"),
+ ("example.com.", "3600", "IN", "NS", "ns.example.com."),
+ ("ns.example.com.", "3600", "IN", "A", "192.0.2.1")
+ ]
+ for rr in my_zone:
+ yield rr
+
+def example_reader_nested():
+ # this iterator is used in the 'locked' test; it will cause
+ # the load() method to try and write to the same database
+ sqlite3_ds.load(WRITE_ZONE_DB_FILE,
+ ".",
+ example_reader)
+ return example_reader()
class TestSqlite3_ds(unittest.TestCase):
def test_zone_exist(self):
@@ -33,11 +57,87 @@ class TestSqlite3_ds(unittest.TestCase):
# Open a broken database file
self.assertRaises(sqlite3_ds.Sqlite3DSError,
sqlite3_ds.zone_exist, "example.com",
- TESTDATA_PATH + "brokendb.sqlite3")
+ BROKEN_DB_FILE)
self.assertTrue(sqlite3_ds.zone_exist("example.com.",
- TESTDATA_PATH + "example.com.sqlite3"))
+ READ_ZONE_DB_FILE))
self.assertFalse(sqlite3_ds.zone_exist("example.org.",
- TESTDATA_PATH + "example.com.sqlite3"))
+ READ_ZONE_DB_FILE))
+
+ def test_load_db(self):
+ sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+
+ def test_locked_db(self):
+ # load it first to make sure it exists
+ sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+
+ # and manually create a writing session as well
+ con = sqlite3.connect(WRITE_ZONE_DB_FILE)
+ cur = con.cursor()
+ cur.execute("delete from records")
+
+ self.assertRaises(sqlite3_ds.Sqlite3DSError,
+ sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
+ example_reader)
+
+ con.rollback()
+
+ # and make sure lock does not stay
+ sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+
+ # force locked db by nested loads
+ self.assertRaises(sqlite3_ds.Sqlite3DSError,
+ sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
+ example_reader_nested)
+
+ # and make sure lock does not stay
+ sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+
+class NewDBFile(unittest.TestCase):
+ def tearDown(self):
+ # remove the created database after every test
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def setUp(self):
+ # remove the created database before every test too, just
+ # in case a test got aborted half-way, and cleanup didn't occur
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def test_new_db(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ sqlite3_ds.open(NEW_DB_FILE)
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ def test_new_db_locked(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ con = sqlite3.connect(NEW_DB_FILE)
+ con.isolation_level = None
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+
+ # open() should now fail, since the database is locked
+ # and the open() call needs an exclusive lock
+ self.assertRaises(sqlite3.OperationalError,
+ sqlite3_ds.open, NEW_DB_FILE, 0.1)
+
+ con.rollback()
+ cur.close()
+ con.close()
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ # now that we closed our connection, open() should work again
+ sqlite3_ds.open(NEW_DB_FILE)
+
+ # the database has now been created, and a new open() should
+ # not require an exclusive lock anymore, so we lock it again
+ con = sqlite3.connect(NEW_DB_FILE)
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+ sqlite3_ds.open(NEW_DB_FILE, 0.1)
+ con.rollback()
+ cur.close()
+ con.close()
if __name__ == '__main__':
unittest.main()
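A minimal sketch of how sqlite3_ds.load() is driven by a reader generator, in
the same style as example_reader() above; the file path, zone name and records
here are illustrative only and not part of the commit.

from isc.datasrc import sqlite3_ds

def org_zone_reader():
    # load() iterates the (name, ttl, class, type, rdata) tuples the reader yields
    yield ("example.org.", "3600", "IN", "SOA",
           "ns.example.org. admin.example.org. 1 3600 1800 2419200 7200")
    yield ("example.org.", "3600", "IN", "NS", "ns.example.org.")
    yield ("ns.example.org.", "3600", "IN", "A", "192.0.2.1")

DB_FILE = "/tmp/example.org.sqlite3"   # illustrative path
sqlite3_ds.open(DB_FILE)               # creates the database file if needed
sqlite3_ds.load(DB_FILE, "example.org.", org_zone_reader)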
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
new file mode 100644
index 0000000..32715ec
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -0,0 +1,181 @@
+namespace {
+
+const char* const ZoneUpdater_doc = "\
+The base class to make updates to a single zone.\n\
+\n\
+On construction, each derived class object will start a\n\
+\"transaction\" for making updates to a specific zone (this means a\n\
+constructor of a derived class would normally take parameters to\n\
+identify the zone to be updated). The underlying realization of a\n\
+\"transaction\" will differ for different derived classes; if it uses\n\
+a general purpose database as a backend, it will involve performing\n\
+some form of \"begin transaction\" statement for the database.\n\
+\n\
+Updates (adding or deleting RRs) are made via add_rrset() and\n\
+delete_rrset() methods. Until the commit() method is called the\n\
+changes are local to the updater object. For example, they won't be\n\
+visible via a ZoneFinder object, but only by the updater's own find()\n\
+method. The commit() completes the transaction and makes the changes\n\
+visible to others.\n\
+\n\
+This class does not provide an explicit \"rollback\" interface. If\n\
+something wrong or unexpected happens during the updates and the\n\
+caller wants to cancel the intermediate updates, the caller should\n\
+simply destroy the updater object without calling commit(). The\n\
+destructor is supposed to perform the \"rollback\" operation,\n\
+depending on the internal details of the derived class.\n\
+\n\
+This initial implementation provides a quite simple interface of\n\
+adding and deleting RRs (see the description of the related methods).\n\
+It may be revisited as we gain more experience.\n\
+\n\
+";
+
+const char* const ZoneUpdater_addRRset_doc = "\
+add_rrset(rrset) -> No return value\n\
+\n\
+Add an RRset to a zone via the updater.\n\
+It performs a few basic checks:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG, i.e., whether\n\
+ get_rrsig() on the RRset returns a NULL pointer.\n\
+\n\
+and otherwise does not check any oddity. For example, it doesn't check\n\
+whether the owner name of the specified RRset is a subdomain of the\n\
+zone's origin; it doesn't care whether or not there is already an\n\
+RRset of the same name and RR type in the zone, and if there is,\n\
+whether any of the existing RRs have duplicate RDATA with the added\n\
+ones. If these conditions matter the calling application must examine\n\
+the existing data beforehand using the ZoneFinder returned by\n\
+get_finder().\n\
+\n\
+The validation requirement on the associated RRSIG is temporary. If we\n\
+find it more reasonable and useful to allow adding a pair of RRset and\n\
+its RRSIG RRset as we gain experience with the interface, we may\n\
+remove this restriction. Until then we explicitly check it to prevent\n\
+accidental misuse.\n\
+\n\
+Conceptually, on successful call to this method, the zone will have\n\
+the specified RRset, and if there is already an RRset of the same name\n\
+and RR type, these two sets will be \"merged\". \"Merged\" means that\n\
+a subsequent call to ZoneFinder.find() for the name and type will\n\
+result in success and the returned RRset will contain all previously\n\
+existing and newly added RDATAs with the TTL being the minimum of the\n\
+two RRsets. The underlying representation of the \"merged\" RRsets may\n\
+vary depending on the characteristic of the underlying data source.\n\
+For example, if it uses a general purpose database that stores each RR\n\
+of the same RRset separately, it may simply be a larger set of RRs\n\
+based on both the existing and added RRsets; the TTLs of the RRs may\n\
+be different within the database, and there may even be duplicate RRs\n\
+in different database rows. As long as the RRset returned via\n\
+ZoneFinder.find() conforms to the concept of \"merge\", the actual\n\
+internal representation is up to the implementation.\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo: As noted above, we may have to revisit the design details as we\n\
+gain experience:\n\
+\n\
+- we may want to check (and maybe reject) if there is already a\n\
+ duplicate RR (that has the same RDATA).\n\
+- we may want to check (and maybe reject) if there is already an RRset\n\
+ of the same name and RR type with different TTL\n\
+- we may even want to check if there is already any RRset of the same\n\
+ name and RR type.\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's a duplicate, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error, or wrapper error\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be added\n\
+\n\
+";
+
+const char* const ZoneUpdater_deleteRRset_doc = "\
+delete_rrset(rrset) -> No return value\n\
+\n\
+Delete an RRset from a zone via the updater.\n\
+\n\
+Like add_rrset(), the detailed semantics and behavior of this method\n\
+may have to be revisited in a future version. The following are based\n\
+on the initial implementation decisions.\n\
+\n\
+- Existing RRs that don't match any of the specified RDATAs will\n\
+ remain in the zone.\n\
+- Any RRs of the specified RRset that don't exist in the zone will\n\
+ simply be ignored; the implementation of this method is not supposed\n\
+ to check that condition.\n\
+- The TTL of the RRset is ignored; matching is only performed by the\n\
+ owner name, RR type and RDATA\n\
+\n\
+Ignoring the TTL may not look sensible, but it's based on the\n\
+observation that it will result in a more intuitive result, especially\n\
+when the underlying data source is a general purpose database. See\n\
+also the c++ documentation of DatabaseAccessor::DeleteRecordInZone()\n\
+on this point. It also matches the dynamic update protocol (RFC2136),\n\
+where TTLs are ignored when deleting RRs.\n\
+\n\
+This method performs a limited level of validation on the specified\n\
+RRset:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo: As noted above, we may have to revisit the design details as we\n\
+gain experience:\n\
+\n\
+- we may want to check (and maybe reject) if some or all of the RRs\n\
+ for the specified RRset don't exist in the zone\n\
+- we may want to allow an option to \"delete everything\" for\n\
+ specified name and/or specified name + RR type.\n\
+- as mentioned above, we may want to include the TTL in matching the\n\
+ deleted RRs\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there are any RRs that were specified but\n\
+ don't exist, the number of actually deleted RRs, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error\n\
+ std.bad_alloc Resource allocation failure\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be deleted\n\
+\n\
+";
+
+const char* const ZoneUpdater_commit_doc = "\
+commit() -> void\n\
+\n\
+Commit the updates made in the updater to the zone.\n\
+\n\
+This method completes the \"transaction\" started at the creation of\n\
+the updater. After successful completion of this method, the updates\n\
+will be visible outside the scope of the updater. The actual internal\n\
+behavior will differ for different derived classes. For a derived class\n\
+with a general purpose database as a backend, for example, this method\n\
+would perform a \"commit\" statement for the database.\n\
+\n\
+This operation can only be performed at most once. A duplicate call\n\
+must result in an isc.datasrc.Error exception.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Duplicate call of the method, internal data source\n\
+ error, or wrapper error\n\
+\n\
+";
+} // unnamed namespace
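To make the "rollback by destruction" behaviour described in ZoneUpdater_doc
above concrete, a minimal sketch mirroring the Python tests earlier in this
diff; WRITE_ZONE_DB_CONFIG is assumed to be the config string from those
tests, and the added A record is illustrative only.

import isc.datasrc
import isc.dns

client = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
updater = client.get_updater(isc.dns.Name("example.com"), True)

rrset = isc.dns.RRset(isc.dns.Name("new.example.com"), isc.dns.RRClass.IN(),
                      isc.dns.RRType.A(), isc.dns.RRTTL(3600))
rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(), isc.dns.RRClass.IN(),
                              "192.0.2.2"))
updater.add_rrset(rrset)     # pending change, visible only via updater.find()

updater = None               # dropped without commit(): the change is rolled back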
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
new file mode 100644
index 0000000..29d2ffe
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -0,0 +1,288 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+
+#include "datasrc.h"
+#include "updater_python.h"
+
+#include "updater_inc.cc"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// See finder_python.cc
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+}
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneUpdater : public PyObject {
+public:
+ s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()), base_obj(NULL) {};
+ ZoneUpdaterPtr cppobj;
+ // This is a reference to a base object; if the object of this class
+ // depends on another object to be in scope during its lifetime,
+ // we INCREF the base object upon creation, and DECREF it at
+ // the end of the destructor.
+ // This is an optional argument to createXXX(). If NULL, it is ignored.
+ PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int
+ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneUpdater cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ if (self->base_obj != NULL) {
+ Py_DECREF(self->base_obj);
+ }
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneUpdater_addRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->addRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_deleteRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->deleteRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_commit(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ self->cppobj->commit();
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getFinder().getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getFinder().getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(&self->cppobj->getFinder(),
+ args));
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneUpdater_methods[] = {
+ { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+ METH_VARARGS, ZoneUpdater_addRRset_doc },
+ { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+ METH_VARARGS, ZoneUpdater_deleteRRset_doc },
+ { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
+ ZoneUpdater_commit_doc },
+ // Instead of a getFinder, we implement the finder functionality directly.
+ // This is because ZoneFinder is non-copyable, and we should not create
+ // a ZoneFinder object from a reference only (which is what getFinder()
+ // returns).
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneupdater_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneUpdater",
+ sizeof(s_ZoneUpdater), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneUpdater_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneUpdater_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+ PyObject* base_obj)
+{
+ s_ZoneUpdater* py_zu = static_cast<s_ZoneUpdater*>(
+ zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
+ if (py_zu != NULL) {
+ py_zu->cppobj = source;
+ py_zu->base_obj = base_obj;
+ if (base_obj != NULL) {
+ Py_INCREF(base_obj);
+ }
+ }
+ return (py_zu);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
new file mode 100644
index 0000000..8228578
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_UPDATER_H
+#define __PYTHON_DATASRC_UPDATER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+
+extern PyTypeObject zoneupdater_type;
+
+/// \brief Create a ZoneUpdater python object
+///
+/// \param source The zone updater pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneUpdater depends on
+/// Its refcount is increased, and will be decreased when
+/// this zone updater is destroyed, making sure that the
+/// base object is never destroyed before this zone updater.
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+ PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_UPDATER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
new file mode 100644
index 0000000..b31da93
--- /dev/null
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -0,0 +1,8 @@
+python_PYTHON = __init__.py
+pythondir = $(pyexecdir)/isc/dns
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/log/Makefile.am b/src/lib/python/isc/log/Makefile.am
index acd2acc..5ff2c28 100644
--- a/src/lib/python/isc/log/Makefile.am
+++ b/src/lib/python/isc/log/Makefile.am
@@ -1,8 +1,41 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py log.py
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
-pythondir = $(pyexecdir)/isc/log
+pythondir = $(pyexecdir)/isc
+python_LTLIBRARIES = log.la
+log_la_SOURCES = log.cc
+
+log_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+log_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+log_la_LDFLAGS = $(PYTHON_LDFLAGS)
+log_la_LDFLAGS += -module
+log_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
+log_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+log_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
+log_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+log_la_LIBADD += $(PYTHON_LIB)
+
+# This is not installed, it helps locate the module during tests
+EXTRA_DIST = __init__.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a future version.
+install-data-local:
+ if test -d @pyexecdir@/isc/log; then \
+ echo "@pyexecdir@/isc/log is deprecated, and will confuse newer versions. Please (re)move it by hand."; \
+ exit 1; \
+ fi
pytest:
$(SHELL) tests/log_test
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log/__init__.py b/src/lib/python/isc/log/__init__.py
index a34e164..641cf79 100644
--- a/src/lib/python/isc/log/__init__.py
+++ b/src/lib/python/isc/log/__init__.py
@@ -1 +1,33 @@
-from isc.log.log import *
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; the log.so is installed into the right place.
+# It exists only to find log.so in the .libs directory when we run as a test
+# or from the build directory.
+# But as nobody gives us the builddir explicitly (and we can't use generation
+# from a .in file, as it would put us into the builddir and we wouldn't be
+# found), we guess based on the entries in sys.path. Any idea for something
+# better? This should be enough for the tests, but would it work for
+# B10_FROM_SOURCE as well? Should we look there? Or define something in
+# bind10_config?
+
+import os
+import sys
+
+for base in sys.path[:]:
+ loglibdir = os.path.join(base, 'isc/log/.libs')
+ if os.path.exists(loglibdir):
+ sys.path.insert(0, loglibdir)
+
+from log import *
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
new file mode 100644
index 0000000..c7112b3
--- /dev/null
+++ b/src/lib/python/isc/log/log.cc
@@ -0,0 +1,764 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <log/message_dictionary.h>
+#include <log/logger_manager.h>
+#include <log/logger_support.h>
+#include <log/logger.h>
+
+#include <config/ccsession.h>
+
+#include <string>
+#include <boost/bind.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+#include <log/log_dbglevels.h>
+
+using namespace isc::log;
+using namespace isc::util::python;
+using std::string;
+using boost::bind;
+
+// We encountered a strange problem with Clang (clang version 2.8
+// (tags/RELEASE_28 115909)) on OSX, where unwinding the stack
+// segfaults the moment this exception was thrown and caught.
+//
+// Placing it in a named namespace instead of the original
+// unnamed namespace appears to solve this, so as a temporary
+// workaround, we create a local randomly named namespace here
+// to solve this issue.
+namespace clang_unnamed_namespace_workaround {
+ // To propagate python exceptions through our code
+ // This exception is used to signal to the calling function that a
+ // proper Python Exception has already been set, and the caller
+ // should now return NULL.
+ // Since it is only used internally, and should not pass any
+ // information itself, it simply derives from std::exception without
+ // adding anything.
+ class InternalError : public std::exception {};
+}
+using namespace clang_unnamed_namespace_workaround;
+
+namespace {
+
+// This is for testing only. The real module will have it always set as
+// NULL and will use the global dictionary.
+MessageDictionary* testDictionary = NULL;
+
+PyObject*
+setTestDictionary(PyObject*, PyObject* args) {
+ PyObject* enableO;
+ // The API doesn't seem to provide conversion to bool,
+ // so we do it a little bit manually
+ if (!PyArg_ParseTuple(args, "O", &enableO)) {
+ return (NULL);
+ }
+ int enableI(PyObject_IsTrue(enableO));
+ if (enableI == -1) {
+ return (NULL);
+ }
+ bool enable(enableI != 0);
+
+ try {
+ delete testDictionary;
+ testDictionary = NULL;
+ if (enable) {
+ testDictionary = new MessageDictionary;
+ }
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+ Py_RETURN_NONE;
+}
+
+PyObject*
+createMessage(PyObject*, PyObject* args) {
+ const char* mid;
+ const char* text;
+ // We parse the strings
+ if (!PyArg_ParseTuple(args, "ss", &mid, &text)) {
+ return (NULL);
+ }
+ PyObject* origMid;
+ // And extract the original representation of the message
+ // ID, so we can return it instead of creating another instance.
+ // This call shouldn't fail if the previous one succeeded.
+ if (!PyArg_ParseTuple(args, "Os", &origMid, &text)) {
+ return (NULL);
+ }
+
+ try {
+ MessageDictionary* dict = testDictionary ? testDictionary :
+ &MessageDictionary::globalDictionary();
+
+ // We ignore the result, they will be in some kind of dupe list
+ // if there's a problem
+ dict->add(mid, text);
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+
+ // Return the ID
+ Py_INCREF(origMid);
+ return (origMid);
+}
+
+PyObject*
+getMessage(PyObject*, PyObject* args) {
+ const char* mid;
+ if (!PyArg_ParseTuple(args, "s", &mid)) {
+ return (NULL);
+ }
+
+ try {
+ MessageDictionary* dict = testDictionary ? testDictionary :
+ &MessageDictionary::globalDictionary();
+
+ const std::string& result(dict->getText(mid));
+ if (result.empty()) {
+ Py_RETURN_NONE;
+ } else {
+ return (Py_BuildValue("s", result.c_str()));
+ }
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+reset(PyObject*, PyObject*) {
+ LoggerManager::reset();
+ Py_RETURN_NONE;
+}
+
+PyObject*
+init(PyObject*, PyObject* args) {
+ const char* root;
+ const char* file(NULL);
+ const char* severity("INFO");
+ int dbglevel(0);
+ if (!PyArg_ParseTuple(args, "s|siz", &root, &severity, &dbglevel, &file)) {
+ return (NULL);
+ }
+
+ try {
+ LoggerManager::init(root, getSeverity(severity), dbglevel, file);
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+ Py_RETURN_NONE;
+}
+
+// This initialization is for unit tests. It allows message settings to
+// be determined by a set of B10_xxx environment variables. (See the
+// description of initLogger() for more details.) The function has been named
+// resetUnitTestRootLogger() here to be more descriptive and to
+// avoid confusion.
+PyObject*
+resetUnitTestRootLogger(PyObject*, PyObject*) {
+ try {
+ isc::log::resetUnitTestRootLogger();
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+ Py_RETURN_NONE;
+}
+
+PyObject*
+logConfigUpdate(PyObject*, PyObject* args) {
+ // we have no wrappers for ElementPtr and ConfigData,
+ // So we expect JSON strings and convert them.
+ // The new_config object is assumed to have been validated.
+
+ const char* new_config_json;
+ const char* mod_spec_json;
+ if (!PyArg_ParseTuple(args, "ss",
+ &new_config_json, &mod_spec_json)) {
+ return (NULL);
+ }
+
+ try {
+ isc::data::ConstElementPtr new_config =
+ isc::data::Element::fromJSON(new_config_json);
+ isc::data::ConstElementPtr mod_spec_e =
+ isc::data::Element::fromJSON(mod_spec_json);
+ isc::config::ModuleSpec mod_spec(mod_spec_e);
+ isc::config::ConfigData config_data(mod_spec);
+ isc::config::default_logconfig_handler("logging", new_config,
+ config_data);
+
+ Py_RETURN_NONE;
+ } catch (const isc::data::JSONError& je) {
+ std::string error_msg = std::string("JSON format error: ") + je.what();
+ PyErr_SetString(PyExc_TypeError, error_msg.c_str());
+ } catch (const isc::data::TypeError& de) {
+ PyErr_SetString(PyExc_TypeError, "argument 1 of log_config_update "
+ "is not a map of config data");
+ } catch (const isc::config::ModuleSpecError& mse) {
+ PyErr_SetString(PyExc_TypeError, "argument 2 of log_config_update "
+ "is not a correct module specification");
+ } catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ }
+ return (NULL);
+}
+
+PyMethodDef methods[] = {
+ {"set_test_dictionary", setTestDictionary, METH_VARARGS,
+ "Set or unset testing mode for message dictionary. In testing, "
+ "the create_message and get_message functions work on different "
+ "than the logger-global dictionary, not polluting it."},
+ {"create_message", createMessage, METH_VARARGS,
+ "Creates a new message in the dictionary. You shouldn't need to "
+ "call this directly, it should be called by the generated message "
+ "file. Returns the identifier to be used in logging. The text "
+ "shouldn't be empty."},
+ {"get_message", getMessage, METH_VARARGS,
+ "Get a message. This function is for testing purposes and you don't "
+ "need to call it. It returns None if the message does not exist."},
+ {"reset", reset, METH_NOARGS,
+ "Reset all logging. For testing purposes only, do not use."},
+ {"init", init, METH_VARARGS,
+ "Run-time initialization. You need to call this before you do any "
+ "logging, to configure the root logger name. You may also provide "
+ "logging severity (one of 'DEBUG', 'INFO', 'WARN', 'ERROR' or "
+ "'FATAL'), a debug level (integer in the range 0-99) and a file name "
+ "of a dictionary with message text translations."},
+ {"resetUnitTestRootLogger", resetUnitTestRootLogger, METH_VARARGS,
+ "Resets the configuration of the root logger to that set by the "
+ "B10_XXX environment variables. It is aimed at unit tests, where "
+ "the logging is initialized by the code under test; called before "
+ "the unit test starts, this function resets the logging configuration "
+ "to that in use for the C++ unit tests."},
+ {"log_config_update", logConfigUpdate, METH_VARARGS,
+ "Update logger settings. This method is automatically used when "
+ "ModuleCCSession is initialized with handle_logging_config set "
+ "to True. When called, the first argument is the new logging "
+ "configuration (in JSON format). The second argument is "
+ "the raw specification (as returned from "
+ "ConfigData.get_module_spec().get_full_spec(), and converted to "
+ "JSON format).\n"
+ "Raises a TypeError if either argument is not a (correct) JSON "
+ "string, or if the spec is not a correct spec.\n"
+ "If this call succeeds, the global logger settings have "
+ "been updated."
+ },
+ {NULL, NULL, 0, NULL}
+};
+
+class LoggerWrapper : public PyObject {
+// Everything is public here, as it is accessible only inside this .cc file.
+public:
+ Logger *logger_;
+};
+
+extern PyTypeObject logger_type;
+
+int
+Logger_init(LoggerWrapper* self, PyObject* args) {
+ const char* name;
+ if (!PyArg_ParseTuple(args, "s", &name)) {
+ return (-1);
+ }
+ try {
+ self->logger_ = new Logger(name);
+ return (0);
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (-1);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (-1);
+ }
+}
+
+void
+Logger_destroy(LoggerWrapper* const self) {
+ delete self->logger_;
+ self->logger_ = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// isc::log doesn't provide a function for converting in this direction
+const char*
+severityToText(const Severity& severity) {
+ switch (severity) {
+ case DEFAULT:
+ return ("DEFAULT");
+ case DEBUG:
+ return ("DEBUG");
+ case INFO:
+ return ("INFO");
+ case WARN:
+ return ("WARN");
+ case ERROR:
+ return ("ERROR");
+ case FATAL:
+ return ("FATAL");
+ default:
+ return (NULL);
+ }
+}
+
+PyObject*
+Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+ try {
+ return (Py_BuildValue("s",
+ severityToText(
+ self->logger_->getEffectiveSeverity())));
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+ try {
+ return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+ const char* severity;
+ int dbgLevel = 0;
+ if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
+ return (NULL);
+ }
+ try {
+ self->logger_->setSeverity((severity == NULL) ? DEFAULT :
+ getSeverity(severity), dbgLevel);
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+ Py_RETURN_NONE;
+}
+
+template<class FPtr> // Who should remember the pointer-to-method syntax
+PyObject*
+Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
+ try {
+ if ((self->logger_->*function)()) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+ return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
+}
+
+PyObject*
+Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+ return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
+}
+
+PyObject*
+Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+ return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
+}
+
+PyObject*
+Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+ return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
+}
+
+PyObject*
+Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+ int level = MIN_DEBUG_LEVEL;
+ if (!PyArg_ParseTuple(args, "|i", &level)) {
+ return (NULL);
+ }
+
+ try {
+ if (self->logger_->isDebugEnabled(level)) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+string
+objectToStr(PyObject* object, bool convert) {
+ PyObject* cleanup(NULL);
+ if (convert) {
+ object = cleanup = PyObject_Str(object);
+ if (object == NULL) {
+ throw InternalError();
+ }
+ }
+ const char* value;
+ PyObject* tuple(Py_BuildValue("(O)", object));
+ if (tuple == NULL) {
+ if (cleanup != NULL) {
+ Py_DECREF(cleanup);
+ }
+ throw InternalError();
+ }
+
+ if (!PyArg_ParseTuple(tuple, "s", &value)) {
+ Py_DECREF(tuple);
+ if (cleanup != NULL) {
+ Py_DECREF(cleanup);
+ }
+ throw InternalError();
+ }
+ string result(value);
+ Py_DECREF(tuple);
+ if (cleanup != NULL) {
+ Py_DECREF(cleanup);
+ }
+ return (result);
+}
+
+// Generic function to output the logging message. Called by the real functions.
+template<class Function>
+PyObject*
+Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
+ try {
+ Py_ssize_t number(PyObject_Length(args));
+ if (number < 0) {
+ return (NULL);
+ }
+
+ // Which argument is the first to format?
+ size_t start(1);
+ if (dbgLevel) {
+ start ++;
+ }
+
+ if (number < start) {
+ return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
+ "logging call, at least %zu needed and %zd "
+ "given", start, number));
+ }
+
+ // Extract the fixed arguments
+ PyObject *midO(PySequence_GetItem(args, start - 1));
+ if (midO == NULL) {
+ return (NULL);
+ }
+ string mid(objectToStr(midO, false));
+ long dbg(0);
+ if (dbgLevel) {
+ PyObject *dbgO(PySequence_GetItem(args, 0));
+ if (dbgO == NULL) {
+ return (NULL);
+ }
+ dbg = PyLong_AsLong(dbgO);
+ if (PyErr_Occurred()) {
+ return (NULL);
+ }
+ }
+
+ // We create the logging message right now. If we later fail to convert
+ // a parameter to a string, at least the part already formatted will
+ // be output
+ Logger::Formatter formatter(function(dbg, mid.c_str()));
+
+ // Now process the rest of parameters, convert each to string and put
+ // into the formatter. It will print itself in the end.
+ for (size_t i(start); i < number; ++ i) {
+ PyObject* param(PySequence_GetItem(args, i));
+ if (param == NULL) {
+ return (NULL);
+ }
+ formatter = formatter.arg(objectToStr(param, true));
+ }
+ Py_RETURN_NONE;
+ }
+ catch (const InternalError&) {
+ return (NULL);
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+}
+
+// Now map the functions into the performOutput. I wish C++ could do
+// functional programming.
+PyObject*
+Logger_debug(LoggerWrapper* self, PyObject* args) {
+ return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
+ args, true));
+}
+
+PyObject*
+Logger_info(LoggerWrapper* self, PyObject* args) {
+ return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
+ args, false));
+}
+
+PyObject*
+Logger_warn(LoggerWrapper* self, PyObject* args) {
+ return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
+ args, false));
+}
+
+PyObject*
+Logger_error(LoggerWrapper* self, PyObject* args) {
+ return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
+ args, false));
+}
+
+PyObject*
+Logger_fatal(LoggerWrapper* self, PyObject* args) {
+ return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
+ args, false));
+}
+
+PyMethodDef loggerMethods[] = {
+ { "get_effective_severity",
+ reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
+ METH_NOARGS, "Returns the effective logging severity as string" },
+ { "get_effective_debug_level",
+ reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
+ METH_NOARGS, "Returns the current debug level." },
+ { "set_severity",
+ reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+ "Sets the severity of a logger. The parameters are severity as a "
+ "string and, optionally, a debug level (integer in range 0-99). "
+ "The severity may be NULL, in which case an inherited value is taken."
+ },
+ { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
+ METH_VARARGS, "Returns if the logger would log debug message now. "
+ "You can provide a desired debug level." },
+ { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
+ METH_NOARGS, "Returns if the logger would log info message now." },
+ { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
+ METH_NOARGS, "Returns if the logger would log warn message now." },
+ { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
+ METH_NOARGS, "Returns if the logger would log error message now." },
+ { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
+ METH_NOARGS, "Returns if the logger would log fatal message now." },
+ { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+ "Logs a debug-severity message. It takes the debug level, message ID "
+ "and any number of stringifiable arguments to the message." },
+ { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+ "Logs a info-severity message. It taskes the message ID and any "
+ "number of stringifiable arguments to the message." },
+ { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+ "Logs a warn-severity message. It taskes the message ID and any "
+ "number of stringifiable arguments to the message." },
+ { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+ "Logs a error-severity message. It taskes the message ID and any "
+ "number of stringifiable arguments to the message." },
+ { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+ "Logs a fatal-severity message. It taskes the message ID and any "
+ "number of stringifiable arguments to the message." },
+ { NULL, NULL, 0, NULL }
+};
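
A minimal usage sketch of the severity-related methods documented above, mirroring the unit tests further down in this diff (the logger name "child" is only illustrative):

    import isc.log

    isc.log.init("root", "DEBUG", 50)
    logger = isc.log.Logger("child")

    # Narrow the debug level, then query the effective settings.
    logger.set_severity("DEBUG", 25)
    logger.get_effective_severity()     # "DEBUG"
    logger.get_effective_debug_level()  # 25

    # is_debug_enabled() takes an optional desired debug level.
    if logger.is_debug_enabled(10):
        pass  # a debug message at level 10 would be logged now

    # Passing None reverts to the severity inherited from the root logger.
    logger.set_severity(None)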
+
+PyTypeObject logger_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.log.Logger",
+ sizeof(LoggerWrapper), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(Logger_destroy), // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "Wrapper around the C++ isc::log::Logger class."
+ "It is not complete, but everything important should be here.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ loggerMethods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(Logger_init), // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyModuleDef iscLog = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "log",
+ "Python bindings for the classes in the isc::log namespace.\n\n"
+ "These bindings are close match to the C++ API, but they are not complete "
+ "(some parts are not needed) and some are done in more python-like ways.",
+ -1,
+ methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_log(void) {
+ PyObject* mod = PyModule_Create(&iscLog);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (PyType_Ready(&logger_type) < 0) {
+ return (NULL);
+ }
+
+ if (PyModule_AddObject(mod, "Logger",
+ static_cast<PyObject*>(static_cast<void*>(
+ &logger_type))) < 0) {
+ return (NULL);
+ }
+
+ // Add in the definitions of the standard debug levels. These can then
+ // be referred to in Python through the constants log.DBGLVL_XXX.
+ // N.B. These should be kept in sync with the constants defined in
+ // log_dbglevels.h.
+ try {
+ installClassVariable(logger_type, "DBGLVL_START_SHUT",
+ Py_BuildValue("I", DBGLVL_START_SHUT));
+ installClassVariable(logger_type, "DBGLVL_COMMAND",
+ Py_BuildValue("I", DBGLVL_COMMAND));
+ installClassVariable(logger_type, "DBGLVL_COMMAND_DATA",
+ Py_BuildValue("I", DBGLVL_COMMAND_DATA));
+ installClassVariable(logger_type, "DBGLVL_TRACE_BASIC",
+ Py_BuildValue("I", DBGLVL_TRACE_BASIC));
+ installClassVariable(logger_type, "DBGLVL_TRACE_BASIC_DATA",
+ Py_BuildValue("I", DBGLVL_TRACE_BASIC_DATA));
+ installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL",
+ Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
+ installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
+ Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Log initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Log initialization");
+ return (NULL);
+ }
+
+ Py_INCREF(&logger_type);
+ return (mod);
+}
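
As a rough end-to-end sketch, the module initialised above is meant to be driven from Python as in log_console.py.in further down in this diff (the "MSG_ID" message and the "output" logger name are only examples):

    import isc.log

    # Message IDs would normally come from a compiled message dictionary.
    MSG_ID = isc.log.create_message("MSG_ID", "Message with %2 %1")

    isc.log.init("test")
    logger = isc.log.Logger("output")

    # info() and friends take the message ID plus the placeholder arguments;
    # debug() additionally takes the debug level as its first argument.
    logger.info(MSG_ID, "first", "second")
    logger.debug(logger.DBGLVL_COMMAND, MSG_ID, "first", "second")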
diff --git a/src/lib/python/isc/log/log.py b/src/lib/python/isc/log/log.py
deleted file mode 100644
index 74261e2..0000000
--- a/src/lib/python/isc/log/log.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""This module is to convert python logging module over
-to log4python.
-Copyright (C) 2010 Internet Systems Consortium.
-To use, simply 'import isc.log.log' and log away!
-"""
-import os
-import sys
-import syslog
-import logging
-import logging.handlers
-
-"""LEVELS: logging levels mapping
-"""
-LEVELS = {'debug' : logging.DEBUG,
- 'info' : logging.INFO,
- 'warning' : logging.WARNING,
- 'error' : logging.ERROR,
- 'critical' : logging.CRITICAL}
-
-FORMATTER = logging.Formatter("%(name)s: %(levelname)s: %(message)s")
-TIME_FORMATTER = logging.Formatter("%(asctime)s.%(msecs)03d %(name)s: %(levelname)s: %(message)s",
- "%d-%b-%Y %H:%M:%S")
-
-def log_err(err_type, err_msg):
- sys.stderr.write(err_type + ": " + "%s.\n" % str(err_msg)[str(err_msg).find(']')+1:])
-
-
-class NSFileLogHandler(logging.handlers.RotatingFileHandler):
- """RotatingFileHandler: replace RotatingFileHandler with a custom handler"""
-
- def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
- abs_file_name = self._get_abs_file_path(filename)
- """Create log directory beforehand, because the underlying logging framework won't
- create non-exsiting log directory on writing logs.
- """
- if not (os.path.exists(os.path.dirname(abs_file_name))):
- os.makedirs(os.path.dirname(abs_file_name))
- super(NSFileLogHandler, self).__init__(abs_file_name, mode, maxBytes,
- backupCount, encoding, delay)
-
- def handleError(self, record):
- """Overwrite handleError to provide more user-friendly error messages"""
- if logging.raiseExceptions:
- ei = sys.exc_info()
- if (ei[1]):
- sys.stderr.write("[b10-logging] : " + str(ei[1]))
-
- def _get_abs_file_path(self, file_name):
- """ Get absolute file path"""
- # For a bare filename, log_dir will be set the current directory.
- if not os.path.dirname(file_name):
- abs_file_dir = os.getcwd()
- else:
- abs_file_dir = os.path.abspath(os.path.dirname(file_name))
- abs_file_name = os.path.join(abs_file_dir, os.path.basename(file_name))
- return abs_file_name
-
- def shouldRollover(self, record):
- """Rewrite RotatingFileHandler.shouldRollover.
-
- If the log file is deleted at runtime, a new file will be created.
- """
- dfn = self.baseFilename
- if (self.stream) and (not os.path.exists(dfn)): #Does log file exist?
- self.stream = None
- """ Log directory may be deleted while bind10 running or updated with a
- non-existing directory. Need to create log directory beforehand, because
- the underlying logging framework won't create non-exsiting log directory
- on writing logs.
- """
- if not (os.path.exists(os.path.dirname(dfn))): #Does log subdirectory exist?
- os.makedirs(os.path.dirname(dfn))
- self.stream = self._open()
- return super(NSFileLogHandler, self).shouldRollover(record)
-
- def update_config(self, file_name, backup_count, max_bytes):
- """Update RotatingFileHandler configuration.
- Changes will be picked up in the next call to shouldRollover().
-
- input:
- log file name
- max backup count
- predetermined log file size
- """
- abs_file_name = self._get_abs_file_path(file_name)
- self.baseFilename = abs_file_name
- self.maxBytes = max_bytes
- self.backupCount = backup_count
-
-
-class NSSysLogHandler(logging.Handler):
- """Replace SysLogHandler with a custom handler
-
- A handler class which sends formatted logging records to a syslog
- server.
- """
- def __init__(self, ident, logopt=0, facility=syslog.LOG_USER):
- """Initialize a handler.
-
- If facility is not specified, LOG_USER is used.
- """
- super(NSSysLogHandler, self).__init__()
- self._ident = ident
- self._logopt = logopt
- self._facility = facility
- self._mappings = {
- logging.DEBUG: syslog.LOG_DEBUG,
- logging.INFO: syslog.LOG_INFO,
- logging.WARNING: syslog.LOG_WARNING,
- logging.ERROR: syslog.LOG_ERR,
- logging.CRITICAL: syslog.LOG_CRIT,
- }
-
- def _encodeLevel(self, level):
- """Encoding the priority."""
- return self._mappings.get(level, syslog.LOG_INFO)
-
- def emit(self, record):
- """Emit a record.
-
- The record is formatted, and then sent to the syslog server. If
- exception information is present, it is NOT sent to the server.
- """
- syslog.openlog(self._ident, self._logopt, self._facility)
- msg = self.format(record)
- prio = self._encodeLevel(record.levelno)
- syslog.syslog(prio, msg)
- syslog.closelog()
-
-
-class NSLogger(logging.getLoggerClass()):
- """Override logging.logger behaviour."""
- def __init__(self, log_name, log_file, severity='debug', versions=0,
- max_bytes=0, log_to_console=True):
- """Initializes the logger with some specific parameters
-
- If log_to_console is True, stream handler will be used;
- else syslog handler will be used.
-
- To disable file handler, set log_file = ''.
- """
- self._log_name = log_name
- self._log_file = log_file
- self._severity = severity
- self._versions = versions
- self._max_bytes = max_bytes
-
- super(NSLogger, self).__init__(self._log_name)
-
- # Set up a specific logger with our desired output level
- logLevel = LEVELS.get(self._severity, logging.NOTSET)
- self.setLevel(logLevel)
-
- self._file_handler = None
- self._stream_handler = None
- self._syslog_handler = None
-
- self._add_rotate_handler(self._log_file, self._versions, self._max_bytes)
- if log_to_console:
- self._add_stream_handler()
- else:
- self._add_syslog_handler()
-
- def _add_rotate_handler(self, log_file, backup_count, max_bytes):
- """Add a rotate file handler.
-
- input:
- log_file : the location of log file. Handler will not be created
- if log_file=''
- max_bytes : limit log growth
- backup_count : max backup count
- """
- if (log_file != 0 and log_file != ''):
- try:
- self._file_handler = NSFileLogHandler(filename = log_file,
- maxBytes = max_bytes, backupCount = backup_count)
- except (IOError, OSError) as e:
- self._file_handler = None
- log_err("[b10-logging] Add file handler fail", str(e))
- return
- self._file_handler.setFormatter(TIME_FORMATTER)
- self.addHandler(self._file_handler)
-
- def _add_stream_handler(self):
- """Add a stream handler.
-
- sys.stderr will be used for logging output.
- """
- self._stream_handler = logging.StreamHandler()
- self._stream_handler.setFormatter(TIME_FORMATTER)
- self.addHandler(self._stream_handler)
-
- def _add_syslog_handler(self, nsfacility=syslog.LOG_USER):
- """Add a syslog handler.
-
- If facility is not specified, LOG_USER is used.
- The default severity level is INFO.
- """
- self._syslog_handler = NSSysLogHandler('BIND10', facility = nsfacility)
- self._syslog_handler.setFormatter(FORMATTER)
- #set syslog handler severity level INFO
- self._syslog_handler.setLevel(logging.INFO)
- self.addHandler(self._syslog_handler)
-
- def _update_rotate_handler(self, log_file, backup_count, max_bytes):
- """If the rotate file handler has been added to the logger, update its
- configuration, or add it to the logger.
- """
- if (self._file_handler in self.handlers):
- if (log_file != 0 and log_file != ''):
- self._file_handler.update_config(log_file, backup_count, max_bytes)
- else:
- """If log file is empty, the handler will be removed."""
- self._file_handler.flush()
- self._file_handler.close()
- self.removeHandler(self._file_handler)
- else:
- self._add_rotate_handler(log_file, backup_count, max_bytes)
-
- def _get_config(self, config_data):
- """Get config data from module configuration"""
-
- log_file_str = config_data.get('log_file')
- if (log_file_str):
- self._log_file = log_file_str
-
- severity_str = config_data.get('log_severity')
- if (severity_str):
- self._severity = severity_str
-
- versions_str = config_data.get('log_versions')
- if (versions_str):
- self._versions = int(versions_str)
-
- max_bytes_str = config_data.get('log_max_bytes')
- if (max_bytes_str):
- self._max_bytes = int(max_bytes_str)
-
- def update_config(self, config_data):
- """Update logger's configuration.
-
- We can update logger's log level and its rotate file handler's configuration.
- """
- self._get_config(config_data)
-
- logLevel = LEVELS.get(self._severity, logging.NOTSET)
- if (logLevel != self.getEffectiveLevel()):
- self.setLevel(logLevel)
- self._update_rotate_handler(self._log_file, self._versions, self._max_bytes)
-
- def log_message(self, level, msg, *args, **kwargs):
- """Log 'msg % args' with the integer severity 'level'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.log_message('info', "We have a %s", "mysterious problem").
- """
- logLevel = LEVELS.get(level, logging.NOTSET)
- try:
- self.log(logLevel, msg, *args, **kwargs)
- except (TypeError, KeyError) as e:
- sys.stderr.write("[b10-logging] Log message fail %s\n" % (str(e)))
-
-
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 86b3e5d..170eee6 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -1,16 +1,40 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = log_test.py
-EXTRA_DIST = $(PYTESTS)
+PYTESTS_GEN = log_console.py
+PYTESTS_NOGEN = log_test.py
+noinst_SCRIPTS = $(PYTESTS_GEN)
+EXTRA_DIST = console.out check_output.sh $(PYTESTS_NOGEN)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We need two passes here: log_console.py is generated in builddir, while
+# log_test.py lives in srcdir
check-local:
+ chmod +x $(abs_builddir)/log_console.py
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
+ $(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
- for pytest in $(PYTESTS) ; do \
+ for pytest in $(PYTESTS_NOGEN) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done ; \
+ for pytest in $(PYTESTS_GEN) ; do \
+ echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/log/tests/check_output.sh b/src/lib/python/isc/log/tests/check_output.sh
new file mode 100755
index 0000000..32146af
--- /dev/null
+++ b/src/lib/python/isc/log/tests/check_output.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+"$1" 2>&1 | cut -d\ -f3- | diff - "$2" 1>&2
diff --git a/src/lib/python/isc/log/tests/console.out b/src/lib/python/isc/log/tests/console.out
new file mode 100644
index 0000000..fbb1bb9
--- /dev/null
+++ b/src/lib/python/isc/log/tests/console.out
@@ -0,0 +1,4 @@
+INFO [test.output] MSG_ID Message with list [1, 2, 3, 4]
+WARN [test.output] DIFFERENT Different message
+FATAL [test.output] MSG_ID Message with 2 1
+DEBUG [test.output] MSG_ID Message with 3 2
diff --git a/src/lib/python/isc/log/tests/log_console.py.in b/src/lib/python/isc/log/tests/log_console.py.in
new file mode 100755
index 0000000..af05f61
--- /dev/null
+++ b/src/lib/python/isc/log/tests/log_console.py.in
@@ -0,0 +1,15 @@
+#!@PYTHON@
+
+import isc.log
+# This would come from a dictionary in real life
+MSG_ID = isc.log.create_message("MSG_ID", "Message with %2 %1")
+DIFFERENT = isc.log.create_message("DIFFERENT", "Different message")
+isc.log.init("test")
+logger = isc.log.Logger("output")
+
+logger.debug(20, MSG_ID, "test", "no output")
+logger.info(MSG_ID, [1, 2, 3, 4], "list")
+logger.warn(DIFFERENT)
+logger.fatal(MSG_ID, 1, 2)
+logger.set_severity("DEBUG", 99)
+logger.debug(1, MSG_ID, 2, 3)
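
The console.out above follows from the positional %N placeholders in the message text; arguments fill the placeholders by position, not by order of appearance:

    # MSG_ID was created as "Message with %2 %1", so the second argument
    # fills %2 and the first fills %1:
    logger.fatal(MSG_ID, 1, 2)     # -> "FATAL ... MSG_ID Message with 2 1"
    logger.debug(1, MSG_ID, 2, 3)  # -> "DEBUG ... MSG_ID Message with 3 2"
    # The earlier debug(20, ...) call produced no output because debug
    # logging is only enabled after set_severity("DEBUG", 99).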
diff --git a/src/lib/python/isc/log/tests/log_test.in b/src/lib/python/isc/log/tests/log_test.in
deleted file mode 100644
index 60e5e3f..0000000
--- a/src/lib/python/isc/log/tests/log_test.in
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /bin/sh
-
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
-export PYTHON_EXEC
-
-TEST_PATH=@abs_top_srcdir@/src/lib/python/isc/log/tests
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
-export PYTHONPATH
-
-cd ${TEST_PATH}
-exec ${PYTHON_EXEC} -O log_test.py $*
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 026dee1..8deaeae 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 Internet Systems Consortium.
+# Copyright (C) 2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -13,225 +13,161 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the python logging module
-#
-
-from isc.log.log import *
+# This tests that the module can be loaded, nothing more yet
+import isc.log
import unittest
-import os
-import sys
-import tempfile
-
-
-class TestRotateFileHandler(unittest.TestCase):
+import json
+import bind10_config
+from isc.config.ccsession import path_search
+class LogDict(unittest.TestCase):
def setUp(self):
- self.FILE_LOG1 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.FILE_LOG2 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.FILE_LOG3 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.handler = NSFileLogHandler(filename = self.FILE_LOG1.name,
- maxBytes = 1024,
- backupCount = 5)
-
- def test_shouldRollover(self):
- if(os.path.exists(self.FILE_LOG1.name)):
- os.remove(self.FILE_LOG1.name)
- record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
- self.handler.shouldRollover(record)
- self.assertTrue(os.path.exists(self.FILE_LOG1.name))
-
- def test_get_absolute_file_path(self):
- abs_file_name = self.handler._get_abs_file_path(self.FILE_LOG1.name)
- self.assertEqual(abs_file_name, self.FILE_LOG1.name)
- # test bare filename
- file_name1 = "bind10.py"
- abs_file_name = self.handler._get_abs_file_path(file_name1)
- self.assertEqual(abs_file_name, os.path.join(os.getcwd(), file_name1))
- # test relative path
- file_name2 = "./bind10.py"
- abs_file_name = self.handler._get_abs_file_path(file_name2)
- self.assertEqual(abs_file_name, os.path.join(os.getcwd(), os.path.basename(file_name2)))
-
- def test_update_config(self):
- self.handler.update_config(self.FILE_LOG2.name, 3, 512)
- self.assertEqual(self.handler.baseFilename, self.FILE_LOG2.name)
- self.assertEqual(self.handler.maxBytes, 512)
- self.assertEqual(self.handler.backupCount, 3)
-
- # check the existence of new log file.
- # emit() will call shouldRollover() to update the log file
- if(os.path.exists(self.FILE_LOG2.name)):
- os.remove(self.FILE_LOG2.name)
- record = logging.LogRecord(None, None, "", 0, "rotate file handler", (), None, None)
- self.handler.emit(record)
- self.assertTrue(os.path.exists(self.FILE_LOG2.name))
-
- def test_handle_Error(self):
- if(os.path.exists(self.FILE_LOG3.name)):
- os.remove(self.FILE_LOG3.name)
- # redirect error message to file
- savederr = sys.stderr
- errfd = open(self.FILE_LOG3.name, 'w+')
- sys.stderr = errfd
- record = logging.LogRecord(None, None, "", 0, "record message", (), None, None)
- try:
- raise ValueError("ValueError")
- except ValueError:
- self.handler.handleError(record)
-
- self.assertEqual("[b10-logging] : ValueError", errfd.read())
- sys.stderr = savederr
- errfd.close()
-
+ # We work on a test dictionary now.
+ isc.log.set_test_dictionary(True)
def tearDown(self):
- self.handler.flush()
- self.handler.close()
- self.FILE_LOG1.close()
- self.FILE_LOG2.close()
- self.FILE_LOG3.close()
+ # Return to the global dictionary
+ isc.log.set_test_dictionary(False)
-class TestSysLogHandler(unittest.TestCase):
- def setUp(self):
- self.handler = NSSysLogHandler("BIND10")
-
- def test_encodeLevel(self):
- sysLevel = self.handler._encodeLevel(logging.ERROR)
- self.assertEqual(sysLevel, syslog.LOG_ERR)
+ def test_load_msgs(self):
+ # Try loading a message and see it's there, but nothing more
+ self.assertEqual(isc.log.create_message("ID", "Text"), "ID")
+ self.assertEqual(isc.log.get_message("ID"), "Text")
+ self.assertEqual(isc.log.get_message("no-ID"), None)
- def test_emit(self):
- syslog_message = "bind10 syslog testing"
- record = logging.LogRecord(None, None, "", 0, syslog_message, (), None, None)
- self.handler.emit(record)
+class Manager(unittest.TestCase):
+ def tearDown(self):
+ isc.log.reset()
+
+ def test_init_debug(self):
+ # For now we only call it, as we don't have any other functions
+ # to check the outcome with. Once we add the logger class, we may
+ # check more.
+ isc.log.init("root", "DEBUG", 50, None)
+
+ def test_init_defaults(self):
+ # For now we only call it, as we don't have any other functions
+ # to check the outcome with. Once we add the logger class, we may
+ # check more.
+ isc.log.init("root")
+
+ def test_init_notfound(self):
+ # This should not throw, because the C++ one doesn't. Should we really
+ # ignore errors like missing file?
+ isc.log.init("root", "INFO", 0, "/no/such/file");
+
+ def test_log_config_update(self):
+ log_spec = json.dumps(isc.config.module_spec_from_file(path_search('logging.spec', bind10_config.PLUGIN_PATHS)).get_full_spec())
+
+ self.assertRaises(TypeError, isc.log.log_config_update)
+ self.assertRaises(TypeError, isc.log.log_config_update, 1)
+ self.assertRaises(TypeError, isc.log.log_config_update, 1, 1)
+ self.assertRaises(TypeError, isc.log.log_config_update, 1, 1, 1)
+
+ self.assertRaises(TypeError, isc.log.log_config_update, 1, log_spec)
+ self.assertRaises(TypeError, isc.log.log_config_update, [], log_spec)
+ self.assertRaises(TypeError, isc.log.log_config_update, "foo", log_spec)
+ self.assertRaises(TypeError, isc.log.log_config_update, "{ '", log_spec)
+
+ # empty should pass
+ isc.log.log_config_update("{}", log_spec)
+
+ # bad spec
+ self.assertRaises(TypeError, isc.log.log_config_update, "{}", json.dumps({"foo": "bar"}))
+
+ # Try a correct one
+ log_conf = json.dumps({"loggers":
+ [{"name": "b10-xfrout", "output_options":
+ [{"output": "/tmp/bind10.log",
+ "destination": "file",
+ "flush": True}]}]})
+ isc.log.log_config_update(log_conf, log_spec)
+
+class Logger(unittest.TestCase):
+ def tearDown(self):
+ isc.log.reset()
-class TestLogging(unittest.TestCase):
-
def setUp(self):
- self.FILE_STREAM_LOG1 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.FILE_STREAM_LOG2 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.FILE_STREAM_LOG3 = tempfile.NamedTemporaryFile(mode='w',
- prefix="b10",
- delete=True)
- self.file_stream_logger = NSLogger('File_Stream_Logger',
- self.FILE_STREAM_LOG1.name,
- 'debug', 5, 1024, True)
- self.syslog_logger = NSLogger('SysLogger', '', 'info', 5, 1024, False)
- self.stderr_bak = sys.stderr
- sys.stderr = open(os.devnull, 'w')
-
- def test_logging_init(self):
- self.assertNotEqual(self.file_stream_logger._file_handler, None)
- self.assertNotEqual(self.file_stream_logger._stream_handler, None)
- self.assertEqual(self.file_stream_logger._syslog_handler, None)
-
- self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
- self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
- self.assertNotIn(self.file_stream_logger._syslog_handler, self.file_stream_logger.handlers)
- logLevel = LEVELS.get('debug', logging.NOTSET)
- self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
-
- self.assertEqual(self.syslog_logger._file_handler, None)
- self.assertEqual(self.syslog_logger._stream_handler, None)
- self.assertNotEqual(self.syslog_logger._syslog_handler, None)
- self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
- self.assertNotIn(self.syslog_logger._stream_handler, self.syslog_logger.handlers)
- self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
-
- logLevel = LEVELS.get('info', logging.NOTSET)
- self.assertEqual(self.syslog_logger.getEffectiveLevel(), logLevel)
-
- def test_add_rotate_handler(self):
- if(self.syslog_logger._file_handler in self.syslog_logger.handlers):
- self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
-
- self.syslog_logger._add_rotate_handler('', 5, 1024)
- self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
- self.syslog_logger._add_rotate_handler(self.FILE_STREAM_LOG1.name, 5, 1024)
- self.assertIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
- # test IOError exception
- self.syslog_logger.removeHandler(self.syslog_logger._file_handler)
- log_file = self.FILE_STREAM_LOG1.name + '/logfile'
- self.syslog_logger._add_rotate_handler(log_file, 5, 1024)
- self.assertNotIn(self.syslog_logger._file_handler, self.syslog_logger.handlers)
-
- def test_add_stream_handler(self):
- if(self.file_stream_logger._stream_handler in self.file_stream_logger.handlers):
- self.file_stream_logger.removeHandler(self.file_stream_logger._stream_handler)
-
- self.file_stream_logger._add_stream_handler()
- self.assertIn(self.file_stream_logger._stream_handler, self.file_stream_logger.handlers)
-
- def test_add_syslog_handler(self):
- if(self.syslog_logger._syslog_handler in self.syslog_logger.handlers):
- self.syslog_logger.removeHandler(self.syslog_logger._syslog_handler)
-
- self.syslog_logger._add_syslog_handler()
- self.assertIn(self.syslog_logger._syslog_handler, self.syslog_logger.handlers)
-
- def test_update_rotate_handler(self):
- self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG2.name, 4, 1024)
- self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
- self.file_stream_logger._update_rotate_handler('', 5, 1024)
- self.assertNotIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
- self.file_stream_logger._update_rotate_handler(self.FILE_STREAM_LOG1.name, 4, 1024)
- self.assertIn(self.file_stream_logger._file_handler, self.file_stream_logger.handlers)
-
- def test_get_config(self):
- config_data = {'log_file' : self.FILE_STREAM_LOG1.name,
- 'log_severity' : 'critical',
- 'log_versions' : 4,
- 'log_max_bytes' : 1024}
- self.file_stream_logger._get_config(config_data)
- self.assertEqual(self.file_stream_logger._log_file, self.FILE_STREAM_LOG1.name)
- self.assertEqual(self.file_stream_logger._severity, 'critical')
- self.assertEqual(self.file_stream_logger._versions, 4)
- self.assertEqual(self.file_stream_logger._max_bytes, 1024)
-
-
- def test_update_config(self):
- update_config = {'log_file' : self.FILE_STREAM_LOG1.name,
- 'log_severity' : 'error',
- 'log_versions' : 4,
- 'log_max_bytes' : 1024}
- self.file_stream_logger.update_config(update_config)
- logLevel = LEVELS.get('error', logging.NOTSET)
- self.assertEqual(self.file_stream_logger.getEffectiveLevel(), logLevel)
-
- def test_log_message(self):
- update_config = {'log_file' : self.FILE_STREAM_LOG3.name,
- 'log_severity' : 'critical',
- 'log_versions' : 4,
- 'log_max_bytes' : 1024}
- self.file_stream_logger.update_config(update_config)
- self.file_stream_logger.log_message('debug', 'debug message')
- self.file_stream_logger.log_message('warning', 'warning message')
- self.file_stream_logger.log_message('error', 'error message')
- #test non-exist log level
- self.assertRaises(None, self.file_stream_logger.log_message('not-exist', 'not exist message'))
- #test log_message KeyError exception
- self.assertRaises(None, self.file_stream_logger.log_message('critical', 'critical message', extra=['message', 'asctime']))
- self.assertTrue(os.path.exists(self.FILE_STREAM_LOG3.name))
-
- def tearDown(self):
- self.FILE_STREAM_LOG1.close()
- self.FILE_STREAM_LOG2.close()
- self.FILE_STREAM_LOG3.close()
- sys.stderr.flush();
- sys.stderr = self.stderr_bak
+ isc.log.init("root", "DEBUG", 50)
+ self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+
+ # Checks defaults of the logger
+ def defaults(self, logger):
+ self.assertEqual(logger.get_effective_severity(), "DEBUG")
+ self.assertEqual(logger.get_effective_debug_level(), 50)
+
+ def test_default_severity(self):
+ logger = isc.log.Logger("child")
+ self.defaults(logger)
+
+ # Try changing the severities a little bit
+ def test_severity(self):
+ logger = isc.log.Logger("child")
+ logger.set_severity('DEBUG', 25)
+ self.assertEqual(logger.get_effective_severity(), "DEBUG")
+ self.assertEqual(logger.get_effective_debug_level(), 25)
+ for sev in self.sevs:
+ logger.set_severity(sev)
+ self.assertEqual(logger.get_effective_severity(), sev)
+ self.assertEqual(logger.get_effective_debug_level(), 0)
+ # Return to default
+ logger.set_severity(None)
+ self.defaults(logger)
+
+ def test_enabled(self):
+ logger = isc.log.Logger("child")
+ self.sevs.insert(0, 'DEBUG')
+ methods = {
+ 'DEBUG': logger.is_debug_enabled,
+ 'INFO': logger.is_info_enabled,
+ 'WARN': logger.is_warn_enabled,
+ 'ERROR': logger.is_error_enabled,
+ 'FATAL': logger.is_fatal_enabled
+ }
+ for sev in self.sevs:
+ logger.set_severity(sev)
+ enabled = False
+ for tested in self.sevs:
+ if tested == sev:
+ enabled = True
+ self.assertEqual(methods[tested](), enabled)
+ logger.set_severity('DEBUG', 50)
+ self.assertTrue(logger.is_debug_enabled())
+ self.assertTrue(logger.is_debug_enabled(0))
+ self.assertTrue(logger.is_debug_enabled(50))
+ self.assertFalse(logger.is_debug_enabled(99))
+
+ def test_invalid_params(self):
+ """
+ Tests invalid arguments for logging functions. The output is tested
+ in check_output.sh.
+ """
+ logger = isc.log.Logger("child")
+ methods = [
+ logger.info,
+ logger.warn,
+ logger.error,
+ logger.fatal
+ ]
+ for meth in methods:
+ # Not enough arguments
+ self.assertRaises(TypeError, meth)
+ # Bad type
+ self.assertRaises(TypeError, meth, 1)
+ # Too few arguments
+ self.assertRaises(TypeError, logger.debug, 42)
+ self.assertRaises(TypeError, logger.debug)
+ # Bad type
+ self.assertRaises(TypeError, logger.debug, "42", "hello")
+
+ def test_dbglevel_constants(self):
+ """
+ Just check a constant to make sure it is defined and is the
+ correct value. (The constant chosen has a non-zero value to
+ ensure that the code has both defined the constant and set its
+ value correctly.)
+ """
+ logger = isc.log.Logger("child")
+ self.assertEqual(logger.DBGLVL_COMMAND, 10)
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..30f8374
--- /dev/null
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -0,0 +1,32 @@
+SUBDIRS = work
+
+EXTRA_DIST = __init__.py
+EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += cmdctl_messages.py
+EXTRA_DIST += stats_messages.py
+EXTRA_DIST += stats_httpd_messages.py
+EXTRA_DIST += xfrin_messages.py
+EXTRA_DIST += xfrout_messages.py
+EXTRA_DIST += zonemgr_messages.py
+EXTRA_DIST += cfgmgr_messages.py
+EXTRA_DIST += config_messages.py
+EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libxfrin_messages.py
+
+CLEANFILES = __init__.pyc
+CLEANFILES += bind10_messages.pyc
+CLEANFILES += cmdctl_messages.pyc
+CLEANFILES += stats_messages.pyc
+CLEANFILES += stats_httpd_messages.pyc
+CLEANFILES += xfrin_messages.pyc
+CLEANFILES += xfrout_messages.pyc
+CLEANFILES += zonemgr_messages.pyc
+CLEANFILES += cfgmgr_messages.pyc
+CLEANFILES += config_messages.pyc
+CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libxfrin_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/README b/src/lib/python/isc/log_messages/README
new file mode 100644
index 0000000..c96f78c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/README
@@ -0,0 +1,68 @@
+This is a placeholder package for logging messages of various modules
+in the form of python scripts. This package is expected to be installed
+somewhere like <top-install-dir>/python3.x/site-packages/isc/log_messages
+and each message script is expected to be imported as
+"isc.log_messages.some_module_messages".
+
+We also need to allow in-source test code to get access to the message
+scripts in the same manner. That's why the package is stored in the
+directory that shares the same trailing part as the install directory,
+i.e., isc/log_messages.
+
+Furthermore, we need to support a build mode using a separate build
+tree (such as in the case with 'make distcheck'). In that case if an
+application (via a test script) imports "isc.log_messages.xxx", it
+would try to import the module under the source tree, where the
+generated message script doesn't exist. So, in the source directory
+(i.e., here) we provide dummy scripts that subsequently import the
+same name of module under the "work" sub-package. The caller
+application is assumed to have <top_builddir>/src/lib/python/isc/log_messages
+in its module search path (this is done by including
+$(COMMON_PYTHON_PATH) in the PYTHONPATH environment variable),
+which ensures the right directory is chosen.
+
+A python module or program that defines its own log messages needs to
+make sure that the setup described above is implemented. It's a
+complicated process, but can generally be done by following a common
+pattern:
+
+1. Create the dummy script (see above) for the module and update
+ Makefile.am in this directory accordingly. See (and use) the helper
+ shell script named gen-forwarder.sh; the forwarder it generates is
+ sketched just after this README.
+2. Update Makefile.am of the module that defines the log message. The
+ following is a sample snippet for Makefile.am for a module named
+ "mymodule" (whose messages are supposed to be generated from a file
+ "mymodule_messages.mes"). In many cases it should work simply by
+ replacing 'mymodule' with the actual module name.
+
+==================== begin Makefile.am additions ===================
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.pyc
+
+EXTRA_DIST = mymodule_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py : mymodule_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/mymodule_messages.mes
+
+# This rule ensures mymodule_messages.py is (re)generated as a result of
+# 'make'. If there's no other appropriate target, specify
+# mymodule_messages.py in BUILT_SOURCES.
+mymodule: <other source files> $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+===================== end Makefile.am additions ====================
+
+Notes:
+- "nodist_" prefix is important. Without this, 'make distcheck' tries
+ to make _messages.py before actually starting the main build, which
+ would fail because the message compiler isn't built yet.
+- "pylogmessage" is a prefix for python scripts that define log
+ messages and are expected to be installed in the common isc/log_messages
+ directory. It's intentionally named differently from the common
+ "python" prefix (as in python_PYTHON), because the latter may be
+ used for other scripts in the same Makefile.am file.
+- $(PYTHON_LOGMSGPKG_DIR) should be set to point to this directory (or
+ the corresponding build directory if it's different) by the
+ configure script.
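
For reference, the dummy forwarder script from step 1 above (as produced by gen-forwarder.sh) is a single re-export; for the hypothetical "mymodule" it would be:

    # isc/log_messages/mymodule_messages.py -- generated by gen-forwarder.sh
    from work.mymodule_messages import *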
diff --git a/src/lib/python/isc/log_messages/__init__.py b/src/lib/python/isc/log_messages/__init__.py
new file mode 100644
index 0000000..d222b8c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/__init__.py
@@ -0,0 +1,3 @@
+"""
+This is an in-source forwarder package redirecting to work/* scripts.
+"""
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
new file mode 100644
index 0000000..68ce94c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/bind10_messages.py
@@ -0,0 +1 @@
+from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/cfgmgr_messages.py b/src/lib/python/isc/log_messages/cfgmgr_messages.py
new file mode 100644
index 0000000..5557100
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cfgmgr_messages.py
@@ -0,0 +1 @@
+from work.cfgmgr_messages import *
diff --git a/src/lib/python/isc/log_messages/cmdctl_messages.py b/src/lib/python/isc/log_messages/cmdctl_messages.py
new file mode 100644
index 0000000..7283d5a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cmdctl_messages.py
@@ -0,0 +1 @@
+from work.cmdctl_messages import *
diff --git a/src/lib/python/isc/log_messages/config_messages.py b/src/lib/python/isc/log_messages/config_messages.py
new file mode 100644
index 0000000..c557975
--- /dev/null
+++ b/src/lib/python/isc/log_messages/config_messages.py
@@ -0,0 +1 @@
+from work.config_messages import *
diff --git a/src/lib/python/isc/log_messages/gen-forwarder.sh b/src/lib/python/isc/log_messages/gen-forwarder.sh
new file mode 100755
index 0000000..84c2450
--- /dev/null
+++ b/src/lib/python/isc/log_messages/gen-forwarder.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+MODULE_NAME=$1
+if test -z "$MODULE_NAME"; then
+ echo 'Usage: gen-forwarder.sh module_name'
+ exit 1
+fi
+
+echo "from work.${MODULE_NAME}_messages import *" > ${MODULE_NAME}_messages.py
+echo "Forwarder python script is generated. Make sure to perform:"
+echo "git add ${MODULE_NAME}_messages.py"
+echo "and add the following to Makefile.am:"
+echo "EXTRA_DIST += ${MODULE_NAME}_messages.py"
+echo "CLEANFILES += ${MODULE_NAME}_messages.pyc"
diff --git a/src/lib/python/isc/log_messages/libxfrin_messages.py b/src/lib/python/isc/log_messages/libxfrin_messages.py
new file mode 100644
index 0000000..74da329
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libxfrin_messages.py
@@ -0,0 +1 @@
+from work.libxfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/notify_out_messages.py b/src/lib/python/isc/log_messages/notify_out_messages.py
new file mode 100644
index 0000000..6aa37ea
--- /dev/null
+++ b/src/lib/python/isc/log_messages/notify_out_messages.py
@@ -0,0 +1 @@
+from work.notify_out_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_httpd_messages.py b/src/lib/python/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..7782c34
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1 @@
+from work.stats_httpd_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_messages.py b/src/lib/python/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..1324cfc
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_messages.py
@@ -0,0 +1 @@
+from work.stats_messages import *
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
new file mode 100644
index 0000000..9bc5e0f
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -0,0 +1,12 @@
+# .py is generated in the builddir by the configure script so that test
+# scripts can refer to it when a separate builddir is used.
+
+python_PYTHON = __init__.py
+
+pythondir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = __init__.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/work/__init__.py.in b/src/lib/python/isc/log_messages/work/__init__.py.in
new file mode 100644
index 0000000..991f10a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/__init__.py.in
@@ -0,0 +1,3 @@
+"""
+This package is a placeholder for python scripts of log messages.
+"""
diff --git a/src/lib/python/isc/log_messages/xfrin_messages.py b/src/lib/python/isc/log_messages/xfrin_messages.py
new file mode 100644
index 0000000..b412519
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrin_messages.py
@@ -0,0 +1 @@
+from work.xfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/xfrout_messages.py b/src/lib/python/isc/log_messages/xfrout_messages.py
new file mode 100644
index 0000000..2093d5c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrout_messages.py
@@ -0,0 +1 @@
+from work.xfrout_messages import *
diff --git a/src/lib/python/isc/log_messages/zonemgr_messages.py b/src/lib/python/isc/log_messages/zonemgr_messages.py
new file mode 100644
index 0000000..b3afe9c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/zonemgr_messages.py
@@ -0,0 +1 @@
+from work.zonemgr_messages import *
diff --git a/src/lib/python/isc/net/Makefile.am b/src/lib/python/isc/net/Makefile.am
index bb6057c..1b97614 100644
--- a/src/lib/python/isc/net/Makefile.am
+++ b/src/lib/python/isc/net/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = tests
python_PYTHON = __init__.py addr.py parse.py
pythondir = $(pyexecdir)/isc/net
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 73528d2..dd94946 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = addr_test.py parse_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/Makefile.am b/src/lib/python/isc/notify/Makefile.am
index f4a94fa..c247ab8 100644
--- a/src/lib/python/isc/notify/Makefile.am
+++ b/src/lib/python/isc/notify/Makefile.am
@@ -1,5 +1,22 @@
SUBDIRS = . tests
python_PYTHON = __init__.py notify_out.py
-
pythondir = $(pyexecdir)/isc/notify
+
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = notify_out_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.pyc
+
+CLEANDIRS = __pycache__
+
+$(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py : notify_out_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/notify_out_messages.mes
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 43dc7af..6b91c87 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,12 +21,17 @@ import threading
import time
import errno
from isc.datasrc import sqlite3_ds
+from isc.net import addr
import isc
-try:
- from pydnspp import *
-except ImportError as e:
- # C++ loadable module may not be installed;
- sys.stderr.write('[b10-xfrout] failed to import DNS or XFR module: %s\n' % str(e))
+from isc.log_messages.notify_out_messages import *
+
+logger = isc.log.Logger("notify_out")
+
+# there used to be a printed message if this import failed, but if
+# we can't import we should not start anyway, and logging an error
+# is a bad idea since the logging system is most likely not
+# initialized yet. see trac ticket #1103
+from pydnspp import *
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
_MAX_NOTIFY_NUM = 30
@@ -35,7 +40,6 @@ _EVENT_NONE = 0
_EVENT_READ = 1
_EVENT_TIMEOUT = 2
_NOTIFY_TIMEOUT = 1
-_IDLE_SLEEP_TIME = 0.5
# define the rcode for parsing notify reply message
_REPLY_OK = 0
@@ -46,18 +50,11 @@ _BAD_QR = 4
_BAD_REPLY_PACKET = 5
SOCK_DATA = b's'
-def addr_to_str(addr):
- return '%s#%s' % (addr[0], addr[1])
-
class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
def __init__(self, zone_name_, class_):
- '''notify_timeout_: absolute time for next notify reply. when the zone
- is preparing for sending notify message, notify_timeout_ is set to now,
- that means the first sending is triggered by the 'Timeout' mechanism.
- '''
self._notify_current = None
self._slave_index = 0
self._sock = None
@@ -66,9 +63,12 @@ class ZoneNotifyInfo:
self.zone_name = zone_name_
self.zone_class = class_
self.notify_msg_id = 0
- self.notify_timeout = 0
- self.notify_try_num = 0 #Notify times sending to one target.
-
+ # Absolute time for the next notify reply. When the zone is preparing
+ # to send a notify message, notify_timeout is set to now, which means
+ # the first send is triggered by the 'Timeout' mechanism.
+ self.notify_timeout = None
+ self.notify_try_num = 0 # Number of notify attempts to the current target.
+
def set_next_notify_target(self):
if self._slave_index < (len(self.notify_slaves) - 1):
self._slave_index += 1
@@ -77,8 +77,7 @@ class ZoneNotifyInfo:
self._notify_current = None
def prepare_notify_out(self):
- '''Create the socket and set notify timeout time to now'''
- self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #TODO support IPv6?
+ '''Set notify timeout time to now'''
self.notify_timeout = time.time()
self.notify_try_num = 0
self._slave_index = 0
@@ -89,6 +88,12 @@ class ZoneNotifyInfo:
if self._sock:
self._sock.close()
self._sock = None
+ self.notify_timeout = None
+
+ def create_socket(self, dest_addr):
+ self._sock = socket.socket(addr.IPAddr(dest_addr).family,
+ socket.SOCK_DGRAM)
+ return self._sock
def get_socket(self):
return self._sock
@@ -98,14 +103,13 @@ class ZoneNotifyInfo:
class NotifyOut:
'''This class is used to handle notify logic for all zones(sending
- notify message to its slaves). notify service can be started by
+ notify message to its slaves). notify service can be started by
calling dispatcher(), and it can be stoped by calling shutdown()
- in another thread. '''
- def __init__(self, datasrc_file, log=None, verbose=True):
+ in another thread. '''
+ def __init__(self, datasrc_file, verbose=True):
self._notify_infos = {} # key is (zone_name, zone_class)
self._waiting_zones = []
self._notifying_zones = []
- self._log = log
self._serving = False
self._read_sock, self._write_sock = socket.socketpair()
self._read_sock.setblocking(False)
@@ -114,12 +118,15 @@ class NotifyOut:
self._lock = threading.Lock()
self._db_file = datasrc_file
self._init_notify_out(datasrc_file)
+ # Use a non-blocking event to eliminate the busy loop.
+ # If there are no notifying zones, clear the event bit and wait.
+ self._nonblock_event = threading.Event()
def _init_notify_out(self, datasrc_file):
'''Get all the zones name and its notify target's address
- TODO, currently the zones are got by going through the zone
- table in database. There should be a better way to get them
- and also the setting 'also_notify', and there should be one
+ TODO, currently the zones are got by going through the zone
+ table in database. There should be a better way to get them
+ and also the setting 'also_notify', and there should be one
mechanism to cover the changed datasrc.'''
self._db_file = datasrc_file
for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
@@ -130,7 +137,7 @@ class NotifyOut:
self._notify_infos[zone_id].notify_slaves.append((item, 53))
def send_notify(self, zone_name, zone_class='IN'):
- '''Send notify to one zone's slaves, this function is
+ '''Send notify to one zone's slaves, this function is
the only interface for class NotifyOut which can be called
by other object.
Internally, the function only set the zone's notify-reply
@@ -142,14 +149,20 @@ class NotifyOut:
if zone_id not in self._notify_infos:
return
+ # If the zone has no slave servers, skip it.
+ if (len(self._notify_infos[zone_id].notify_slaves) <= 0):
+ return
+
with self._lock:
if (self.notify_num >= _MAX_NOTIFY_NUM) or (zone_id in self._notifying_zones):
if zone_id not in self._waiting_zones:
self._waiting_zones.append(zone_id)
else:
self._notify_infos[zone_id].prepare_notify_out()
- self.notify_num += 1
+ self.notify_num += 1
self._notifying_zones.append(zone_id)
+ if not self._nonblock_event.isSet():
+ self._nonblock_event.set()
def _dispatcher(self, started_event):
started_event.set() # Let the master know we are alive already
@@ -168,8 +181,8 @@ class NotifyOut:
If one zone get the notify reply before timeout, call the
handle to process the reply. If one zone can't get the notify
- before timeout, call the handler to resend notify or notify
- next slave.
+ before timeout, call the handler to resend notify or notify
+ next slave.
The thread can be stopped by calling shutdown().
@@ -205,6 +218,9 @@ class NotifyOut:
# Ask it to stop
self._serving = False
+ if not self._nonblock_event.isSet():
+ # set self._nonblock_event to stop waiting for new notifying zones.
+ self._nonblock_event.set()
self._write_sock.send(SOCK_DATA) # make self._read_sock be readable.
# Wait for it
@@ -223,7 +239,7 @@ class NotifyOut:
then use the name in NS record rdata part to get the a/aaaa records
in the same zone. the targets listed in a/aaaa record rdata are treated
as the notify slaves.
- Note: this is the simplest way to get the address of slaves,
+ Note: this is the simplest way to get the address of slaves,
but not correct, it can't handle the delegation slaves, or the CNAME
and DNAME logic.
TODO. the function should be provided by one library.'''
@@ -231,8 +247,8 @@ class NotifyOut:
soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
ns_rr_name = []
for ns in ns_rrset:
- ns_rr_name.append(self._get_rdata_data(ns))
-
+ ns_rr_name.append(self._get_rdata_data(ns))
+
if len(soa_rrset) > 0:
sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
if sname in ns_rr_name:
@@ -266,15 +282,22 @@ class NotifyOut:
sock = self._notify_infos[info].get_socket()
if sock:
valid_socks.append(sock)
+
+            # If a non-null timeout is specified, the notify has been scheduled
+            # (in which case the socket is still None) or sent (with a valid
+            # socket). In either case we need to add the zone to notifying_zones
+            # so that we can invoke the appropriate event for the zone after
+            # select.
+ tmp_timeout = self._notify_infos[info].notify_timeout
+ if tmp_timeout is not None:
notifying_zones[info] = self._notify_infos[info]
- tmp_timeout = self._notify_infos[info].notify_timeout
if min_timeout is not None:
if tmp_timeout < min_timeout:
min_timeout = tmp_timeout
else:
min_timeout = tmp_timeout
- block_timeout = _IDLE_SLEEP_TIME
+ block_timeout = None
if min_timeout is not None:
block_timeout = min_timeout - time.time()
if block_timeout < 0:
@@ -297,10 +320,18 @@ class NotifyOut:
# This is None only during some tests
if self._read_sock is not None:
valid_socks.append(self._read_sock)
+
+        # Currently there are no notifying zones; wait for a zone to send notify.
+ if block_timeout is None:
+ self._nonblock_event.clear()
+ self._nonblock_event.wait()
+            # A new notifying zone has arrived; check immediately.
+ block_timeout = 0
+
try:
r_fds, w, e = select.select(valid_socks, [], [], block_timeout)
except select.error as err:
- if err.args[0] != EINTR:
+ if err.args[0] != errno.EINTR:
return {}, {}
if self._read_sock in r_fds: # user has called shutdown()
@@ -323,35 +354,36 @@ class NotifyOut:
return replied_zones, not_replied_zones
def _zone_notify_handler(self, zone_notify_info, event_type):
- '''Notify handler for one zone. The first notify message is
- always triggered by the event "_EVENT_TIMEOUT" since when
- one zone prepares to notify its slaves, its notify_timeout
- is set to now, which is used to trigger sending notify
+ '''Notify handler for one zone. The first notify message is
+ always triggered by the event "_EVENT_TIMEOUT" since when
+ one zone prepares to notify its slaves, its notify_timeout
+ is set to now, which is used to trigger sending notify
message when dispatcher() scanning zones. '''
tgt = zone_notify_info.get_current_notify_target()
if event_type == _EVENT_READ:
reply = self._get_notify_reply(zone_notify_info.get_socket(), tgt)
- if reply:
- if self._handle_notify_reply(zone_notify_info, reply):
+ if reply is not None:
+ if self._handle_notify_reply(zone_notify_info, reply, tgt):
self._notify_next_target(zone_notify_info)
elif event_type == _EVENT_TIMEOUT and zone_notify_info.notify_try_num > 0:
- self._log_msg('info', 'notify retry to %s' % addr_to_str(tgt))
+ logger.info(NOTIFY_OUT_TIMEOUT, tgt[0], tgt[1])
tgt = zone_notify_info.get_current_notify_target()
if tgt:
zone_notify_info.notify_try_num += 1
if zone_notify_info.notify_try_num > _MAX_NOTIFY_TRY_NUM:
- self._log_msg('info', 'notify to %s: retried exceeded' % addr_to_str(tgt))
+ logger.warn(NOTIFY_OUT_RETRY_EXCEEDED, tgt[0], tgt[1],
+ _MAX_NOTIFY_TRY_NUM)
self._notify_next_target(zone_notify_info)
else:
- retry_timeout = _NOTIFY_TIMEOUT * pow(2, zone_notify_info.notify_try_num)
# set exponential backoff according rfc1996 section 3.6
+ retry_timeout = _NOTIFY_TIMEOUT * pow(2, zone_notify_info.notify_try_num)
zone_notify_info.notify_timeout = time.time() + retry_timeout
self._send_notify_message_udp(zone_notify_info, tgt)
def _notify_next_target(self, zone_notify_info):
- '''Notify next address for the same zone. If all the targets
+ '''Notify next address for the same zone. If all the targets
has been notified, notify the first zone in waiting list. '''
zone_notify_info.notify_try_num = 0
zone_notify_info.set_next_notify_target()
@@ -359,35 +391,43 @@ class NotifyOut:
if not tgt:
zone_notify_info.finish_notify_out()
with self._lock:
- self.notify_num -= 1
- self._notifying_zones.remove((zone_notify_info.zone_name,
- zone_notify_info.zone_class))
+ self.notify_num -= 1
+ self._notifying_zones.remove((zone_notify_info.zone_name,
+ zone_notify_info.zone_class))
# trigger notify out for waiting zones
if len(self._waiting_zones) > 0:
- zone_id = self._waiting_zones.pop(0)
+ zone_id = self._waiting_zones.pop(0)
self._notify_infos[zone_id].prepare_notify_out()
- self.notify_num += 1
+ self.notify_num += 1
self._notifying_zones.append(zone_id)
+ if not self._nonblock_event.isSet():
+ self._nonblock_event.set()
def _send_notify_message_udp(self, zone_notify_info, addrinfo):
- msg, qid = self._create_notify_message(zone_notify_info.zone_name,
+ msg, qid = self._create_notify_message(zone_notify_info.zone_name,
zone_notify_info.zone_class)
render = MessageRenderer()
- render.set_length_limit(512)
+ render.set_length_limit(512)
msg.to_wire(render)
zone_notify_info.notify_msg_id = qid
- sock = zone_notify_info.get_socket()
try:
+ sock = zone_notify_info.create_socket(addrinfo[0])
sock.sendto(render.get_data(), 0, addrinfo)
- self._log_msg('info', 'sending notify to %s' % addr_to_str(addrinfo))
- except socket.error as err:
- self._log_msg('error', 'send notify to %s failed: %s' % (addr_to_str(addrinfo), str(err)))
+ logger.info(NOTIFY_OUT_SENDING_NOTIFY, addrinfo[0],
+ addrinfo[1])
+        except socket.error as err:
+ logger.error(NOTIFY_OUT_SOCKET_ERROR, addrinfo[0],
+ addrinfo[1], err)
+ return False
+ except addr.InvalidAddress as iae:
+ logger.error(NOTIFY_OUT_INVALID_ADDRESS, addrinfo[0],
+ addrinfo[1], iae)
return False
return True
def _create_rrset_from_db_record(self, record, zone_class):
- '''Create one rrset from one record of datasource, if the schema of record is changed,
+ '''Create one rrset from one record of datasource, if the schema of record is changed,
This function should be updated first. TODO, the function is copied from xfrout, there
should be library for creating one rrset. '''
rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
@@ -407,39 +447,43 @@ class NotifyOut:
question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
msg.add_question(question)
# Add soa record to answer section
- soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
+ soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
return msg, qid
- def _handle_notify_reply(self, zone_notify_info, msg_data):
+ def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
'''Parse the notify reply message.
- TODO, the error message should be refined properly.
rcode will not checked here, If we get the response
from the slave, it means the slaves has got the notify.'''
msg = Message(Message.PARSE)
try:
- errstr = 'notify reply error: '
msg.from_wire(msg_data)
if not msg.get_header_flag(Message.HEADERFLAG_QR):
- self._log_msg('error', errstr + 'bad flags')
+ logger.warn(NOTIFY_OUT_REPLY_QR_NOT_SET, from_addr[0],
+ from_addr[1])
return _BAD_QR
- if msg.get_qid() != zone_notify_info.notify_msg_id:
- self._log_msg('error', errstr + 'bad query ID')
+ if msg.get_qid() != zone_notify_info.notify_msg_id:
+ logger.warn(NOTIFY_OUT_REPLY_BAD_QID, from_addr[0],
+ from_addr[1], msg.get_qid(),
+ zone_notify_info.notify_msg_id)
return _BAD_QUERY_ID
-
+
question = msg.get_question()[0]
if question.get_name() != Name(zone_notify_info.zone_name):
- self._log_msg('error', errstr + 'bad query name')
+ logger.warn(NOTIFY_OUT_REPLY_BAD_QUERY_NAME, from_addr[0],
+ from_addr[1], question.get_name().to_text(),
+ Name(zone_notify_info.zone_name).to_text())
return _BAD_QUERY_NAME
if msg.get_opcode() != Opcode.NOTIFY():
- self._log_msg('error', errstr + 'bad opcode')
+ logger.warn(NOTIFY_OUT_REPLY_BAD_OPCODE, from_addr[0],
+ from_addr[1], msg.get_opcode().to_text())
return _BAD_OPCODE
except Exception as err:
- # We don't care what exception, just report it?
- self._log_msg('error', errstr + str(err))
+ # We don't care what exception, just report it?
+ logger.error(NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION, err)
return _BAD_REPLY_PACKET
return _REPLY_OK
@@ -447,14 +491,9 @@ class NotifyOut:
def _get_notify_reply(self, sock, tgt_addr):
try:
msg, addr = sock.recvfrom(512)
- except socket.error:
- self._log_msg('error', "notify to %s failed: can't read notify reply" % addr_to_str(tgt_addr))
+ except socket.error as err:
+ logger.error(NOTIFY_OUT_SOCKET_RECV_ERROR, tgt_addr[0],
+ tgt_addr[1], err)
return None
return msg
-
-
- def _log_msg(self, level, msg):
- if self._log:
- self._log.log_message(level, msg)
-
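To illustrate the per-destination socket handling that the new create_socket() above adds (it uses addr.IPAddr to pick AF_INET or AF_INET6 for each target), here is a minimal, self-contained sketch using only the standard library; the helper name and the use of getaddrinfo() are assumptions made for the example, not the patch's code:

import socket

def make_udp_socket(dest_addr, dest_port=53):
    """Create a UDP socket whose family matches dest_addr (IPv4 or IPv6)."""
    # AI_NUMERICHOST: the destination is already a literal address, so no
    # name lookup is performed here.
    family = socket.getaddrinfo(dest_addr, dest_port, 0, socket.SOCK_DGRAM,
                                0, socket.AI_NUMERICHOST)[0][0]
    return socket.socket(family, socket.SOCK_DGRAM)

# make_udp_socket('192.0.2.1').family    == socket.AF_INET
# make_udp_socket('2001:db8::53').family == socket.AF_INET6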
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
new file mode 100644
index 0000000..570f51e
--- /dev/null
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -0,0 +1,83 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the notify_out_messages python module.
+
+% NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+
+% NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+
+% NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+
+% NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+
+% NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+
+% NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+
+% NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2
+A notify message is sent to the secondary nameserver at the given
+address.
+
+% NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+
+% NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+
+% NOTIFY_OUT_TIMEOUT retry notify to %1#%2
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+
+% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
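For context on how the definitions above are consumed: the message compiler turns each %-prefixed ID into a Python constant, and the code in notify_out.py passes the values for %1, %2, ... as positional arguments to the logger. A hedged sketch, assuming the generated module lives under isc.log_messages (as it does for libxfrin below) and that the logger name is notify_out:

import isc.log
from isc.log_messages.notify_out_messages import NOTIFY_OUT_SENDING_NOTIFY

isc.log.init("bind10")                 # the unit tests below do the same
logger = isc.log.Logger("notify_out")

# %1 and %2 in the .mes text are filled from the positional arguments.
logger.info(NOTIFY_OUT_SENDING_NOTIFY, "192.0.2.1", 53)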
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index a83ff86..00c2eee 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index c4c149c..83f6d1a 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -21,12 +21,11 @@ import time
import socket
from isc.datasrc import sqlite3_ds
from isc.notify import notify_out, SOCK_DATA
+import isc.log
# our fake socket, where we can read and insert messages
class MockSocket():
- def __init__(self, family, type):
- self.family = family
- self.type = type
+ def __init__(self):
self._local_sock, self._remote_sock = socket.socketpair()
def connect(self, to):
@@ -51,12 +50,16 @@ class MockSocket():
return self._remote_sock
# We subclass the ZoneNotifyInfo class we're testing here, only
-# to override the prepare_notify_out() method.
+# to override the create_socket() method.
class MockZoneNotifyInfo(notify_out.ZoneNotifyInfo):
- def prepare_notify_out(self):
- super().prepare_notify_out();
+ def create_socket(self, addrinfo):
+ super().create_socket(addrinfo)
+ # before replacing the underlying socket, remember the address family
+ # of the original socket so that tests can check that.
+ self.sock_family = self._sock.family
self._sock.close()
- self._sock = MockSocket(socket.AF_INET, socket.SOCK_DGRAM)
+ self._sock = MockSocket()
+ return self._sock
class TestZoneNotifyInfo(unittest.TestCase):
def setUp(self):
@@ -64,11 +67,12 @@ class TestZoneNotifyInfo(unittest.TestCase):
def test_prepare_finish_notify_out(self):
self.info.prepare_notify_out()
- self.assertNotEqual(self.info._sock, None)
+ self.assertNotEqual(self.info.notify_timeout, None)
self.assertIsNone(self.info._notify_current)
self.info.finish_notify_out()
self.assertEqual(self.info._sock, None)
+ self.assertEqual(self.info.notify_timeout, None)
def test_set_next_notify_target(self):
self.info.notify_slaves.append(('127.0.0.1', 53))
@@ -76,7 +80,6 @@ class TestZoneNotifyInfo(unittest.TestCase):
self.info.prepare_notify_out()
self.assertEqual(self.info.get_current_notify_target(), ('127.0.0.1', 53))
- self.assertEqual('127.0.0.1#53', notify_out.addr_to_str(('127.0.0.1', 53)))
self.info.set_next_notify_target()
self.assertEqual(self.info.get_current_notify_target(), ('1.1.1.1', 5353))
self.info.set_next_notify_target()
@@ -99,36 +102,56 @@ class TestNotifyOut(unittest.TestCase):
self._notify._notify_infos[('example.org.', 'IN')] = MockZoneNotifyInfo('example.org.', 'IN')
self._notify._notify_infos[('example.org.', 'CH')] = MockZoneNotifyInfo('example.org.', 'CH')
- info = self._notify._notify_infos[('example.net.', 'IN')]
- info.notify_slaves.append(('127.0.0.1', 53))
- info.notify_slaves.append(('1.1.1.1', 5353))
+ net_info = self._notify._notify_infos[('example.net.', 'IN')]
+ net_info.notify_slaves.append(('127.0.0.1', 53))
+ net_info.notify_slaves.append(('1.1.1.1', 5353))
+ com_info = self._notify._notify_infos[('example.com.', 'IN')]
+ com_info.notify_slaves.append(('1.1.1.1', 5353))
+ com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
+ com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
def tearDown(self):
self._db_file.close()
os.unlink(self._db_file.name)
def test_send_notify(self):
+ notify_out._MAX_NOTIFY_NUM = 2
+
+ self._notify._nonblock_event.clear()
self._notify.send_notify('example.net')
+ self.assertTrue(self._notify._nonblock_event.isSet())
self.assertEqual(self._notify.notify_num, 1)
- self.assertEqual(self._notify._notifying_zones[0], ('example.net.','IN'))
+ self.assertEqual(self._notify._notifying_zones[0], ('example.net.', 'IN'))
self._notify.send_notify('example.com')
self.assertEqual(self._notify.notify_num, 2)
- self.assertEqual(self._notify._notifying_zones[1], ('example.com.','IN'))
+ self.assertEqual(self._notify._notifying_zones[1], ('example.com.', 'IN'))
- notify_out._MAX_NOTIFY_NUM = 3
+ # notify_num is equal to MAX_NOTIFY_NUM, append it to waiting_zones list.
+ self._notify._nonblock_event.clear()
self._notify.send_notify('example.com', 'CH')
- self.assertEqual(self._notify.notify_num, 3)
- self.assertEqual(self._notify._notifying_zones[2], ('example.com.','CH'))
-
- self._notify.send_notify('example.org.')
- self.assertEqual(self._notify._waiting_zones[0], ('example.org.', 'IN'))
- self._notify.send_notify('example.org.')
+        # Adding a waiting zone won't set nonblock_event.
+ self.assertFalse(self._notify._nonblock_event.isSet())
+ self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(1, len(self._notify._waiting_zones))
+ # zone_id is already in notifying_zones list, append it to waiting_zones list.
+ self._notify.send_notify('example.net')
+ self.assertEqual(2, len(self._notify._waiting_zones))
+ self.assertEqual(self._notify._waiting_zones[1], ('example.net.', 'IN'))
+
+ # zone_id is already in waiting_zones list, skip it.
+ self._notify.send_notify('example.net')
+ self.assertEqual(2, len(self._notify._waiting_zones))
+
+        # The zone has no slave servers; skip it.
self._notify.send_notify('example.org.', 'CH')
+ self.assertEqual(self._notify.notify_num, 2)
+ self.assertEqual(2, len(self._notify._waiting_zones))
+
+ self._notify.send_notify('example.org.')
+ self.assertEqual(self._notify.notify_num, 2)
self.assertEqual(2, len(self._notify._waiting_zones))
- self.assertEqual(self._notify._waiting_zones[1], ('example.org.', 'CH'))
def test_wait_for_notify_reply(self):
self._notify.send_notify('example.net.')
@@ -140,6 +163,11 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual(len(replied_zones), 0)
self.assertEqual(len(timeout_zones), 2)
+ # Trigger timeout events to "send" notifies via a mock socket
+ for zone in timeout_zones:
+ self._notify._zone_notify_handler(timeout_zones[zone],
+ notify_out._EVENT_TIMEOUT)
+
# Now make one socket be readable
self._notify._notify_infos[('example.net.', 'IN')].notify_timeout = time.time() + 10
self._notify._notify_infos[('example.com.', 'IN')].notify_timeout = time.time() + 10
@@ -171,6 +199,7 @@ class TestNotifyOut(unittest.TestCase):
self._notify.send_notify('example.net.')
self._notify.send_notify('example.com.')
notify_out._MAX_NOTIFY_NUM = 2
+ # zone example.org. has no slave servers.
self._notify.send_notify('example.org.')
self._notify.send_notify('example.com.', 'CH')
@@ -179,48 +208,71 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual(0, info.notify_try_num)
self.assertEqual(info.get_current_notify_target(), ('1.1.1.1', 5353))
self.assertEqual(2, self._notify.notify_num)
+ self.assertEqual(1, len(self._notify._waiting_zones))
self._notify._notify_next_target(info)
self.assertEqual(0, info.notify_try_num)
self.assertIsNone(info.get_current_notify_target())
self.assertEqual(2, self._notify.notify_num)
- self.assertEqual(1, len(self._notify._waiting_zones))
+ self.assertEqual(0, len(self._notify._waiting_zones))
example_com_info = self._notify._notify_infos[('example.com.', 'IN')]
self._notify._notify_next_target(example_com_info)
- self.assertEqual(2, self._notify.notify_num)
- self.assertEqual(2, len(self._notify._notifying_zones))
+ self.assertEqual(1, self._notify.notify_num)
+ self.assertEqual(1, len(self._notify._notifying_zones))
+ self.assertEqual(0, len(self._notify._waiting_zones))
def test_handle_notify_reply(self):
- self.assertEqual(notify_out._BAD_REPLY_PACKET, self._notify._handle_notify_reply(None, b'badmsg'))
+ fake_address = ('192.0.2.1', 53)
+ self.assertEqual(notify_out._BAD_REPLY_PACKET, self._notify._handle_notify_reply(None, b'badmsg', fake_address))
example_com_info = self._notify._notify_infos[('example.com.', 'IN')]
example_com_info.notify_msg_id = 0X2f18
# test with right notify reply message
data = b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._REPLY_OK, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._REPLY_OK, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright query id
data = b'\x2e\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QUERY_ID, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QUERY_ID, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright query name
data = b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03net\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QUERY_NAME, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QUERY_NAME, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright opcode
data = b'\x2f\x18\x80\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_OPCODE, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_OPCODE, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright qr
data = b'\x2f\x18\x10\x10\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QR, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QR, self._notify._handle_notify_reply(example_com_info, data, fake_address))
- def test_send_notify_message_udp(self):
+ def test_send_notify_message_udp_ipv4(self):
example_com_info = self._notify._notify_infos[('example.net.', 'IN')]
example_com_info.prepare_notify_out()
- ret = self._notify._send_notify_message_udp(example_com_info, ('1.1.1.1', 53))
+ ret = self._notify._send_notify_message_udp(example_com_info,
+ ('192.0.2.1', 53))
+ self.assertTrue(ret)
+ self.assertEqual(socket.AF_INET, example_com_info.sock_family)
+
+ def test_send_notify_message_udp_ipv6(self):
+ example_com_info = self._notify._notify_infos[('example.net.', 'IN')]
+ ret = self._notify._send_notify_message_udp(example_com_info,
+ ('2001:db8::53', 53))
self.assertTrue(ret)
+ self.assertEqual(socket.AF_INET6, example_com_info.sock_family)
+
+ def test_send_notify_message_with_bogus_address(self):
+ example_com_info = self._notify._notify_infos[('example.net.', 'IN')]
+
+ # As long as the underlying data source validates RDATA this shouldn't
+ # happen, but right now it's not actually the case. Even if the
+ # data source does its job, it's prudent to confirm the behavior for
+ # an unexpected case.
+ ret = self._notify._send_notify_message_udp(example_com_info,
+ ('invalid', 53))
+ self.assertFalse(ret)
def test_zone_notify_handler(self):
old_send_msg = self._notify._send_notify_message_udp
@@ -249,6 +301,15 @@ class TestNotifyOut(unittest.TestCase):
self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_NONE)
self.assertNotEqual(cur_tgt, example_net_info._notify_current)
+ cur_tgt = example_net_info._notify_current
+ example_net_info.create_socket('127.0.0.1')
+        # A DNS message that will result in a bad query ID, but what we are
+        # testing here is whether _handle_notify_reply is called correctly
+ example_net_info._sock.remote_end().send(b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01')
+ self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
+ self.assertNotEqual(cur_tgt, example_net_info._notify_current)
+
+
def _example_net_data_reader(self):
zone_data = [
('example.net.', '1000', 'IN', 'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
@@ -302,7 +363,7 @@ class TestNotifyOut(unittest.TestCase):
def test_prepare_select_info(self):
timeout, valid_fds, notifying_zones = self._notify._prepare_select_info()
- self.assertEqual(notify_out._IDLE_SLEEP_TIME, timeout)
+ self.assertEqual(None, timeout)
self.assertListEqual([], valid_fds)
self._notify._notify_infos[('example.net.', 'IN')]._sock = 1
@@ -326,10 +387,36 @@ class TestNotifyOut(unittest.TestCase):
def test_shutdown(self):
thread = self._notify.dispatcher()
self.assertTrue(thread.is_alive())
+        # nonblock_event won't be set since there are no notifying zones.
+ self.assertFalse(self._notify._nonblock_event.isSet())
+
+ # set nonblock_event manually
+ self._notify._nonblock_event.set()
+ # nonblock_event will be cleared soon since there are no notifying zones.
+ while (self._notify._nonblock_event.isSet()):
+ pass
+
+ # send notify
+ example_net_info = self._notify._notify_infos[('example.net.', 'IN')]
+ example_net_info.notify_slaves = [('127.0.0.1', 53)]
+ example_net_info.create_socket('127.0.0.1')
+ self._notify.send_notify('example.net')
+ self.assertTrue(self._notify._nonblock_event.isSet())
+        # Set notify_try_num to _MAX_NOTIFY_TRY_NUM; zone 'example.net' will soon be
+        # removed from the notifying zones and nonblock_event will be cleared since
+        # no notifying zone is left.
+ example_net_info.notify_try_num = notify_out._MAX_NOTIFY_TRY_NUM
+ while (self._notify._nonblock_event.isSet()):
+ pass
+
+ self.assertFalse(self._notify._nonblock_event.isSet())
self._notify.shutdown()
+        # nonblock_event should have been set to stop waiting.
+ self.assertTrue(self._notify._nonblock_event.isSet())
self.assertFalse(thread.is_alive())
if __name__== "__main__":
+ isc.log.init("bind10")
unittest.main()
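The tests above replace the zone's UDP socket with one end of a socketpair so that a reply can be injected without touching the network. A stripped-down sketch of that pattern (the names here are illustrative, not the test code itself):

import socket

class FakeUdpPeer:
    def __init__(self):
        # A connected pair: the code under test reads from the local end,
        # the test writes to the remote end.
        self._local, self._remote = socket.socketpair()

    def local_end(self):
        return self._local

    def inject_reply(self, data):
        # Pretend the slave server answered.
        self._remote.send(data)

peer = FakeUdpPeer()
peer.inject_reply(b'\x2f\x18')                   # any bytes will do here
assert peer.local_end().recv(512) == b'\x2f\x18'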
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 8f00a9b..0b08257 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1 +1,6 @@
-EXTRA_DIST = __init__.py parse_args.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/testutils/tsigctx_mock.py b/src/lib/python/isc/testutils/tsigctx_mock.py
new file mode 100644
index 0000000..a9af9b9
--- /dev/null
+++ b/src/lib/python/isc/testutils/tsigctx_mock.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from pydnspp import *
+
+class MockTSIGContext(TSIGContext):
+    """This is a mock of the TSIGContext class for testing.
+    Via its "error" attribute, you can fake the result of verify(), so
+    many TSIG related tests can run without requiring an actual crypto
+    setup. "error" should be either a TSIGError type value or a callable
+ object (typically a function). In the latter case, the callable object
+ will take the context as a parameter, and is expected to return a
+ TSIGError object.
+ """
+
+ def __init__(self, tsig_key):
+ super().__init__(tsig_key)
+ self.error = None
+ self.verify_called = 0 # number of verify() called
+
+ def sign(self, qid, data):
+ """Transparently delegate the processing to the super class.
+ It doesn't matter much anyway because normal applications that would
+ be implemented in Python normally won't call TSIGContext.sign()
+ directly.
+ """
+ return super().sign(qid, data)
+
+ def verify(self, tsig_record, data):
+ self.verify_called += 1
+        # Call the real "verify" so that we can notice any misuse (which
+        # would result in an exception).
+ super().verify(tsig_record, data)
+ return self.get_error()
+
+ def get_error(self):
+ if self.error is None:
+ return super().get_error()
+ if hasattr(self.error, '__call__'):
+ return self.error(self)
+ return self.error
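A hypothetical usage snippet for MockTSIGContext; TSIGKey and TSIGError come from pydnspp, but treat the exact key string and error constants as assumptions made only for illustration:

from pydnspp import TSIGKey, TSIGError
from isc.testutils.tsigctx_mock import MockTSIGContext

ctx = MockTSIGContext(TSIGKey("example.org:SFuWd/q99SzF8Yzd1QbB9g=="))

# Fake a fixed verification result without doing any real crypto:
ctx.error = TSIGError.BAD_SIG

# Or compute it dynamically, e.g. fail only the first verify() call:
ctx.error = lambda c: TSIGError.BAD_TIME if c.verify_called == 1 \
    else TSIGError.NOERROR

# The code under test then calls ctx.verify(...) and ctx.get_error() as
# usual and sees the faked result; ctx.verify_called counts the calls.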
diff --git a/src/lib/python/isc/util/Makefile.am b/src/lib/python/isc/util/Makefile.am
index f6cbb78..140e221 100644
--- a/src/lib/python/isc/util/Makefile.am
+++ b/src/lib/python/isc/util/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
python_PYTHON = __init__.py process.py socketserver_mixin.py file.py
pythondir = $(pyexecdir)/isc/util
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index 0ce96de..3b882b4 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -2,6 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = process_test.py socketserver_mixin_test.py file_test.py
EXTRA_DIST = $(PYTESTS)
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
@@ -11,6 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/xfrin/Makefile.am b/src/lib/python/isc/xfrin/Makefile.am
new file mode 100644
index 0000000..5804de6
--- /dev/null
+++ b/src/lib/python/isc/xfrin/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py diff.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libxfrin_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py: libxfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libxfrin_messages.mes
+
+pythondir = $(pyexecdir)/isc/xfrin
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/xfrin/__init__.py b/src/lib/python/isc/xfrin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
new file mode 100644
index 0000000..a2d9a7d
--- /dev/null
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This helps the XFR in process with accumulating parts of diff and applying
+it to the datasource.
+
+The name of the module is not yet fully decided. We might want to move it
+under isc.datasrc or somewhere else, because we might want to reuse it with
+future DDNS process. But until then, it lives here.
+"""
+
+import isc.dns
+import isc.log
+from isc.log_messages.libxfrin_messages import *
+
+class NoSuchZone(Exception):
+ """
+    This is raised if a diff for a non-existent zone is being created.
+ """
+ pass
+
+"""
+This is the number of changes we accumulate before calling Diff.apply
+automatically.
+
+The number 100 is just taken from BIND 9. We don't know the rationale
+for exactly this amount, but we think it is just some randomly chosen
+number.
+"""
+# If changing this, modify the tests accordingly as well.
+DIFF_APPLY_TRESHOLD = 100
+
+logger = isc.log.Logger('libxfrin')
+
+class Diff:
+ """
+    The class represents a diff against the current state of the datasource
+    on one zone. The usual way of working with it is creating it, then
+    putting a bunch of changes in and committing at the end.
+
+    If you change your mind, you can just stop using the object without
+    really committing it. In that case no changes will happen in the data
+    source.
+
+    The class works as a kind of buffer as well; it does not send the
+    changes to the underlying data source right away, but keeps them for
+    a while.
+ """
+ def __init__(self, ds_client, zone, replace=False):
+ """
+ Initializes the diff to a ready state. It checks the zone exists
+ in the datasource and if not, NoSuchZone is raised. This also creates
+ a transaction in the data source.
+
+ The ds_client is the datasource client containing the zone. Zone is
+ isc.dns.Name object representing the name of the zone (its apex).
+ If replace is true, the content of the whole zone is wiped out before
+ applying the diff.
+
+ You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
+ exceptions.
+ """
+ self.__updater = ds_client.get_updater(zone, replace)
+ if self.__updater is None:
+ # The no such zone case
+ raise NoSuchZone("Zone " + str(zone) +
+ " does not exist in the data source " +
+ str(ds_client))
+ self.__buffer = []
+
+ def __check_commited(self):
+ """
+        This checks if the diff is already committed or broken. If it is, it
+        raises ValueError. This check is for methods that need to work only on
+        not yet committed diffs.
+ """
+ if self.__updater is None:
+            raise ValueError("The diff is already committed or it has " +
+                             "raised an exception; it can no longer be used")
+
+ def __data_common(self, rr, operation):
+ """
+ Schedules an operation with rr.
+
+ It does all the real work of add_data and delete_data, including
+ all checks.
+ """
+ self.__check_commited()
+ if rr.get_rdata_count() != 1:
+ raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
+ 'it holds ' + str(rr.get_rdata_count()))
+ if rr.get_class() != self.__updater.get_class():
+ raise ValueError("The rrset's class " + str(rr.get_class()) +
+ " does not match updater's " +
+ str(self.__updater.get_class()))
+ self.__buffer.append((operation, rr))
+ if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+ # Time to auto-apply, so the data don't accumulate too much
+ self.apply()
+
+ def add_data(self, rr):
+ """
+ Schedules addition of an RR into the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'add')
+
+ def delete_data(self, rr):
+ """
+ Schedules deleting an RR from the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'delete')
+
+ def compact(self):
+ """
+ Tries to compact the operations in buffer a little by putting some of
+ the operations together, forming RRsets with more than one RR.
+
+        This is called by apply before putting the data into the datasource.
+        You may, but do not have to, call this manually.
+
+ Currently it merges consecutive same operations on the same
+ domain/type. We could do more fancy things, like sorting by the domain
+ and do more merging, but such diffs should be rare in practice anyway,
+ so we don't bother and do it this simple way.
+ """
+ buf = []
+ for (op, rrset) in self.__buffer:
+ old = buf[-1][1] if len(buf) > 0 else None
+ if old is None or op != buf[-1][0] or \
+ rrset.get_name() != old.get_name() or \
+ rrset.get_type() != old.get_type():
+ buf.append((op, isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())))
+ if rrset.get_ttl() != buf[-1][1].get_ttl():
+                logger.warn(LIBXFRIN_DIFFERENT_TTL, buf[-1][1].get_ttl(),
+                            rrset.get_ttl(), rrset.get_name(),
+                            rrset.get_type())
+ for rdatum in rrset.get_rdata():
+ buf[-1][1].add_rdata(rdatum)
+ self.__buffer = buf
+
+ def apply(self):
+ """
+ Push the buffered changes inside this diff down into the data source.
+ This does not stop you from adding more changes later through this
+ diff and it does not close the datasource transaction, so the changes
+ will not be shown to others yet. It just means the internal memory
+ buffer is flushed.
+
+ This is called from time to time automatically, but you can call it
+ manually if you really want to.
+
+        This raises ValueError if the diff was already committed.
+
+ It also can raise isc.datasrc.Error. If that happens, you should stop
+ using this object and abort the modification.
+ """
+ self.__check_commited()
+ # First, compact the data
+ self.compact()
+ try:
+ # Then pass the data inside the data source
+ for (operation, rrset) in self.__buffer:
+ if operation == 'add':
+ self.__updater.add_rrset(rrset)
+ elif operation == 'delete':
+ self.__updater.delete_rrset(rrset)
+ else:
+ raise ValueError('Unknown operation ' + operation)
+ # As everything is already in, drop the buffer
+ except:
+ # If there's a problem, we can't continue.
+ self.__updater = None
+ raise
+
+ self.__buffer = []
+
+ def commit(self):
+ """
+ Writes all the changes into the data source and makes them visible.
+ This closes the diff, you may not use it any more. If you try to use
+ it, you'll get ValueError.
+
+ This might raise isc.datasrc.Error.
+ """
+ self.__check_commited()
+ # Push the data inside the data source
+ self.apply()
+ # Make sure they are visible.
+ try:
+ self.__updater.commit()
+ finally:
+            # Remove the updater. That frees some resources, and it also
+            # marks this object as already committed, so any further use is
+            # rejected by __check_commited().
+
+            # We drop it even in case the commit failed, as a failed commit
+            # makes us unusable anyway.
+ self.__updater = None
+
+ def get_buffer(self):
+ """
+ Returns the current buffer of changes not yet passed into the data
+ source. It is in a form like [('add', rrset), ('delete', rrset),
+ ('delete', rrset), ...].
+
+ Probably useful only for testing and introspection purposes. Don't
+ modify the list.
+ """
+ return self.__buffer
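As a usage illustration of the Diff class above, here is a minimal sketch; ds_client stands for any data source client object providing get_updater() (the unit tests below fake one), and the zone and records are invented for the example:

import isc.dns
from isc.xfrin.diff import Diff

def one_rr(name, rrtype, rdata_text, ttl=3600):
    """Build a single-RR RRset, as add_data()/delete_data() require."""
    rrset = isc.dns.RRset(isc.dns.Name(name), isc.dns.RRClass.IN(),
                          rrtype, isc.dns.RRTTL(ttl))
    rrset.add_rdata(isc.dns.Rdata(rrtype, isc.dns.RRClass.IN(), rdata_text))
    return rrset

def apply_small_change(ds_client):
    """Replace one A record in example.org. and commit atomically."""
    diff = Diff(ds_client, isc.dns.Name('example.org.'))
    diff.delete_data(one_rr('www.example.org.', isc.dns.RRType.A(),
                            '192.0.2.1'))
    diff.add_data(one_rr('www.example.org.', isc.dns.RRType.A(),
                         '192.0.2.2'))
    diff.commit()   # flushes the buffer into the updater and commits it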
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
new file mode 100644
index 0000000..be943c8
--- /dev/null
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -0,0 +1,21 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libxfrin_messages python module.
+
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these do not match each other. As we combine them
+together, the later one is overwritten by the earlier one in the sequence.
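To make the TTL adjustment described above concrete, here is a hedged, self-contained sketch; FakeUpdater is an invented stand-in modelled on the unit tests that follow, not a real data source API:

import isc.log
isc.log.init("bind10")   # initialize logging before the libxfrin logger is used

import isc.dns
from isc.xfrin.diff import Diff

class FakeUpdater:
    # The minimum Diff expects from a data source client and its updater.
    def get_updater(self, zone, replace):
        return self
    def get_class(self):
        return isc.dns.RRClass.IN()
    def add_rrset(self, rrset):
        pass
    def delete_rrset(self, rrset):
        pass
    def commit(self):
        pass

def one_a(address, ttl):
    rrset = isc.dns.RRset(isc.dns.Name('a.example.org.'),
                          isc.dns.RRClass.IN(), isc.dns.RRType.A(),
                          isc.dns.RRTTL(ttl))
    rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
                                  isc.dns.RRClass.IN(), address))
    return rrset

diff = Diff(FakeUpdater(), isc.dns.Name('example.org.'))
diff.add_data(one_a('192.0.2.1', 3600))
diff.add_data(one_a('192.0.2.2', 7200))   # different TTL: logs the warning
diff.compact()
# The buffer now holds one RRset with both addresses and TTL 3600; the 7200
# of the second record was dropped, as the message text above says.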
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
new file mode 100644
index 0000000..416d62b
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -0,0 +1,24 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = diff_tests.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
new file mode 100644
index 0000000..9fab890
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import unittest
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.xfrin.diff import Diff, NoSuchZone
+
+class TestError(Exception):
+ """
+ Just to have something to be raised during the tests.
+ Not used outside.
+ """
+ pass
+
+class DiffTest(unittest.TestCase):
+ """
+ Tests for the isc.xfrin.diff.Diff class.
+
+    It also plays the role of a data source and an updater, so it can
+    manipulate some test variables while being called.
+ """
+ def setUp(self):
+ """
+ This sets internal variables so we can see nothing was called yet.
+
+ It also creates some variables used in multiple tests.
+ """
+ # Track what was called already
+ self.__updater_requested = False
+ self.__compact_called = False
+ self.__data_operations = []
+ self.__apply_called = False
+ self.__commit_called = False
+ self.__broken_called = False
+ self.__warn_called = False
+ self.__should_replace = False
+ # Some common values
+ self.__rrclass = RRClass.IN()
+ self.__type = RRType.A()
+ self.__ttl = RRTTL(3600)
+ # And RRsets
+ # Create two valid rrsets
+ self.__rrset1 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rdata = Rdata(self.__type, self.__rrclass, '192.0.2.1')
+ self.__rrset1.add_rdata(self.__rdata)
+ self.__rrset2 = RRset(Name('b.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset2.add_rdata(self.__rdata)
+ # And two invalid
+ self.__rrset_empty = RRset(Name('empty.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi = RRset(Name('multi.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi.add_rdata(self.__rdata)
+ self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
+ '192.0.2.2'))
+
+ def __mock_compact(self):
+ """
+ This can be put into the diff to hook into its compact method and see
+ if it gets called.
+ """
+ self.__compact_called = True
+
+ def __mock_apply(self):
+ """
+ This can be put into the diff to hook into its apply method and see
+        if it gets called.
+ """
+ self.__apply_called = True
+
+ def __broken_operation(self, *args):
+ """
+ This can be used whenever an operation should fail. It raises TestError.
+        It takes any number of parameters, so it can be put almost
+        anywhere.
+ """
+ self.__broken_called = True
+ raise TestError("Test error")
+
+ def warn(self, *args):
+ """
+        This is for checking that the warn function was called; we replace the
+        logger in the tested module with this object.
+ """
+ self.__warn_called = True
+
+ def commit(self):
+ """
+ This is part of pretending to be a zone updater. This notes the commit
+ was called.
+ """
+ self.__commit_called = True
+
+ def add_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ addition of an rrset was requested.
+ """
+ self.__data_operations.append(('add', rrset))
+
+ def delete_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ removal of an rrset was requested.
+ """
+ self.__data_operations.append(('delete', rrset))
+
+ def get_class(self):
+ """
+ This one is part of pretending to be a zone updater. It returns
+ the IN class.
+ """
+ return self.__rrclass
+
+ def get_updater(self, zone_name, replace):
+ """
+ This one pretends this is the data source client and serves
+ getting an updater.
+
+ If zone_name is 'none.example.org.', it returns None, otherwise
+ it returns self.
+ """
+ # The diff should not delete the old data.
+ self.assertEqual(self.__should_replace, replace)
+ self.__updater_requested = True
+ # Pretend this zone doesn't exist
+ if zone_name == Name('none.example.org.'):
+ return None
+ else:
+ return self
+
+ def test_create(self):
+ """
+        This tests the case when the diff is successfully created. It just
+        checks that it does not throw and that it gets the updater.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.assertTrue(self.__updater_requested)
+ self.assertEqual([], diff.get_buffer())
+
+ def test_create_nonexist(self):
+ """
+ Try to create a diff on a zone that doesn't exist. This should
+ raise a correct exception.
+ """
+ self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
+ self.assertTrue(self.__updater_requested)
+
+ def __data_common(self, diff, method, operation):
+ """
+        Common part of the tests for test_add and test_delete.
+ """
+ # Try putting there the bad data first
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertRaises(ValueError, method, self.__rrset_multi)
+ # They were not added
+ self.assertEqual([], diff.get_buffer())
+ # Put some proper data into the diff
+ method(self.__rrset1)
+ method(self.__rrset2)
+ dlist = [(operation, self.__rrset1), (operation, self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Check the data are not destroyed by raising an exception because of
+ # bad data
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertEqual(dlist, diff.get_buffer())
+
+ def test_add(self):
+ """
+        Try to add a few items into the diff and see they are stored in there.
+
+        Also try passing an rrset whose number of RRs is different from 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.add_data, 'add')
+
+ def test_delete(self):
+ """
+        Try scheduling removal of a few items into the diff and see they are
+        stored in there.
+
+        Also try passing an rrset whose number of RRs is different from 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.delete_data, 'delete')
+
+ def test_apply(self):
+ """
+        Schedule a few additions and check that apply works by passing the
+ data into the updater.
+ """
+ # Prepare the diff
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ dlist = [('add', self.__rrset1), ('delete', self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Do the apply, hook the compact method
+ diff.compact = self.__mock_compact
+ diff.apply()
+ # It should call the compact
+ self.assertTrue(self.__compact_called)
+ # And pass the data. Our local history of what happened is the same
+ # format, so we can check the same way
+ self.assertEqual(dlist, self.__data_operations)
+ # And the buffer in diff should become empty, as everything
+ # got inside.
+ self.assertEqual([], diff.get_buffer())
+
+ def test_commit(self):
+ """
+ If we call a commit, it should first apply whatever changes are
+ left (we hook into that instead of checking the effect) and then
+ the commit on the updater should have been called.
+
+ Then we check it raises value error for whatever operation we try.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ orig_apply = diff.apply
+ diff.apply = self.__mock_apply
+ diff.commit()
+ self.assertTrue(self.__apply_called)
+ self.assertTrue(self.__commit_called)
+ # The data should be handled by apply which we replaced.
+ self.assertEqual([], self.__data_operations)
+ # Now check all range of other methods raise ValueError
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+ diff.apply = orig_apply
+ self.assertRaises(ValueError, diff.apply)
+ # This one does not state it should raise, so check it doesn't
+ # But it is NOP in this situation anyway
+ diff.compact()
+
+ def test_autoapply(self):
+ """
+        Test that apply is called all by itself after 100 tasks are added.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # A method to check the apply is called _after_ the 100th element
+ # is added. We don't use it anywhere else, so we define it locally
+        # as a local function.
+ def check():
+ self.assertEqual(100, len(diff.get_buffer()))
+ self.__mock_apply()
+ orig_apply = diff.apply
+ diff.apply = check
+ # If we put 99, nothing happens yet
+ for i in range(0, 99):
+ diff.add_data(self.__rrset1)
+ expected = [('add', self.__rrset1)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ # Now we push the 100th and it should call the apply method
+ # This will _not_ flush the data yet, as we replaced the method.
+ # It, however, would in the real life.
+ diff.add_data(self.__rrset1)
+ # Now the apply method (which is replaced by our check) should
+ # have been called. If it wasn't, this is false. If it was, but
+ # still with 99 elements, the check would complain
+ self.assertTrue(self.__apply_called)
+ # Reset the buffer by calling the original apply.
+ orig_apply()
+ self.assertEqual([], diff.get_buffer())
+ # Similar with delete
+ self.__apply_called = False
+ for i in range(0, 99):
+ diff.delete_data(self.__rrset2)
+ expected = [('delete', self.__rrset2)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ diff.delete_data(self.__rrset2)
+ self.assertTrue(self.__apply_called)
+
+ def test_compact(self):
+ """
+        Test that the compaction works as expected, e.g. it compacts only
+        consecutive changes of the same operation on the same domain/type.
+
+ The test case checks that it does merge them, but also puts some
+ different operations "in the middle", changes the type and name and
+        places the same kind of change further away from each other to see
+        that they are not merged in that case.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # Check we can do a compact on empty data, it shouldn't break
+ diff.compact()
+ self.assertEqual([], diff.get_buffer())
+ # This data is the way it should look like after the compact
+ # ('operation', 'domain.prefix', 'type', ['rdata', 'rdata'])
+        # The notes say why each pair of consecutive entries can't be merged
+ data = [
+ ('add', 'a', 'A', ['192.0.2.1', '192.0.2.2']),
+ # Different type.
+ ('add', 'a', 'AAAA', ['2001:db8::1', '2001:db8::2']),
+ # Different operation
+ ('delete', 'a', 'AAAA', ['2001:db8::3']),
+ # Different domain
+ ('delete', 'b', 'AAAA', ['2001:db8::4']),
+ # This does not get merged with the first, even if logically
+ # possible. We just don't do this.
+ ('add', 'a', 'A', ['192.0.2.3'])
+ ]
+ # Now, fill the data into the diff, in a "flat" way, one by one
+ for (op, nprefix, rrtype, rdata) in data:
+ name = Name(nprefix + '.example.org.')
+ rrtype_obj = RRType(rrtype)
+ for rdatum in rdata:
+ rrset = RRset(name, self.__rrclass, rrtype_obj, self.__ttl)
+ rrset.add_rdata(Rdata(rrtype_obj, self.__rrclass, rdatum))
+ if op == 'add':
+ diff.add_data(rrset)
+ else:
+ diff.delete_data(rrset)
+ # Compact it
+ diff.compact()
+ # Now check they got compacted. They should be in the same order as
+ # pushed inside. So it should match data, except that the entries are
+ # now RRsets built from isc.dns objects.
+ def check():
+ buf = diff.get_buffer()
+ self.assertEqual(len(data), len(buf))
+ for (expected, received) in zip(data, buf):
+ (eop, ename, etype, edata) = expected
+ (rop, rrrset) = received
+ self.assertEqual(eop, rop)
+ ename_obj = Name(ename + '.example.org.')
+ self.assertEqual(ename_obj, rrrset.get_name())
+ # We check on names to make sure they are printed nicely
+ self.assertEqual(etype, str(rrrset.get_type()))
+ rdata = rrrset.get_rdata()
+ self.assertEqual(len(edata), len(rdata))
+ # It should also preserve the order
+ for (edatum, rdatum) in zip(edata, rdata):
+ self.assertEqual(edatum, str(rdatum))
+ check()
+ # A second compact should do nothing, but must not break anything
+ diff.compact()
+ check()
+
+ def test_wrong_class(self):
+ """
+ Test a wrong class of rrset is rejected.
+ """
+ diff = Diff(self, Name('example.org.'))
+ rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ self.__ttl)
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ self.assertRaises(ValueError, diff.add_data, rrset)
+ self.assertRaises(ValueError, diff.delete_data, rrset)
+
+ def __do_raise_test(self):
+ """
+ Do a raise test. Expects that one of the operations is exchanged for
+ a broken version.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.delete_data(self.__rrset2)
+ self.assertRaises(TestError, diff.commit)
+ self.assertTrue(self.__broken_called)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.delete_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.apply)
+
+ def test_raise_add(self):
+ """
+ Test the exception from add_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.add_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_delete(self):
+ """
+ Test the exception from delete_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.delete_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_commit(self):
+ """
+ Test the exception from updater's commit gets propagated and it can't be
+ used afterwards.
+ """
+ self.commit = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_ttl(self):
+ """
+ Test the TTL handling. A warn function should be called if the TTLs
+ differ, but that's all; it should not crash or raise.
+ """
+ orig_logger = isc.xfrin.diff.logger
+ try:
+ isc.xfrin.diff.logger = self
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(120))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.2'))
+ diff.add_data(rrset2)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(6000))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.3'))
+ diff.add_data(rrset2)
+ # They should get compacted together and complain.
+ diff.compact()
+ self.assertEqual(1, len(diff.get_buffer()))
+ # The TTL stays on the first value, no matter if smaller or bigger
+ # ones come later.
+ self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
+ self.assertTrue(self.__warn_called)
+ finally:
+ isc.xfrin.diff.logger = orig_logger
+
+ def test_replace(self):
+ """
+ Test that when we want to replace the whole zone, it is propagated.
+ """
+ self.__should_replace = True
+ diff = Diff(self, "example.org.", True)
+ self.assertTrue(self.__updater_requested)
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/resolve/Makefile.am b/src/lib/resolve/Makefile.am
index 0b29da4..ceccce8 100644
--- a/src/lib/resolve/Makefile.am
+++ b/src/lib/resolve/Makefile.am
@@ -7,16 +7,36 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-CLEANFILES = *.gcno *.gcda
+# Define rule to build logging source files from message file
+resolve_messages.h resolve_messages.cc: resolve_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/resolve/resolve_messages.mes
+
+# Tell Automake that the resolve_messages.{cc,h} source files are created in the build
+# process, so it must create these before doing anything else. Although they
+# are a dependency of the library (so will be created from the message file
+# anyway), there is no guarantee as to exactly _when_ in the build they will be
+# created. As the .h file is included in other source files (so must be
+# present when they are compiled), the safest option is to create it first.
+BUILT_SOURCES = resolve_messages.h resolve_messages.cc
+
+CLEANFILES = *.gcno *.gcda resolve_messages.cc resolve_messages.h
lib_LTLIBRARIES = libresolve.la
libresolve_la_SOURCES = resolve.h resolve.cc
+libresolve_la_SOURCES += resolve_log.h resolve_log.cc
libresolve_la_SOURCES += resolver_interface.h
libresolve_la_SOURCES += resolver_callback.h resolver_callback.cc
libresolve_la_SOURCES += response_classifier.cc response_classifier.h
libresolve_la_SOURCES += recursive_query.cc recursive_query.h
+
+nodist_libresolve_la_SOURCES = resolve_messages.h resolve_messages.cc
+
libresolve_la_LIBADD = $(top_builddir)/src/lib/dns/libdns++.la
libresolve_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+libresolve_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
+
+# The message file should be in the distribution.
+EXTRA_DIST = resolve_messages.mes
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index b753cc9..0d3fb4c 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -16,14 +16,13 @@
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h> // for some IPC/network system calls
+#include <string>
#include <boost/lexical_cast.hpp>
#include <boost/bind.hpp>
#include <config.h>
-#include <log/dummylog.h>
-
#include <dns/question.h>
#include <dns/message.h>
#include <dns/opcode.h>
@@ -31,6 +30,7 @@
#include <dns/rdataclass.h>
#include <resolve/resolve.h>
+#include <resolve/resolve_log.h>
#include <cache/resolver_cache.h>
#include <nsas/address_request_callback.h>
#include <nsas/nameserver_address.h>
@@ -41,10 +41,10 @@
#include <asiolink/io_service.h>
#include <resolve/recursive_query.h>
-using isc::log::dlog;
using namespace isc::dns;
using namespace isc::util;
using namespace isc::asiolink;
+using namespace isc::resolve;
namespace isc {
namespace asiodns {
@@ -64,14 +64,27 @@ hasAddress(const Name& name, const RRClass& rrClass,
cache.lookup(name, RRType::AAAA(), rrClass) != RRsetPtr());
}
+// Convenience function for debug messages. Question::toText() includes
+// a trailing newline in its output, which makes it awkward to embed in a
+// message. This just strips that newline from it.
+std::string
+questionText(const isc::dns::Question& question) {
+ std::string text = question.toText();
+ if (!text.empty()) {
+ text.erase(text.size() - 1);
+ }
+ return (text);
}
+} // anonymous namespace
+
/// \brief Find deepest usable delegation in the cache
///
/// This finds the deepest delegation we have in cache and is safe to use.
/// It is not public function, therefore it's not in header. But it's not
/// in anonymous namespace, so we can call it from unittests.
/// \param name The name we want to delegate to.
+/// \param rrclass The class.
/// \param cache The place too look for known delegations.
std::string
deepestDelegation(Name name, RRClass rrclass,
@@ -135,8 +148,7 @@ RecursiveQuery::RecursiveQuery(DNSService& dns_service,
// Set the test server - only used for unit testing.
void
RecursiveQuery::setTestServer(const std::string& address, uint16_t port) {
- dlog("Setting test server to " + address + "(" +
- boost::lexical_cast<std::string>(port) + ")");
+ LOG_WARN(isc::resolve::logger, RESLIB_TEST_SERVER).arg(address).arg(port);
test_server_.first = address;
test_server_.second = port;
}
@@ -165,14 +177,16 @@ public:
ResolverNSASCallback(RunningQuery* rq) : rq_(rq) {}
void success(const isc::nsas::NameserverAddress& address) {
- dlog("Found a nameserver, sending query to " + address.getAddress().toText());
+ // Success callback, send query to found namesever
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CB, RESLIB_RUNQ_SUCCESS)
+ .arg(address.getAddress().toText());
rq_->nsasCallbackCalled();
rq_->sendTo(address);
}
void unreachable() {
- dlog("Nameservers unreachable");
- // Drop query or send servfail?
+ // Nameservers unreachable: drop query or send servfail?
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CB, RESLIB_RUNQ_FAIL);
rq_->nsasCallbackCalled();
rq_->makeSERVFAIL();
rq_->callCallback(true);
@@ -298,12 +312,16 @@ private:
// if we have a response for our query stored already. if
// so, call handlerecursiveresponse(), if not, we call send()
void doLookup() {
- dlog("doLookup: try cache");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RUNQ_CACHE_LOOKUP)
+ .arg(questionText(question_));
+
Message cached_message(Message::RENDER);
isc::resolve::initResponseMessage(question_, cached_message);
if (cache_.lookup(question_.getName(), question_.getType(),
question_.getClass(), cached_message)) {
- dlog("Message found in cache, continuing with that");
+
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RUNQ_CACHE_FIND)
+ .arg(questionText(question_));
// Should these be set by the cache too?
cached_message.setOpcode(Opcode::QUERY());
cached_message.setRcode(Rcode::NOERROR());
@@ -313,9 +331,10 @@ private:
stop();
}
} else {
- dlog("doLookup: get lowest usable delegation from cache");
cur_zone_ = deepestDelegation(question_.getName(),
question_.getClass(), cache_);
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_DEEPEST)
+ .arg(questionText(question_)).arg(cur_zone_);
send();
}
@@ -347,8 +366,9 @@ private:
void send(IOFetch::Protocol protocol = IOFetch::UDP) {
protocol_ = protocol; // Store protocol being used for this
if (test_server_.second != 0) {
- dlog("Sending upstream query (" + question_.toText() +
- ") to test server at " + test_server_.first);
+ // Send query to test server
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_TEST_UPSTREAM)
+ .arg(questionText(question_)).arg(test_server_.first);
gettimeofday(&current_ns_qsent_time, NULL);
++outstanding_events_;
IOFetch query(protocol, io_, question_,
@@ -356,10 +376,13 @@ private:
test_server_.second, buffer_, this,
query_timeout_);
io_.get_io_service().post(query);
+
} else {
// Ask the NSAS for an address for the current zone,
// the callback will call the actual sendTo()
- dlog("Look up nameserver for " + cur_zone_ + " in NSAS");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_NSAS_LOOKUP)
+ .arg(cur_zone_);
+
// Can we have multiple calls to nsas_out? Let's assume not
// for now
assert(!nsas_callback_out_);
@@ -387,7 +410,7 @@ private:
// error message)
// returns false if we are not done
bool handleRecursiveAnswer(const Message& incoming) {
- dlog("Handle response");
+
// In case we get a CNAME, we store the target
// here (classify() will set it when it walks through
// the cname chain to verify it).
@@ -402,46 +425,60 @@ private:
switch (category) {
case isc::resolve::ResponseClassifier::ANSWER:
case isc::resolve::ResponseClassifier::ANSWERCNAME:
- // Done. copy and return.
- dlog("Response is an answer");
+ // Answer received - copy and return.
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_ANSWER)
+ .arg(questionText(question_));
isc::resolve::copyResponseMessage(incoming, answer_message_);
cache_.update(*answer_message_);
return true;
break;
+
case isc::resolve::ResponseClassifier::CNAME:
- dlog("Response is CNAME!");
+ // CNAME received.
+
// (unfinished) CNAME. We set our question_ to the CNAME
// target, then start over at the beginning (for now, that
// is, we reset our 'current servers' to the root servers).
if (cname_count_ >= RESOLVER_MAX_CNAME_CHAIN) {
- // just give up
- dlog("CNAME chain too long");
+ // CNAME chain too long - just give up
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_LONG_CHAIN)
+ .arg(questionText(question_));
makeSERVFAIL();
return true;
}
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_CNAME)
+ .arg(questionText(question_));
+
answer_message_->appendSection(Message::SECTION_ANSWER,
incoming);
question_ = Question(cname_target, question_.getClass(),
question_.getType());
- dlog("Following CNAME chain to " + question_.toText());
+ // Follow CNAME chain.
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_FOLLOW_CNAME)
+ .arg(questionText(question_));
doLookup();
return false;
break;
+
case isc::resolve::ResponseClassifier::NXDOMAIN:
case isc::resolve::ResponseClassifier::NXRRSET:
- dlog("Response is NXDOMAIN or NXRRSET");
- // NXDOMAIN, just copy and return.
- dlog(incoming.toText());
+ // Received NXDOMAIN or NXRRSET, just copy and return
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_NXDOM_NXRR)
+ .arg(questionText(question_));
isc::resolve::copyResponseMessage(incoming, answer_message_);
// no negcache yet
//cache_.update(*answer_message_);
return true;
break;
+
case isc::resolve::ResponseClassifier::REFERRAL:
- dlog("Response is referral");
+ // Response is a referral
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_REFERRAL)
+ .arg(questionText(question_));
+
cache_.update(incoming);
// Referral. For now we just take the first glue address
// we find and continue with that
@@ -460,7 +497,8 @@ private:
// (this requires a few API changes in related
// libraries, so as not to need many conversions)
cur_zone_ = rrs->getName().toText();
- dlog("Referred to zone " + cur_zone_);
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_REFER_ZONE)
+ .arg(cur_zone_);
found_ns = true;
break;
}
@@ -484,7 +522,10 @@ private:
nsas_callback_, ANY_OK, glue_hints);
return false;
} else {
- dlog("No NS RRset in referral?");
+ // Referral was received but did not contain an NS RRset.
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_NO_NS_RRSET)
+ .arg(questionText(question_));
+
// TODO this will result in answering with the delegation. oh well
isc::resolve::copyResponseMessage(incoming, answer_message_);
return true;
@@ -494,7 +535,8 @@ private:
// Truncated packet. If the protocol we used for the last one is
// UDP, re-query using TCP. Otherwise regard it as an error.
if (protocol_ == IOFetch::UDP) {
- dlog("Response truncated, re-querying over TCP");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TRUNCATED)
+ .arg(questionText(question_));
send(IOFetch::TCP);
return false;
}
@@ -513,6 +555,8 @@ private:
case isc::resolve::ResponseClassifier::NOTSINGLE:
case isc::resolve::ResponseClassifier::OPCODE:
case isc::resolve::ResponseClassifier::RCODE:
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERR)
+ .arg(questionText(question_));
// Should we try a different server rather than SERVFAIL?
makeSERVFAIL();
return true;
@@ -677,7 +721,7 @@ public:
rtt = 1000 * (cur_time.tv_sec - current_ns_qsent_time.tv_sec);
rtt += (cur_time.tv_usec - current_ns_qsent_time.tv_usec) / 1000;
}
- dlog("RTT: " + boost::lexical_cast<std::string>(rtt));
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_RTT).arg(rtt);
current_ns_address.updateRTT(rtt);
if (rtt_recorder_) {
rtt_recorder_->addRtt(rtt);
@@ -701,19 +745,22 @@ public:
stop();
}
} catch (const isc::dns::DNSProtocolError& dpe) {
- dlog("DNS Protocol error in answer for " +
- question_.toText() + " " +
- question_.getType().toText() + ": " +
- dpe.what());
// Right now, we treat this similar to timeouts
// (except we don't store RTT)
// We probably want to make this an integral part
// of the fetch data process. (TODO)
if (retries_--) {
- dlog("Retrying");
+ // Retry
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS,
+ RESLIB_PROTOCOL_RETRY)
+ .arg(questionText(question_)).arg(dpe.what())
+ .arg(retries_);
send();
} else {
- dlog("Giving up");
+ // Give up
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS,
+ RESLIB_PROTOCOL)
+ .arg(questionText(question_)).arg(dpe.what());
if (!callback_called_) {
makeSERVFAIL();
callCallback(true);
@@ -723,13 +770,17 @@ public:
}
} else if (!done_ && retries_--) {
// Query timed out, but we have some retries, so send again
- dlog("Timeout for " + question_.toText() + " to " + current_ns_address.getAddress().toText() + ", resending query");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TIMEOUT_RETRY)
+ .arg(questionText(question_))
+ .arg(current_ns_address.getAddress().toText()).arg(retries_);
current_ns_address.updateRTT(isc::nsas::AddressEntry::UNREACHABLE);
send();
} else {
// We are either already done, or out of retries
if (result == IOFetch::TIME_OUT) {
- dlog("Timeout for " + question_.toText() + " to " + current_ns_address.getAddress().toText() + ", giving up");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TIMEOUT)
+ .arg(questionText(question_))
+ .arg(current_ns_address.getAddress().toText());
current_ns_address.updateRTT(isc::nsas::AddressEntry::UNREACHABLE);
}
if (!callback_called_) {
@@ -793,8 +844,10 @@ private:
buffer_->clear();
int serverIndex = rand() % uc;
ConstQuestionPtr question = *(query_message_->beginQuestion());
- dlog("Sending upstream query (" + question->toText() +
- ") to " + upstream_->at(serverIndex).first);
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_UPSTREAM)
+ .arg(questionText(*question))
+ .arg(upstream_->at(serverIndex).first);
+
++outstanding_events_;
// Forward the query, create the IOFetch with
// query message, so that query flags can be forwarded
@@ -934,14 +987,16 @@ RecursiveQuery::resolve(const QuestionPtr& question,
OutputBufferPtr buffer(new OutputBuffer(0));
- dlog("Asked to resolve: " + question->toText());
-
- dlog("Try out cache first (direct call to resolve)");
// First try to see if we have something cached in the messagecache
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_RESOLVE)
+ .arg(questionText(*question)).arg(1);
if (cache_.lookup(question->getName(), question->getType(),
question->getClass(), *answer_message) &&
answer_message->getRRCount(Message::SECTION_ANSWER) > 0) {
- dlog("Message found in cache, returning that");
+ // Message found, return that
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RECQ_CACHE_FIND)
+ .arg(questionText(*question)).arg(1);
+
// TODO: err, should cache set rcode as well?
answer_message->setRcode(Rcode::NOERROR());
callback->success(answer_message);
@@ -952,14 +1007,18 @@ RecursiveQuery::resolve(const QuestionPtr& question,
question->getType(),
question->getClass());
if (cached_rrset) {
- dlog("Found single RRset in cache");
+ // Found single RRset in cache
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RRSET_FOUND)
+ .arg(questionText(*question)).arg(1);
answer_message->addRRset(Message::SECTION_ANSWER,
cached_rrset);
answer_message->setRcode(Rcode::NOERROR());
callback->success(answer_message);
} else {
- dlog("Message not found in cache, starting recursive query");
- // It will delete itself when it is done
+ // Message not found in cache, start recursive query. It will
+ // delete itself when it is done
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_RECQ_CACHE_NO_FIND)
+ .arg(questionText(*question)).arg(1);
new RunningQuery(io, *question, answer_message,
test_server_, buffer, callback,
query_timeout_, client_timeout_,
@@ -988,14 +1047,17 @@ RecursiveQuery::resolve(const Question& question,
answer_message->setOpcode(isc::dns::Opcode::QUERY());
answer_message->addQuestion(question);
- dlog("Asked to resolve: " + question.toText());
-
// First try to see if we have something cached in the messagecache
- dlog("Try out cache first (started by incoming event)");
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_RESOLVE)
+ .arg(questionText(question)).arg(2);
+
if (cache_.lookup(question.getName(), question.getType(),
question.getClass(), *answer_message) &&
answer_message->getRRCount(Message::SECTION_ANSWER) > 0) {
- dlog("Message found in cache, returning that");
+
+ // Message found, return that
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RECQ_CACHE_FIND)
+ .arg(questionText(question)).arg(2);
// TODO: err, should cache set rcode as well?
answer_message->setRcode(Rcode::NOERROR());
crs->success(answer_message);
@@ -1006,14 +1068,19 @@ RecursiveQuery::resolve(const Question& question,
question.getType(),
question.getClass());
if (cached_rrset) {
- dlog("Found single RRset in cache");
+ // Found single RRset in cache
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_CACHE, RESLIB_RRSET_FOUND)
+ .arg(questionText(question)).arg(2);
answer_message->addRRset(Message::SECTION_ANSWER,
cached_rrset);
answer_message->setRcode(Rcode::NOERROR());
crs->success(answer_message);
+
} else {
- dlog("Message not found in cache, starting recursive query");
- // It will delete itself when it is done
+ // Message not found in cache, start recursive query. It will
+ // delete itself when it is done
+ LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_TRACE, RESLIB_RECQ_CACHE_NO_FIND)
+ .arg(questionText(question)).arg(2);
new RunningQuery(io, question, answer_message,
test_server_, buffer, crs, query_timeout_,
client_timeout_, lookup_timeout_, retries_,
diff --git a/src/lib/resolve/recursive_query.h b/src/lib/resolve/recursive_query.h
index b9fb80d..9af2d72 100644
--- a/src/lib/resolve/recursive_query.h
+++ b/src/lib/resolve/recursive_query.h
@@ -38,7 +38,7 @@ public:
///
/// Adds a round-trip time to the internal vector of times.
///
- /// \param RTT to record.
+ /// \param rtt RTT to record.
void addRtt(uint32_t rtt) {
rtt_.push_back(rtt);
}
@@ -73,6 +73,10 @@ public:
///
/// \param dns_service The DNS Service to perform the recursive
/// query on.
+ /// \param nsas Nameserver address store, used to hold information about zone
+ /// nameservers.
+ /// \param cache Resolver cache object, used to hold information about retrieved
+ /// records.
/// \param upstream Addresses and ports of the upstream servers
/// to forward queries to.
/// \param upstream_root Addresses and ports of the root servers
@@ -133,8 +137,10 @@ public:
/// object.
///
/// \param question The question being answered <qname/qclass/qtype>
- /// \param answer_message An output Message into which the final response will be copied
- /// \param buffer An output buffer into which the intermediate responses will be copied
+ /// \param answer_message An output Message into which the final response will
+ /// be copied.
+ /// \param buffer An output buffer into which the intermediate responses will
+ /// be copied.
/// \param server A pointer to the \c DNSServer object handling the client
void resolve(const isc::dns::Question& question,
isc::dns::MessagePtr answer_message,
@@ -147,6 +153,10 @@ public:
/// function resolve().
///
/// \param query_message the full query got from client.
+ /// \param answer_message the full answer received from another server.
+ /// \param buffer Output buffer into which the responses will be copied.
+ /// \param server Server object that handles receipt and processing of the
+ /// received messages.
/// \param callback callback object
void forward(isc::dns::ConstMessagePtr query_message,
isc::dns::MessagePtr answer_message,
diff --git a/src/lib/resolve/resolve.h b/src/lib/resolve/resolve.h
index 550b620..0a588e2 100644
--- a/src/lib/resolve/resolve.h
+++ b/src/lib/resolve/resolve.h
@@ -37,7 +37,6 @@ namespace resolve {
/// section), you can simply use this to create an error response.
///
/// \param answer_message The message to clear and place the error in
-/// \param question The question to add to the
/// \param error_code The error Rcode
void makeErrorMessage(isc::dns::MessagePtr answer_message,
const isc::dns::Rcode& error_code);
diff --git a/src/lib/resolve/resolve_log.cc b/src/lib/resolve/resolve_log.cc
new file mode 100644
index 0000000..e41d8d2
--- /dev/null
+++ b/src/lib/resolve/resolve_log.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// Defines the logger used by the resolver library
+
+#include <resolve/resolve_log.h>
+
+namespace isc {
+namespace resolve {
+
+isc::log::Logger logger("reslib"); // Distinct from "resolver"
+
+} // namespace resolve
+} // namespace isc
+
diff --git a/src/lib/resolve/resolve_log.h b/src/lib/resolve/resolve_log.h
new file mode 100644
index 0000000..828b9d3
--- /dev/null
+++ b/src/lib/resolve/resolve_log.h
@@ -0,0 +1,53 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __RESOLVE_LOG__H
+#define __RESOLVE_LOG__H
+
+#include <log/macros.h>
+#include "resolve_messages.h"
+
+namespace isc {
+namespace resolve {
+
+/// \brief Resolver Library Logging
+///
+/// Defines the levels used to output debug messages in the resolver library.
+/// Note that higher numbers equate to more verbose (and detailed) output.
+
+// The first level traces normal operations
+const int RESLIB_DBG_TRACE = DBGLVL_TRACE_BASIC;
+
+// The next level extends the normal operations and records the results of the
+// lookups.
+const int RESLIB_DBG_RESULTS = DBGLVL_TRACE_BASIC_DATA;
+
+// Report cache lookups and results
+const int RESLIB_DBG_CACHE = DBGLVL_TRACE_DETAIL_DATA;
+
+// Indicate when callbacks are called
+const int RESLIB_DBG_CB = DBGLVL_TRACE_DETAIL_DATA + 10;
+
+
+/// \brief Resolver Library Logger
+///
+/// Define the logger used to log messages. We could define it in multiple
+/// modules, but defining in a single module and linking to it saves time and
+/// space.
+extern isc::log::Logger logger;
+
+} // namespace resolve
+} // namespace isc
+
+#endif // __RESOLVE_LOG__H
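A minimal usage sketch of the logger and debug levels defined above; the wrapper function name and the literal instance number are invented for illustration, while the LOG_DEBUG pattern, the RESLIB_DBG_TRACE level and the RESLIB_RESOLVE message ID come from this branch.

    #include <string>

    #include <resolve/resolve_log.h>

    namespace {

    // Emit the RESLIB_RESOLVE debug message at the basic trace level.
    // The message takes two placeholders: the question text and the
    // resolve() instance number (1 is used here purely as an example).
    void
    traceResolveCall(const std::string& question_text) {
        LOG_DEBUG(isc::resolve::logger, isc::resolve::RESLIB_DBG_TRACE,
                  isc::resolve::RESLIB_RESOLVE)
            .arg(question_text).arg(1);
    }

    } // anonymous namespace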
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
new file mode 100644
index 0000000..f702d9b
--- /dev/null
+++ b/src/lib/resolve/resolve_messages.mes
@@ -0,0 +1,154 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::resolve
+
+% RESLIB_ANSWER answer received in response to query for <%1>
+A debug message recording that an answer has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+
+% RESLIB_CNAME CNAME received in response to query for <%1>
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question. Previous debug messages will have indicated
+the server to which the question was sent.
+
+% RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2
+A debug message, a cache lookup did not find the specified <name, class,
+type> tuple in the cache; instead, the deepest delegation found is indicated.
+
+% RESLIB_FOLLOW_CNAME following CNAME chain to <%1>
+A debug message, a CNAME response was received and another query is being issued
+for the <name, class, type> tuple.
+
+% RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded
+A debug message recording that a CNAME response has been received to an upstream
+query for the specified question (Previous debug messages will have indicated
+the server to which the question was sent). However, receipt of this CNAME
+has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
+is where one CNAME points to another) and so an error is being returned.
+
+% RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1>
+A debug message, this indicates that a response was received for the specified
+query and was categorized as a referral. However, the received message did
+not contain any NS RRsets. This may indicate a programming error in the
+response classification code.
+
+% RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS
+A debug message, the RunningQuery object is querying the NSAS for the
+nameservers for the specified zone.
+
+% RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1>
+A debug message recording that either a NXDOMAIN or an NXRRSET response has
+been received to an upstream query for the specified question. Previous debug
+messages will have indicated the server to which the question was sent.
+
+% RESLIB_PROTOCOL protocol error in answer for %1: %2
+A debug message indicating that a protocol error was received. As there
+are no retries left, an error will be reported.
+
+% RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)
+A debug message indicating that a protocol error was received and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+
+% RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1>
+A debug message, the response to the specified query indicated an error
+that is not covered by a specific code path. A SERVFAIL will be returned.
+
+% RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)
+This is a debug message and indicates that a RecursiveQuery object found
+the specified <name, class, type> tuple in the cache. The instance number
+at the end of the message indicates which of the two resolve() methods has
+been called.
+
+% RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)
+This is a debug message and indicates that the lookup in the cache made by the
+RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
+object has been created to resolve the question. The instance number at
+the end of the message indicates which of the two resolve() methods has
+been called.
+
+% RESLIB_REFERRAL referral received in response to query for <%1>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+
+% RESLIB_REFER_ZONE referred to zone %1
+A debug message indicating that the last referral message was to the specified
+zone.
+
+% RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)
+A debug message, the RecursiveQuery::resolve method has been called to resolve
+the specified <name, class, type> tuple. The first action will be to look up
+the specified tuple in the cache. The instance number at the end of the
+message indicates which of the two resolve() methods has been called.
+
+% RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)
+A debug message, indicating that when RecursiveQuery::resolve queried the
+cache, a single RRset was found which was put in the answer. The instance
+number at the end of the message indicates which of the two resolve()
+methods has been called.
+
+% RESLIB_RTT round-trip time of last query calculated as %1 ms
+A debug message giving the round-trip time of the last query and response.
+
+% RESLIB_RUNQ_CACHE_FIND found <%1> in the cache
+This is a debug message and indicates that a RunningQuery object found
+the specified <name, class, type> tuple in the cache.
+
+% RESLIB_RUNQ_CACHE_LOOKUP looking up <%1> in the cache
+This is a debug message and indicates that a RunningQuery object has made
+a call to its doLookup() method to look up the specified <name, class, type>
+tuple, the first action of which will be to examine the cache.
+
+% RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable
+A debug message indicating that a RunningQuery's failure callback has been
+called because all nameservers for the zone in question are unreachable.
+
+% RESLIB_RUNQ_SUCCESS success callback - sending query to %1
+A debug message indicating that a RunningQuery's success callback has been
+called because a nameserver has been found, and that a query is being sent
+to the specified nameserver.
+
+% RESLIB_TEST_SERVER setting test server to %1(%2)
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+
+% RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2
+This is a debug message and should only be seen in unit tests. A query for
+the specified <name, class, type> tuple is being sent to a test nameserver
+whose address is given in the message.
+
+% RESLIB_TIMEOUT query <%1> to %2 timed out
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+
+% RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)
+A debug message indicating that the specified query has timed out and that
+the resolver is repeating the query to the same nameserver. After this
+repeated query, there will be the indicated number of retries left.
+
+% RESLIB_TRUNCATED response to query for <%1> was truncated, re-querying over TCP
+A debug message, this indicates that the response to the specified query was
+truncated and that the resolver will be re-querying over TCP. There are
+various reasons why responses may be truncated, so this message is normal and
+gives no cause for concern.
+
+% RESLIB_UPSTREAM sending upstream query for <%1> to %2
+A debug message indicating that a query for the specified <name, class, type>
+tuple is being sent to a nameserver whose address is given in the message.
diff --git a/src/lib/resolve/tests/Makefile.am b/src/lib/resolve/tests/Makefile.am
index edea7cd..cf05d9b 100644
--- a/src/lib/resolve/tests/Makefile.am
+++ b/src/lib/resolve/tests/Makefile.am
@@ -11,9 +11,11 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
+
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_SOURCES = run_unittests.cc
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
run_unittests_SOURCES += resolve_unittest.cc
@@ -23,14 +25,16 @@ run_unittests_SOURCES += recursive_query_unittest.cc
run_unittests_SOURCES += recursive_query_unittest_2.cc
run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/resolve/tests/run_unittests.cc b/src/lib/resolve/tests/run_unittests.cc
index f80e167..fe8124e 100644
--- a/src/lib/resolve/tests/run_unittests.cc
+++ b/src/lib/resolve/tests/run_unittests.cc
@@ -13,12 +13,15 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
#include <dns/tests/unittest_util.h>
+#include <log/logger_support.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/server_common/Makefile.am b/src/lib/server_common/Makefile.am
index dfb3014..c2779b4 100644
--- a/src/lib/server_common/Makefile.am
+++ b/src/lib/server_common/Makefile.am
@@ -17,10 +17,23 @@ AM_CXXFLAGS += -Wno-unused-parameter
endif
lib_LTLIBRARIES = libserver_common.la
-libserver_common_la_SOURCES = portconfig.h portconfig.cc
+libserver_common_la_SOURCES = client.h client.cc
+libserver_common_la_SOURCES += keyring.h keyring.cc
+libserver_common_la_SOURCES += portconfig.h portconfig.cc
+libserver_common_la_SOURCES += logger.h logger.cc
+nodist_libserver_common_la_SOURCES = server_common_messages.h
+nodist_libserver_common_la_SOURCES += server_common_messages.cc
libserver_common_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libserver_common_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
+libserver_common_la_LIBADD += $(top_builddir)/src/lib/acl/libacl.la
+libserver_common_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+BUILT_SOURCES = server_common_messages.h server_common_messages.cc
+server_common_messages.h server_common_messages.cc: server_common_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/server_common/server_common_messages.mes
-CLEANFILES = *.gcno *.gcda
+EXTRA_DIST = server_common_messages.mes
+
+CLEANFILES = *.gcno *.gcda server_common_messages.h server_common_messages.cc
diff --git a/src/lib/server_common/client.cc b/src/lib/server_common/client.cc
new file mode 100644
index 0000000..e6383d6
--- /dev/null
+++ b/src/lib/server_common/client.cc
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <acl/ip_check.h>
+
+#include <asiolink/io_endpoint.h>
+#include <asiolink/io_message.h>
+
+#include <server_common/client.h>
+
+using namespace isc::acl;
+using namespace isc::server_common;
+using namespace isc::asiolink;
+
+struct Client::ClientImpl {
+ ClientImpl(const IOMessage& request_message) :
+ request_(request_message),
+ request_src_(request_.getRemoteEndpoint().getSockAddr())
+ {}
+
+ const IOMessage& request_;
+ const IPAddress request_src_;
+};
+
+Client::Client(const IOMessage& request_message) :
+ impl_(new ClientImpl(request_message))
+{}
+
+Client::~Client() {
+ delete impl_;
+}
+
+const IOEndpoint&
+Client::getRequestSourceEndpoint() const {
+ return (impl_->request_.getRemoteEndpoint());
+}
+
+const IPAddress&
+Client::getRequestSourceIPAddress() const {
+ return (impl_->request_src_);
+}
+
+std::string
+Client::toText() const {
+ std::stringstream ss;
+ ss << impl_->request_.getRemoteEndpoint().getAddress().toText()
+ << '#' << impl_->request_.getRemoteEndpoint().getPort();
+ return (ss.str());
+}
+
+std::ostream&
+isc::server_common::operator<<(std::ostream& os, const Client& client) {
+ return (os << client.toText());
+}
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
new file mode 100644
index 0000000..8cafb1e
--- /dev/null
+++ b/src/lib/server_common/client.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CLIENT_H
+#define __CLIENT_H 1
+
+#include <string>
+#include <ostream>
+
+#include <boost/noncopyable.hpp>
+
+#include <acl/ip_check.h>
+
+namespace isc {
+namespace asiolink {
+class IOMessage;
+class IOEndpoint;
+}
+
+namespace acl {
+struct IPAddress;
+}
+
+namespace server_common {
+
+/// A DNS client with a single request context.
+///
+/// The \c Client class represents a DNS client with information of one
+/// DNS request (e.g., a query). The information includes the source and
+/// destination IP addresses of the request, information of the DNS request
+/// message such as the query name or (if provided) TSIG key information.
+///
+/// A \c Client class object is expected to be constructed on receiving a
+/// new request with lower level information such as IP addresses and is
+/// updated with DNS specific information as the server processes the request.
+/// It is also expected to be used as the primary interface for request
+/// processing such as query handling or access control.
+///
+/// Furthermore, to minimize the overhead, this class would be further
+/// extended so that it can be reused, with an additional method to reset
+/// the internal information.
+///
+/// In the current initial implementation, however, it only contains the
+/// lower level information in the form of \c IOMessage object and cannot
+/// be reused (it must be constructed for every new request). Also, the
+/// only actual usage of this class at this moment is for ACL handling.
+///
+/// A \c Client class object is generally assumed to be valid throughout
+/// the processing of a single request, and then be destructed or (when
+/// supported) reset. To prevent it from being copied and held accidentally
+/// beyond the expected valid period, it is intentionally made non-copyable.
+///
+/// Notes about other possibilities: we may want to abstract it further,
+/// so that it can also be used for DHCP. In that case, we'd subclass a
+/// base client class for DNS specific clients and DHCP specific clients.
+/// We might also want to separate DNS clients for authoritative servers
+/// and clients for the resolver, especially because the former could be
+/// simpler with performance optimizations.
+class Client : boost::noncopyable {
+public:
+ ///
+ /// \name Constructors and Destructor
+ ///
+ //@{
+ /// The constructor.
+ ///
+ /// This initial version of constructor takes an \c IOMessage object
+ /// that is supposed to represent a DNS request message sent from an
+ /// external client (but the constructor does not perform any assumption
+ /// check on the given \c IOMessage).
+ ///
+ /// If and when we extend the behavior and responsibility
+ /// of this class, this version of constructor will probably be
+ /// deprecated.
+ ///
+ /// \c request_message must be valid throughout the lifetime of the client.
+ ///
+ /// \exception None
+ /// \param request_message Refers to \c IOMessage corresponding to some
+ /// DNS request message.
+ explicit Client(const isc::asiolink::IOMessage& request_message);
+
+ /// The destructor
+ ~Client();
+ //@}
+
+ /// Return the client's endpoint of the request.
+ ///
+ /// This should be identical to the result of \c getRemoteEndpoint()
+ /// called on \c request_message passed to the constructor.
+ ///
+ /// \exception None
+ const isc::asiolink::IOEndpoint& getRequestSourceEndpoint() const;
+
+ /// Return the IP address part of the client request's endpoint.
+ ///
+ /// The resulting \c IPAddress can be constructed using
+ /// \c getRequestSourceEndpoint(), and in that sense this method is
+ /// redundant. But this implementation internally constructs the
+ /// \c IPAddress on construction and always returns a reference to it,
+ /// and should be more efficient. It is provided so that it can be
+ /// called multiple times in a complicated ACL with minimum cost.
+ ///
+ /// \exception None
+ const isc::acl::IPAddress& getRequestSourceIPAddress() const;
+
+ /// Convert the Client to a string.
+ ///
+ /// (In the initial implementation) the format of the resulting string
+ /// is as follows:
+ /// \code <IP address>#<port>
+ /// \endcode
+ /// The IP address is the textual representation of the client's IP
+ /// address, which is the source address of the request the client has
+ /// sent. The port is the UDP or TCP port of the client's end of the request.
+ ///
+ /// \exception std::bad_alloc Internal resource allocation fails
+ std::string toText() const;
+
+private:
+ struct ClientImpl;
+ ClientImpl* impl_;
+};
+
+/// \brief Insert the \c Client as a string into stream.
+///
+/// This function converts \c client into a string and inserts it into the
+/// output stream \c os.
+///
+/// \param os A \c std::ostream object on which the insertion operation is
+/// performed.
+/// \param client A reference to a \c Client object output by the operation.
+/// \return A reference to the same \c std::ostream object referenced by
+/// parameter \c os after the insertion operation.
+std::ostream& operator<<(std::ostream& os, const Client& client);
+}
+}
+
+#endif // __CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
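For orientation, a minimal sketch of how the new Client class is intended to be used; the handleRequest() wrapper and the std::cout logging are hypothetical, only the Client calls themselves come from the header above.

    #include <iostream>

    #include <asiolink/io_message.h>
    #include <server_common/client.h>

    // Hypothetical request handler: wrap the received IOMessage in a
    // Client and use it for source-address dependent processing.
    void
    handleRequest(const isc::asiolink::IOMessage& request) {
        isc::server_common::Client client(request);

        // The source IP address is precomputed inside the Client, so
        // repeated lookups (e.g. in a complicated ACL) are cheap.
        const isc::acl::IPAddress& src = client.getRequestSourceIPAddress();
        static_cast<void>(src);    // a real handler would feed this to an ACL

        // operator<< uses Client::toText(), giving "<address>#<port>".
        std::cout << "request from " << client << std::endl;
    }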
diff --git a/src/lib/server_common/keyring.cc b/src/lib/server_common/keyring.cc
new file mode 100644
index 0000000..501dfd9
--- /dev/null
+++ b/src/lib/server_common/keyring.cc
@@ -0,0 +1,71 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <server_common/keyring.h>
+#include <server_common/logger.h>
+
+using namespace isc::dns;
+using namespace isc::data;
+
+namespace isc {
+namespace server_common {
+
+typedef boost::shared_ptr<TSIGKeyRing> KeyringPtr;
+
+KeyringPtr keyring;
+
+namespace {
+
+void
+updateKeyring(const std::string&, ConstElementPtr data,
+ const isc::config::ConfigData&) {
+ ConstElementPtr list(data->get("keys"));
+ KeyringPtr load(new TSIGKeyRing);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_UPDATE);
+
+ // Note that 'data' only contains explicitly configured config parameters.
+ // So if the default is used, the list is NULL rather than an empty list, and
+ // we must explicitly expect that case (and handle it just like an empty
+ // list).
+ for (size_t i(0); list && i < list->size(); ++ i) {
+ load->add(TSIGKey(list->get(i)->stringValue()));
+ }
+ keyring.swap(load);
+}
+
+}
+
+void
+initKeyring(config::ModuleCCSession& session) {
+ if (keyring) {
+ // We are already initialized
+ return;
+ }
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_INIT);
+ session.addRemoteConfig("tsig_keys", updateKeyring, false);
+}
+
+void
+deinitKeyring(config::ModuleCCSession& session) {
+ if (!keyring) {
+ // Not initialized, ignore it
+ return;
+ }
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_DEINIT);
+ keyring.reset();
+ session.removeRemoteConfig("tsig_keys");
+}
+
+}
+}
diff --git a/src/lib/server_common/keyring.h b/src/lib/server_common/keyring.h
new file mode 100644
index 0000000..9c067e9
--- /dev/null
+++ b/src/lib/server_common/keyring.h
@@ -0,0 +1,102 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef ISC_SERVER_COMMON_KEYRING_H
+#define ISC_SERVER_COMMON_KEYRING_H
+
+#include <boost/shared_ptr.hpp>
+#include <dns/tsigkey.h>
+#include <config/ccsession.h>
+
+/**
+ * \file keyring.h
+ * \brief TSIG keyring loaded from configuration.
+ *
+ * This file contains routines for loading a TSIG key ring from
+ * the tsig_keys configuration section and keeping them up to date
+ * on updates.
+ *
+ * You simply initialize/load the keyring with isc::server_common::initKeyring
+ * and then just use the key ring referred to by isc::server_common::keyring. It
+ * is automatically reloaded, when the configuration updates, so you no longer
+ * needs to care about it.
+ *
+ * If you want to keep a key (or session) for longer time or your application
+ * is multithreaded, you might want to have a copy of the shared pointer to
+ * hold a reference. Otherwise an update might replace the keyring and delete
+ * the keys in the old one.
+ *
+ * Also note that, while the interface doesn't prevent the application from
+ * modifying the keyring, it is not a good idea to do so. As mentioned above,
+ * it might get reloaded at any time, which would replace the modified keyring.
+ * The possibility to modify it is a side effect of the simpler implementation and
+ * shorter code, not a goal.
+ */
+
+namespace isc {
+
+namespace server_common {
+
+/**
+ * \brief The key ring itself
+ *
+ * This is where the key ring is stored. You can use it directly for your needs,
+ * but you need to call initKeyring first; otherwise you'll only find a NULL
+ * pointer here.
+ */
+extern boost::shared_ptr<dns::TSIGKeyRing> keyring;
+
+/**
+ * \brief Load the key ring for the first time
+ *
+ * This loads the key ring from configuration to keyring. It also registers for
+ * config updates, so from now on, it'll be kept up to date.
+ *
+ * You can unload the key ring with deinitKeyring.
+ *
+ * If it is already loaded, this function does nothing. So, if more than one
+ * part of an application needs to use the key ring, they all can just call
+ * this independently to ensure the keyring is loaded.
+ *
+ * \param session The configuration session used to talk to the config manager.
+ */
+void
+initKeyring(config::ModuleCCSession& session);
+
+/**
+ * \brief Unload the key ring
+ *
+ * This can be used to unload the key ring. It will reset the keyring to NULL
+ * and stop receiving updates of the configuration.
+ *
+ * The need for this function should be quite rare, as it isn't required to be
+ * called before application shutdown. And not calling it has only a small
+ * performance penalty -- the keyring will be kept in memory and updated when
+ * the user changes configuration.
+ *
+ * This does nothing if the key ring is not loaded currently.
+ *
+ * \param session The configuration session used to talk to the config manager.
+ *
+ * \todo What do we do when the data that come are invalid? Should we ignore it,
+ * as validity should have been checked already in the config manager, or
+ * throw? What about when we get an update and it's invalid?
+ */
+void
+deinitKeyring(config::ModuleCCSession& session);
+
+}
+}
+
+#endif
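A minimal usage sketch of the interface documented above; the setupTsig() helper is invented, and the session argument is whatever ModuleCCSession the application already holds.

    #include <boost/shared_ptr.hpp>

    #include <dns/tsigkey.h>
    #include <config/ccsession.h>
    #include <server_common/keyring.h>

    // Hypothetical initialisation helper in a server application.
    void
    setupTsig(isc::config::ModuleCCSession& session) {
        // Load the keyring and subscribe to tsig_keys updates; calling
        // this when the keyring is already loaded is a no-op.
        isc::server_common::initKeyring(session);

        // Hold a copy of the shared pointer if the keys must stay valid
        // across a reload triggered by a configuration update.
        boost::shared_ptr<isc::dns::TSIGKeyRing> keys =
            isc::server_common::keyring;
    }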
diff --git a/src/lib/server_common/logger.cc b/src/lib/server_common/logger.cc
new file mode 100644
index 0000000..0b9ab6e
--- /dev/null
+++ b/src/lib/server_common/logger.cc
@@ -0,0 +1,23 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <server_common/logger.h>
+
+namespace isc {
+namespace server_common {
+
+isc::log::Logger logger("server_common");
+
+}
+}
diff --git a/src/lib/server_common/logger.h b/src/lib/server_common/logger.h
new file mode 100644
index 0000000..80bc81d
--- /dev/null
+++ b/src/lib/server_common/logger.h
@@ -0,0 +1,43 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERVER_COMMON_LOGGER_H
+#define __SERVER_COMMON_LOGGER_H
+
+#include <log/macros.h>
+#include <server_common/server_common_messages.h>
+
+/// \file server_common/logger.h
+/// \brief Server Common library global logger
+///
+/// This holds the logger for the server common library. It is a private header
+/// and should not be included in any publicly used header, only in local
+/// cc files.
+
+namespace isc {
+namespace server_common {
+
+/// \brief The logger for this library
+extern isc::log::Logger logger;
+
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Print also values used
+const int DBG_TRACE_VALUES = DBGLVL_TRACE_BASIC_DATA;
+
+}
+}
+
+#endif
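
The logger and the debug-level constants above are meant to be used with the
LOG_* macros from log/macros.h plus the message IDs generated from
server_common_messages.mes, as the portconfig.cc changes below do. A short
sketch (inside a .cc file of this library, within namespace
isc::server_common; the argument string is made up):

    #include <server_common/logger.h>

    LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_SET_LISTEN);
    LOG_ERROR(logger, SRVCOMM_ADDRESS_FAIL).arg("address already in use");
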
diff --git a/src/lib/server_common/portconfig.cc b/src/lib/server_common/portconfig.cc
index 7b2b3dd..379a0a1 100644
--- a/src/lib/server_common/portconfig.cc
+++ b/src/lib/server_common/portconfig.cc
@@ -13,10 +13,10 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <server_common/portconfig.h>
+#include <server_common/logger.h>
#include <asiolink/io_address.h>
#include <asiodns/dns_service.h>
-#include <log/dummylog.h>
#include <boost/foreach.hpp>
#include <boost/lexical_cast.hpp>
@@ -25,7 +25,6 @@ using namespace std;
using namespace isc::data;
using namespace isc::asiolink;
using namespace isc::asiodns;
-using isc::log::dlog;
namespace isc {
namespace server_common {
@@ -43,6 +42,8 @@ parseAddresses(isc::data::ConstElementPtr addresses,
ConstElementPtr addr(addrPair->get("address"));
ConstElementPtr port(addrPair->get("port"));
if (!addr || ! port) {
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_MISSING).
+ arg(addrPair->str());
isc_throw(BadValue, "Address must contain both the IP"
"address and port");
}
@@ -50,6 +51,8 @@ parseAddresses(isc::data::ConstElementPtr addresses,
IOAddress(addr->stringValue());
if (port->intValue() < 0 ||
port->intValue() > 0xffff) {
+ LOG_ERROR(logger, SRVCOMM_PORT_RANGE).
+ arg(port->intValue()).arg(addrPair->str());
isc_throw(BadValue, "Bad port value (" <<
port->intValue() << ")");
}
@@ -57,11 +60,14 @@ parseAddresses(isc::data::ConstElementPtr addresses,
port->intValue()));
}
catch (const TypeError &e) { // Better error message
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_TYPE).
+ arg(addrPair->str());
isc_throw(TypeError,
"Address must be a string and port an integer");
}
}
} else if (addresses->getType() != Element::null) {
+ LOG_ERROR(logger, SRVCOMM_ADDRESSES_NOT_LIST).arg(elemName);
isc_throw(TypeError, elemName + " config element must be a list");
}
}
@@ -86,10 +92,10 @@ installListenAddresses(const AddressList& newAddresses,
isc::asiodns::DNSService& service)
{
try {
- dlog("Setting listen addresses:");
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_SET_LISTEN);
BOOST_FOREACH(const AddressPair& addr, newAddresses) {
- dlog(" " + addr.first + ":" +
- boost::lexical_cast<string>(addr.second));
+ LOG_DEBUG(logger, DBG_TRACE_VALUES, SRVCOMM_ADDRESS_VALUE).
+ arg(addr.first).arg(addr.second);
}
setAddresses(service, newAddresses);
addressStore = newAddresses;
@@ -108,13 +114,12 @@ installListenAddresses(const AddressList& newAddresses,
* user will get error info, command control can be used to set new
* address. So we just catch the exception without propagating outside
*/
- dlog(string("Unable to set new address: ") + e.what(), true);
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_FAIL).arg(e.what());
try {
setAddresses(service, addressStore);
}
catch (const exception& e2) {
- dlog("Unable to recover from error;", true);
- dlog(string("Rollback failed with: ") + e2.what(), true);
+ LOG_FATAL(logger, SRVCOMM_ADDRESS_UNRECOVERABLE).arg(e2.what());
}
//Anyway the new configure has problem, we need to notify configure
//manager the new configure doesn't work
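
For reference, the data parseAddresses() expects (and now logs errors about)
is a list of maps, each with an "address" string and a "port" integer. A
minimal sketch of feeding it such data, assuming the parseAddresses() and
AddressList declarations in portconfig.h (illustrative only):

    using namespace isc::server_common::portconfig;

    isc::data::ConstElementPtr addresses(isc::data::Element::fromJSON(
        "[{\"address\": \"192.0.2.1\", \"port\": 5300}]"));
    // Logs one of the SRVCOMM_* errors below and throws on malformed input
    AddressList list(parseAddresses(addresses, "listen_on"));
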
diff --git a/src/lib/server_common/server_common_messages.mes b/src/lib/server_common/server_common_messages.mes
new file mode 100644
index 0000000..5fbbb0b
--- /dev/null
+++ b/src/lib/server_common/server_common_messages.mes
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::server_common
+
+# \brief Messages for the server_common library
+
+% SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1
+This points to an error in configuration. What was supposed to be a list of
+IP address/port pairs isn't a list at all, but something else.
+
+% SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)
+The server failed to bind to one of the address/port pairs it should listen on
+according to the configuration, for the reason listed in the message (usually
+because that pair is already used by another service or because of missing
+privileges). The server will try to recover and bind to the address/port pairs
+it was listening on before (if any).
+
+% SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+
+% SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+representing a valid IPv4 or IPv6 address) and a port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+
+% SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator provides a working address configuration.
+
+% SRVCOMM_ADDRESS_VALUE address to set: %1#%2
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (e.g. there will be one log message
+per pair). It appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+
+% SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring
+Debug message indicating that the server is deinitializing the TSIG keyring.
+
+% SRVCOMM_KEYS_INIT initializing TSIG keyring
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+
+% SRVCOMM_KEYS_UPDATE updating TSIG keyring
+Debug message indicating that a new keyring is being loaded from the
+configuration (either on startup or as a result of a configuration update).
+
+% SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+
+% SRVCOMM_SET_LISTEN setting addresses to listen to
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
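
For orientation: the message compiler turns each %-identifier above into a
MessageID constant in the generated server_common_messages.h header, which the
LOG_* calls in this branch reference. Roughly (a sketch of the generated
header, not part of this diff):

    namespace isc {
    namespace server_common {
    extern const isc::log::MessageID SRVCOMM_ADDRESS_FAIL;
    extern const isc::log::MessageID SRVCOMM_SET_LISTEN;
    // ... one constant per message above ...
    }
    }
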
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am
index a04a884..d7e113a 100644
--- a/src/lib/server_common/tests/Makefile.am
+++ b/src/lib/server_common/tests/Makefile.am
@@ -26,18 +26,29 @@ TESTS =
if HAVE_GTEST
TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
+run_unittests_SOURCES += client_unittest.cc
run_unittests_SOURCES += portconfig_unittest.cc
+run_unittests_SOURCES += keyring_test.cc
+nodist_run_unittests_SOURCES = data_path.h
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
-
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+run_unittests_LDADD += $(top_builddir)/src/lib/config/tests/libfake_session.la
endif
noinst_PROGRAMS = $(TESTS)
+
+EXTRA_DIST = testdata/spec.spec
diff --git a/src/lib/server_common/tests/client_unittest.cc b/src/lib/server_common/tests/client_unittest.cc
new file mode 100644
index 0000000..287a926
--- /dev/null
+++ b/src/lib/server_common/tests/client_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sys/socket.h>
+#include <string.h>
+
+#include <string>
+#include <sstream>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <acl/ip_check.h>
+
+#include <asiolink/io_address.h>
+#include <asiolink/io_socket.h>
+#include <asiolink/io_message.h>
+
+#include <server_common/client.h>
+
+#include <gtest/gtest.h>
+
+using namespace boost;
+using namespace isc::acl;
+using namespace isc::asiolink;
+using namespace isc::server_common;
+
+namespace {
+
+class ClientTest : public ::testing::Test {
+protected:
+ ClientTest() {
+ endpoint4.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"),
+ 53214));
+ endpoint6.reset(IOEndpoint::create(IPPROTO_TCP,
+ IOAddress("2001:db8::1"), 53216));
+ request4.reset(new IOMessage(NULL, 0, IOSocket::getDummyUDPSocket(),
+ *endpoint4));
+ request6.reset(new IOMessage(NULL, 0, IOSocket::getDummyTCPSocket(),
+ *endpoint6));
+ client4.reset(new Client(*request4));
+ client6.reset(new Client(*request6));
+ }
+ scoped_ptr<const IOEndpoint> endpoint4;
+ scoped_ptr<const IOEndpoint> endpoint6;
+ scoped_ptr<const IOMessage> request4;
+ scoped_ptr<const IOMessage> request6;
+ scoped_ptr<const Client> client4;
+ scoped_ptr<const Client> client6;
+};
+
+TEST_F(ClientTest, constructIPv4) {
+ EXPECT_EQ(AF_INET, client4->getRequestSourceEndpoint().getFamily());
+ EXPECT_EQ(IPPROTO_UDP, client4->getRequestSourceEndpoint().getProtocol());
+ EXPECT_EQ("192.0.2.1",
+ client4->getRequestSourceEndpoint().getAddress().toText());
+ EXPECT_EQ(53214, client4->getRequestSourceEndpoint().getPort());
+
+ const uint8_t expected_data[] = { 192, 0, 2, 1 };
+ EXPECT_EQ(AF_INET, client4->getRequestSourceIPAddress().getFamily());
+ ASSERT_EQ(4, client4->getRequestSourceIPAddress().getLength());
+ EXPECT_EQ(0, memcmp(expected_data,
+ client4->getRequestSourceIPAddress().getData(), 4));
+}
+
+TEST_F(ClientTest, constructIPv6) {
+ EXPECT_EQ(AF_INET6, client6->getRequestSourceEndpoint().getFamily());
+ EXPECT_EQ(IPPROTO_TCP, client6->getRequestSourceEndpoint().getProtocol());
+ EXPECT_EQ("2001:db8::1",
+ client6->getRequestSourceEndpoint().getAddress().toText());
+ EXPECT_EQ(53216, client6->getRequestSourceEndpoint().getPort());
+
+ const uint8_t expected_data[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01 };
+ EXPECT_EQ(AF_INET6, client6->getRequestSourceIPAddress().getFamily());
+ ASSERT_EQ(16, client6->getRequestSourceIPAddress().getLength());
+ EXPECT_EQ(0, memcmp(expected_data,
+ client6->getRequestSourceIPAddress().getData(), 16));
+}
+
+TEST_F(ClientTest, toText) {
+ EXPECT_EQ("192.0.2.1#53214", client4->toText());
+ EXPECT_EQ("2001:db8::1#53216", client6->toText());
+}
+
+// test operator<<. We simply confirm it appends the result of toText().
+TEST_F(ClientTest, LeftShiftOperator) {
+ std::ostringstream oss;
+ oss << *client4 << "more text";
+ EXPECT_EQ(client4->toText() + std::string("more text"), oss.str());
+}
+}
diff --git a/src/lib/server_common/tests/data_path.h.in b/src/lib/server_common/tests/data_path.h.in
new file mode 100644
index 0000000..8ac0380
--- /dev/null
+++ b/src/lib/server_common/tests/data_path.h.in
@@ -0,0 +1,16 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define TEST_DATA_PATH "@abs_srcdir@/testdata"
+#define PLUGIN_DATA_PATH "@top_srcdir@/src/bin/cfgmgr/plugins"
diff --git a/src/lib/server_common/tests/keyring_test.cc b/src/lib/server_common/tests/keyring_test.cc
new file mode 100644
index 0000000..dab43df
--- /dev/null
+++ b/src/lib/server_common/tests/keyring_test.cc
@@ -0,0 +1,150 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <server_common/keyring.h>
+#include <server_common/tests/data_path.h>
+
+#include <config/tests/fake_session.h>
+#include <config/ccsession.h>
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+#include <memory>
+#include <string>
+
+using namespace isc::data;
+using namespace isc::config;
+using namespace isc::server_common;
+using namespace isc::dns;
+
+namespace {
+
+class KeyringTest : public ::testing::Test {
+public:
+ KeyringTest() :
+ session(ElementPtr(new ListElement), ElementPtr(new ListElement),
+ ElementPtr(new ListElement)),
+ specfile(std::string(TEST_DATA_PATH) + "/spec.spec")
+ {
+ session.getMessages()->add(createAnswer());
+ mccs.reset(new ModuleCCSession(specfile, session, NULL, NULL,
+ false, false));
+ }
+ isc::cc::FakeSession session;
+ std::auto_ptr<ModuleCCSession> mccs;
+ std::string specfile;
+ void doInit(bool with_key = true) {
+ // Prepare the module specification for it and the config
+ session.getMessages()->
+ add(createAnswer(0,
+ moduleSpecFromFile(std::string(PLUGIN_DATA_PATH) +
+ "/tsig_keys.spec").
+ getFullSpec()));
+ if (with_key) {
+ session.getMessages()->add(
+ createAnswer(0, Element::fromJSON(
+ "{\"keys\": [\"key:MTIzNAo=:hmac-sha1\"]}")));
+ } else {
+ // This emulates the case of using the spec default. Note that
+ // the default value won't be passed to the config handler, so
+ // we'll pass an empty object, instead of {"keys": []}.
+ session.getMessages()->add(createAnswer(0,
+ Element::fromJSON("{}")));
+ }
+ // Now load it
+ EXPECT_NO_THROW(initKeyring(*mccs));
+ EXPECT_NE(keyring, boost::shared_ptr<TSIGKeyRing>()) <<
+ "No keyring even after init";
+ }
+};
+
+// Test usual use - init, using the keyring, update, deinit
+TEST_F(KeyringTest, keyring) {
+ // First, initialize it
+ {
+ SCOPED_TRACE("Init");
+ doInit();
+
+ // Make sure it contains the correct key
+ TSIGKeyRing::FindResult result(keyring->find(Name("key"),
+ TSIGKey::HMACSHA1_NAME()));
+ EXPECT_EQ(TSIGKeyRing::SUCCESS, result.code);
+ }
+
+ {
+ SCOPED_TRACE("Update");
+ session.addMessage(createCommand("config_update", Element::fromJSON(
+ "{\"keys\": [\"another:MTIzNAo=:hmac-sha256\"]}")),
+ "tsig_keys", "*");
+ mccs->checkCommand();
+
+ // Make sure it no longer contains the original key
+ TSIGKeyRing::FindResult result(keyring->find(Name("key"),
+ TSIGKey::HMACSHA1_NAME()));
+ EXPECT_EQ(TSIGKeyRing::NOTFOUND, result.code);
+ // but it does contain the new one
+ TSIGKeyRing::FindResult result2 = keyring->find(Name("another"),
+ TSIGKey::HMACSHA256_NAME());
+ EXPECT_EQ(TSIGKeyRing::SUCCESS, result2.code);
+ }
+
+ {
+ SCOPED_TRACE("Deinit");
+ deinitKeyring(*mccs);
+ EXPECT_EQ(keyring, boost::shared_ptr<TSIGKeyRing>()) <<
+ "The keyring didn't disappear";
+ }
+}
+
+TEST_F(KeyringTest, keyringWithDefault) {
+ // If we don't explicitly specify a keyring, the default (no key) will
+ // be used.
+ doInit(false);
+ EXPECT_EQ(0, keyring->size());
+ deinitKeyring(*mccs);
+}
+
+// Init twice
+TEST_F(KeyringTest, initTwice) {
+ // It is NULL before
+ EXPECT_EQ(keyring, boost::shared_ptr<TSIGKeyRing>()) <<
+ "Someone forgot to deinit it before";
+ {
+ SCOPED_TRACE("First init");
+ doInit();
+ }
+ boost::shared_ptr<TSIGKeyRing> backup(keyring);
+ {
+ SCOPED_TRACE("Second init");
+ EXPECT_NO_THROW(initKeyring(*mccs)) <<
+ "It not only does something when it is already initialized, "
+ "it even throws at it";
+ }
+ EXPECT_EQ(backup, keyring) << "The second init replaced the data";
+ deinitKeyring(*mccs);
+}
+
+// deinit when not initialized
+TEST_F(KeyringTest, extraDeinit) {
+ // It is NULL before
+ EXPECT_EQ(boost::shared_ptr<TSIGKeyRing>(), keyring) <<
+ "Someone forgot to deinit it before";
+ // Check that it doesn't get confused when we do not have it initialized
+ EXPECT_NO_THROW(deinitKeyring(*mccs));
+ // It is still NULL
+ EXPECT_EQ(keyring, boost::shared_ptr<TSIGKeyRing>()) <<
+ "Where did it get something after deinit?";
+}
+
+}
diff --git a/src/lib/server_common/tests/run_unittests.cc b/src/lib/server_common/tests/run_unittests.cc
index 7ebc985..860cb77 100644
--- a/src/lib/server_common/tests/run_unittests.cc
+++ b/src/lib/server_common/tests/run_unittests.cc
@@ -15,6 +15,8 @@
#include <config.h>
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
+#include <log/logger_support.h>
#include <dns/tests/unittest_util.h>
@@ -22,5 +24,7 @@ int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ isc::log::initLogger();
+
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/server_common/tests/testdata/spec.spec b/src/lib/server_common/tests/testdata/spec.spec
new file mode 100644
index 0000000..3e0a822
--- /dev/null
+++ b/src/lib/server_common/tests/testdata/spec.spec
@@ -0,0 +1,6 @@
+{
+ "module_spec": {
+ "module_name": "test"
+ }
+}
+
diff --git a/src/lib/testutils/Makefile.am b/src/lib/testutils/Makefile.am
index ae5c6da..a511d24 100644
--- a/src/lib/testutils/Makefile.am
+++ b/src/lib/testutils/Makefile.am
@@ -5,7 +5,7 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS=$(B10_CXXFLAGS)
if HAVE_GTEST
-lib_LTLIBRARIES = libtestutils.la
+noinst_LTLIBRARIES = libtestutils.la
libtestutils_la_SOURCES = srv_test.h srv_test.cc
libtestutils_la_SOURCES += dnsmessage_test.h dnsmessage_test.cc
diff --git a/src/lib/testutils/dnsmessage_test.h b/src/lib/testutils/dnsmessage_test.h
index a8b7284..1aba526 100644
--- a/src/lib/testutils/dnsmessage_test.h
+++ b/src/lib/testutils/dnsmessage_test.h
@@ -21,6 +21,7 @@
#include <dns/message.h>
#include <dns/name.h>
#include <dns/masterload.h>
+#include <dns/rdataclass.h>
#include <dns/rrclass.h>
#include <dns/rrset.h>
@@ -113,13 +114,32 @@ void rrsetCheck(isc::dns::ConstRRsetPtr expected_rrset,
/// The definitions in this name space are not supposed to be used publicly,
/// but are given here because they are used in templated functions.
namespace detail {
-// Helper matching class used in rrsetsCheck()
+// Helper matching class used in rrsetsCheck(). Basically we only have to
+// check the equality of name, RR type and RR class, but for RRSIGs we need
+// a special additional check because they are essentially different if their
+// 'type covered' fields differ. For simplicity, we only compare the types
+// covered by the first RRSIG RDATAs (and only when they exist); if there's a
+// further difference in the RDATA, the main comparison checks will detect it.
struct RRsetMatch : public std::unary_function<isc::dns::ConstRRsetPtr, bool> {
RRsetMatch(isc::dns::ConstRRsetPtr target) : target_(target) {}
bool operator()(isc::dns::ConstRRsetPtr rrset) const {
- return (rrset->getType() == target_->getType() &&
- rrset->getClass() == target_->getClass() &&
- rrset->getName() == target_->getName());
+ if (rrset->getType() != target_->getType() ||
+ rrset->getClass() != target_->getClass() ||
+ rrset->getName() != target_->getName()) {
+ return (false);
+ }
+ if (rrset->getType() != isc::dns::RRType::RRSIG()) {
+ return (true);
+ }
+ if (rrset->getRdataCount() == 0 || target_->getRdataCount() == 0) {
+ return (true);
+ }
+ isc::dns::RdataIteratorPtr rdit = rrset->getRdataIterator();
+ isc::dns::RdataIteratorPtr targetit = target_->getRdataIterator();
+ return (dynamic_cast<const isc::dns::rdata::generic::RRSIG&>(
+ rdit->getCurrent()).typeCovered() ==
+ dynamic_cast<const isc::dns::rdata::generic::RRSIG&>(
+ targetit->getCurrent()).typeCovered());
}
const isc::dns::ConstRRsetPtr target_;
};
diff --git a/src/lib/testutils/srv_test.cc b/src/lib/testutils/srv_test.cc
index 1d79d71..dd3e425 100644
--- a/src/lib/testutils/srv_test.cc
+++ b/src/lib/testutils/srv_test.cc
@@ -72,9 +72,13 @@ SrvTestBase::createDataFromFile(const char* const datafile,
void
SrvTestBase::createRequestPacket(Message& message,
- const int protocol)
+ const int protocol, TSIGContext* context)
{
- message.toWire(request_renderer);
+ if (context == NULL) {
+ message.toWire(request_renderer);
+ } else {
+ message.toWire(request_renderer, *context);
+ }
delete io_message;
diff --git a/src/lib/testutils/srv_test.h b/src/lib/testutils/srv_test.h
index a848ffc..c92e876 100644
--- a/src/lib/testutils/srv_test.h
+++ b/src/lib/testutils/srv_test.h
@@ -84,7 +84,8 @@ protected:
/// form of \c IOMessage in \c io_message.
/// The existing content of \c io_message, if any, will be deleted.
void createRequestPacket(isc::dns::Message& message,
- const int protocol = IPPROTO_UDP);
+ const int protocol = IPPROTO_UDP,
+ isc::dns::TSIGContext* context = NULL);
MockSession notify_session;
MockServer dnsserv;
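
With the new optional parameter a test can render a TSIG-signed request. A
minimal sketch, assuming a fixture derived from SrvTestBase whose request
message member (request_message here) has already been filled in; the key
string is arbitrary test data:

    isc::dns::TSIGKey key("test.example:MTIzNAo=:hmac-sha1");
    isc::dns::TSIGContext context(key);
    createRequestPacket(request_message, IPPROTO_UDP, &context);
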
diff --git a/src/lib/testutils/testdata/Makefile.am b/src/lib/testutils/testdata/Makefile.am
index 93b9eb9..918d5c5 100644
--- a/src/lib/testutils/testdata/Makefile.am
+++ b/src/lib/testutils/testdata/Makefile.am
@@ -32,4 +32,4 @@ EXTRA_DIST += test2.zone.in
EXTRA_DIST += test2-new.zone.in
.spec.wire:
- $(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am
index 77a3e4a..0b78b29 100644
--- a/src/lib/util/Makefile.am
+++ b/src/lib/util/Makefile.am
@@ -1,6 +1,4 @@
-SUBDIRS = . tests unittests io
-# The io/tests is hack, because otherwise we can not order these directories
-# properly. Unittests use io and io/tests use unittest.
+SUBDIRS = . io unittests tests pyunittests python
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
@@ -24,5 +22,7 @@ libutil_la_SOURCES += encode/binary_from_base16.h
libutil_la_SOURCES += random/qid_gen.h random/qid_gen.cc
libutil_la_SOURCES += random/random_number_generator.h
+EXTRA_DIST = python/pycppwrapper_util.h
+
libutil_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h
index b7a8e28..eb90d64 100644
--- a/src/lib/util/buffer.h
+++ b/src/lib/util/buffer.h
@@ -207,6 +207,24 @@ public:
}
//@}
+ /// @brief Read a specified number of bytes into a vector.
+ ///
+ /// If the provided vector is too short, it will be expanded
+ /// using the vector::resize() method.
+ ///
+ /// @param data Reference to the vector into which the data will be read.
+ /// @param len Number of bytes to read.
+ ///
+ void readVector(std::vector<uint8_t>& data, size_t len)
+ {
+ if (position_ + len > len_) {
+ isc_throw(InvalidBufferPosition, "read beyond end of buffer");
+ }
+
+ data.resize(len);
+ readData(&data[0], len);
+ }
+
private:
size_t position_;
@@ -519,6 +537,6 @@ typedef boost::shared_ptr<OutputBuffer> OutputBufferPtr;
} // namespace isc
#endif // __BUFFER_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
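
A minimal usage sketch of the new InputBuffer::readVector() (the byte array is
arbitrary illustrative data):

    const uint8_t wiredata[] = { 0xc0, 0x00, 0x02, 0x01 };
    isc::util::InputBuffer buffer(wiredata, sizeof(wiredata));
    std::vector<uint8_t> data;
    buffer.readVector(data, sizeof(wiredata));   // resizes 'data' and fills it
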
diff --git a/src/lib/util/encode/base_n.cc b/src/lib/util/encode/base_n.cc
index 406dc77..0026a0b 100644
--- a/src/lib/util/encode/base_n.cc
+++ b/src/lib/util/encode/base_n.cc
@@ -160,19 +160,42 @@ public:
base_zero_code_(base_zero_code),
base_(base), base_beginpad_(base_beginpad), base_end_(base_end),
in_pad_(false)
- {}
+ {
+ // Skip beginning spaces, if any. We need do it here because
+ // otherwise the first call to operator*() would be confused.
+ skipSpaces();
+ }
DecodeNormalizer& operator++() {
++base_;
- while (base_ != base_end_ && isspace(*base_)) {
- ++base_;
- }
+ skipSpaces();
if (base_ == base_beginpad_) {
in_pad_ = true;
}
return (*this);
}
+ void skipSpaces() {
+ // If (char is signed and) *base_ < 0, the Visual Studio compiler on
+ // Windows may trigger _ASSERTE((unsigned)(c + 1) <= 256);
+ // so make sure that the parameter of isspace() is larger than 0.
+ // We don't simply cast it to unsigned char to avoid confusing the
+ // isspace() implementation with a possible extension for values
+ // larger than 127. Also note the check is not ">= 0"; for systems
+ // where char is unsigned that would always be true and would possibly
+ // trigger a compiler warning that could stop the build.
+ while (base_ != base_end_ && *base_ > 0 && isspace(*base_)) {
+ ++base_;
+ }
+ }
const char& operator*() const {
- if (in_pad_ && *base_ == BASE_PADDING_CHAR) {
+ if (base_ == base_end_) {
+ // binary_from_baseX calls this operator when it needs more bits
+ // even if the internal iterator (base_) has reached its end
+ // (if that happens it means the input is an incomplete baseX
+ // string and should be rejected). So this is the only point
+ // we can catch and reject this type of invalid input.
+ isc_throw(BadValue, "Unexpected end of input in BASE decoder");
+ }
+ if (in_pad_) {
return (base_zero_code_);
} else {
return (*base_);
@@ -268,7 +291,7 @@ BaseNTransformer<BitsPerChunk, BaseZeroCode, Encoder, Decoder>::decode(
isc_throw(BadValue, "Too many " << algorithm
<< " padding characters: " << input);
}
- } else if (!isspace(ch)) {
+ } else if (ch < 0 || !isspace(ch)) {
break;
}
++srit;
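
The effect of the decoder changes, sketched through the base64 wrapper
(assuming the decodeBase64() helper in util/encode/base64.h; illustrative
only):

    std::vector<uint8_t> result;
    // Whitespace inside (and now also at the start of) the input is skipped:
    isc::util::encode::decodeBase64(" Zm9v YmFy", result);   // decodes "foobar"
    // An incomplete final group now triggers isc::BadValue
    // ("Unexpected end of input"):
    // isc::util::encode::decodeBase64("Zm9vYmF", result);
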
diff --git a/src/lib/util/filename.cc b/src/lib/util/filename.cc
index 1f2e5db..d7da9c8 100644
--- a/src/lib/util/filename.cc
+++ b/src/lib/util/filename.cc
@@ -132,6 +132,24 @@ Filename::useAsDefault(const string& name) const {
return (retstring);
}
+void
+Filename::setDirectory(const std::string& new_directory) {
+ std::string directory(new_directory);
+
+ if (directory.length() > 0) {
+ // append '/' if necessary
+ size_t sep = directory.rfind('/');
+ if (sep == std::string::npos || sep < directory.size() - 1) {
+ directory += "/";
+ }
+ }
+ // and regenerate the full name
+ std::string full_name = directory + name_ + extension_;
+
+ directory_.swap(directory);
+ full_name_.swap(full_name);
+}
+
} // namespace log
} // namespace isc
diff --git a/src/lib/util/filename.h b/src/lib/util/filename.h
index 984ecb0..f625938 100644
--- a/src/lib/util/filename.h
+++ b/src/lib/util/filename.h
@@ -86,6 +86,13 @@ public:
return (directory_);
}
+ /// \brief Set directory for the file
+ ///
+ /// \param new_directory The directory to set. If this is an empty
+ /// string, the directory this filename object currently
+ /// has will be removed.
+ void setDirectory(const std::string& new_directory);
+
/// \return Name of Given File Name
std::string name() const {
return (name_);
@@ -96,6 +103,11 @@ public:
return (extension_);
}
+ /// \return Name + extension of Given File Name
+ std::string nameAndExtension() const {
+ return (name_ + extension_);
+ }
+
/// \brief Expand Name with Default
///
/// A default file specified is supplied and used to fill in any missing
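
A minimal sketch of the new setDirectory() and nameAndExtension() (assuming
the Filename constructor and the fullName() accessor already in this header):

    isc::util::Filename file("/var/log/bind10.log");
    file.setDirectory("/tmp");
    // file.fullName() is now "/tmp/bind10.log";
    // file.nameAndExtension() is "bind10.log"
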
diff --git a/src/lib/util/io/Makefile.am b/src/lib/util/io/Makefile.am
index 9f06ef9..cbcd54d 100644
--- a/src/lib/util/io/Makefile.am
+++ b/src/lib/util/io/Makefile.am
@@ -1,5 +1,3 @@
-SUBDIRS = . tests
-
AM_CXXFLAGS = $(B10_CXXFLAGS)
lib_LTLIBRARIES = libutil_io.la
@@ -15,4 +13,6 @@ libutil_io_python_la_LDFLAGS = -module
libutil_io_python_la_SOURCES = fdshare_python.cc
libutil_io_python_la_LIBADD = libutil_io.la
libutil_io_python_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
-libutil_io_python_la_CXXFLAGS = $(AM_CXXFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+libutil_io_python_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
diff --git a/src/lib/util/io/tests/Makefile.am b/src/lib/util/io/tests/Makefile.am
deleted file mode 100644
index 56d50cf..0000000
--- a/src/lib/util/io/tests/Makefile.am
+++ /dev/null
@@ -1,25 +0,0 @@
-CLEANFILES = *.gcno *.gcda
-
-AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
-AM_CXXFLAGS = $(B10_CXXFLAGS)
-
-if USE_STATIC_LINK
-AM_LDFLAGS = -static
-endif
-
-TESTS =
-if HAVE_GTEST
-TESTS += run_unittests
-run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += fd_tests.cc
-run_unittests_SOURCES += fd_share_tests.cc
-
-run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la
-run_unittests_LDADD += \
- $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
-endif
-
-noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/util/io/tests/fd_share_tests.cc b/src/lib/util/io/tests/fd_share_tests.cc
deleted file mode 100644
index 0902ce0..0000000
--- a/src/lib/util/io/tests/fd_share_tests.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include "../fd.h"
-#include "../fd_share.h"
-
-#include <util/unittests/fork.h>
-
-#include <gtest/gtest.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <cstdio>
-
-using namespace isc::util::io;
-using namespace isc::util::unittests;
-
-namespace {
-
-// We test that we can transfer a pipe over other pipe
-TEST(FDShare, transfer) {
- // Get a pipe and fork
- int pipes[2];
- ASSERT_NE(-1, socketpair(AF_UNIX, SOCK_STREAM, 0, pipes));
- pid_t sender(fork());
- ASSERT_NE(-1, sender);
- if(sender) { // We are in parent
- // Close the other side of pipe, we want only writible one
- EXPECT_NE(-1, close(pipes[0]));
- // Get a process to check data
- int fd(0);
- pid_t checker(check_output(&fd, "data", 4));
- ASSERT_NE(-1, checker);
- // Now, send the file descriptor, close it and close the pipe
- EXPECT_NE(-1, send_fd(pipes[1], fd));
- EXPECT_NE(-1, close(pipes[1]));
- EXPECT_NE(-1, close(fd));
- // Check both subprocesses ended well
- EXPECT_TRUE(process_ok(sender));
- EXPECT_TRUE(process_ok(checker));
- } else { // We are in child. We do not use ASSERT here
- // Close the write end, we only read
- if(close(pipes[1])) {
- exit(1);
- }
- // Get the file descriptor
- int fd(recv_fd(pipes[0]));
- if(fd == -1) {
- exit(1);
- }
- // This pipe is not needed
- if(close(pipes[0])) {
- exit(1);
- }
- // Send "data" trough the received fd, close it and be done
- if(!write_data(fd, "data", 4) || close(fd) == -1) {
- exit(1);
- }
- exit(0);
- }
-}
-
-}
diff --git a/src/lib/util/io/tests/fd_tests.cc b/src/lib/util/io/tests/fd_tests.cc
deleted file mode 100644
index 12b70d8..0000000
--- a/src/lib/util/io/tests/fd_tests.cc
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include "../fd.h"
-
-#include <util/unittests/fork.h>
-
-#include <gtest/gtest.h>
-
-using namespace isc::util::io;
-using namespace isc::util::unittests;
-
-namespace {
-
-// Make sure the test is large enough and does not fit into one
-// read or write request
-const size_t TEST_DATA_SIZE = 8 * 1024 * 1024;
-
-class FDTest : public ::testing::Test {
- public:
- unsigned char *data, *buffer;
- FDTest() :
- // We do not care what is inside, we just need it to be the same
- data(new unsigned char[TEST_DATA_SIZE]),
- buffer(NULL)
- { }
- ~ FDTest() {
- delete[] data;
- delete[] buffer;
- }
-};
-
-// Test we read what was sent
-TEST_F(FDTest, read) {
- int read_pipe(0);
- buffer = new unsigned char[TEST_DATA_SIZE];
- pid_t feeder(provide_input(&read_pipe, data, TEST_DATA_SIZE));
- ASSERT_GE(feeder, 0);
- ssize_t received(read_data(read_pipe, buffer, TEST_DATA_SIZE));
- EXPECT_TRUE(process_ok(feeder));
- EXPECT_EQ(TEST_DATA_SIZE, received);
- EXPECT_EQ(0, memcmp(data, buffer, received));
-}
-
-// Test we write the correct thing
-TEST_F(FDTest, write) {
- int write_pipe(0);
- pid_t checker(check_output(&write_pipe, data, TEST_DATA_SIZE));
- ASSERT_GE(checker, 0);
- EXPECT_TRUE(write_data(write_pipe, data, TEST_DATA_SIZE));
- EXPECT_EQ(0, close(write_pipe));
- EXPECT_TRUE(process_ok(checker));
-}
-
-}
diff --git a/src/lib/util/io/tests/run_unittests.cc b/src/lib/util/io/tests/run_unittests.cc
deleted file mode 100644
index e787ab1..0000000
--- a/src/lib/util/io/tests/run_unittests.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <gtest/gtest.h>
-
-int
-main(int argc, char *argv[]) {
- ::testing::InitGoogleTest(&argc, argv);
-
- return RUN_ALL_TESTS();
-}
diff --git a/src/lib/util/io_utilities.h b/src/lib/util/io_utilities.h
index ecab3ce..61d4c9c 100644
--- a/src/lib/util/io_utilities.h
+++ b/src/lib/util/io_utilities.h
@@ -48,13 +48,54 @@ readUint16(const void* buffer) {
/// \param value 16-bit value to convert
/// \param buffer Data buffer at least two bytes long into which the 16-bit
/// value is written in network-byte order.
-
-inline void
+///
+/// \return pointer to the next byte after stored value
+inline uint8_t*
writeUint16(uint16_t value, void* buffer) {
uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
byte_buffer[0] = static_cast<uint8_t>((value & 0xff00U) >> 8);
byte_buffer[1] = static_cast<uint8_t>(value & 0x00ffU);
+
+ return (byte_buffer + sizeof(uint16_t));
+}
+
+/// \brief Read Unsigned 32-Bit Integer from Buffer
+///
+/// \param buffer Data buffer at least four bytes long of which the first four
+/// bytes are assumed to represent a 32-bit integer in network-byte
+/// order.
+///
+/// \return Value of 32-bit unsigned integer
+inline uint32_t
+readUint32(const uint8_t* buffer) {
+ const uint8_t* byte_buffer = static_cast<const uint8_t*>(buffer);
+
+ uint32_t result = (static_cast<uint32_t>(byte_buffer[0])) << 24;
+ result |= (static_cast<uint32_t>(byte_buffer[1])) << 16;
+ result |= (static_cast<uint32_t>(byte_buffer[2])) << 8;
+ result |= (static_cast<uint32_t>(byte_buffer[3]));
+
+ return (result);
+}
+
+/// \brief Write Unsigned 32-Bit Integer to Buffer
+///
+/// \param value 32-bit value to convert
+/// \param buffer Data buffer at least four bytes long into which the 32-bit
+/// value is written in network-byte order.
+///
+/// \return pointer to the next byte after stored value
+inline uint8_t*
+writeUint32(uint32_t value, uint8_t* buffer) {
+ uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
+
+ byte_buffer[0] = static_cast<uint8_t>((value & 0xff000000U) >> 24);
+ byte_buffer[1] = static_cast<uint8_t>((value & 0x00ff0000U) >> 16);
+ byte_buffer[2] = static_cast<uint8_t>((value & 0x0000ff00U) >> 8);
+ byte_buffer[3] = static_cast<uint8_t>((value & 0x000000ffU));
+
+ return (byte_buffer + sizeof(uint32_t));
}
} // namespace util
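
The returned pointers allow chained writes; a small round-trip sketch using
only the helpers above together with the existing writeUint16():

    uint8_t buf[6];
    uint8_t* p = isc::util::writeUint32(0x12345678, buf);
    p = isc::util::writeUint16(0xabcd, p);              // continues after the 32 bits
    const uint32_t value = isc::util::readUint32(buf);  // == 0x12345678
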
diff --git a/src/lib/util/python/Makefile.am b/src/lib/util/python/Makefile.am
new file mode 100644
index 0000000..81d528c
--- /dev/null
+++ b/src/lib/util/python/Makefile.am
@@ -0,0 +1 @@
+noinst_SCRIPTS = gen_wiredata.py mkpywrapper.py
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
new file mode 100755
index 0000000..8bd2b3c
--- /dev/null
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -0,0 +1,1232 @@
+#!@PYTHON@
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Generator of various types of DNS data in the hex format.
+
+This script reads a human readable specification file (called "spec
+file" hereafter) that defines some type of DNS data (an RDATA, an RR,
+or a complete message) and dumps the defined data to a separate file
+as a "wire format" sequence parsable by the
+UnitTestUtil::readWireData() function (currently defined as part of
+libdns++ tests). Many DNS related tests involve wire format test
+data, so it will be convenient if we can define the data in a more
+intuitive way than writing the entire hex sequence by hand.
+
+Here is a simple example. Consider the following spec file:
+
+ [custom]
+ sections: a
+ [a]
+ as_rr: True
+
+When the script reads this file, it detects that the file specifies a single
+component (called a "section" here) that consists of a single A RDATA,
+which must be dumped as an RR (not only the RDATA part). It then
+dumps the following content:
+
+ # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=4)
+ 076578616d706c6503636f6d00 0001 0001 00015180 0004
+ # Address=192.0.2.1
+ c0000201
+
+As can be seen, the script automatically completes all variable
+parameters of RRs: owner name, class, TTL, RDATA length and data. For
+testing purposes many of these will be the same common values (like
+"example.com" or 192.0.2.1), so it is convenient that we only have
+to specify the non-default parameters. To change the RDATA (i.e., the
+IPv4 address), we should add the following line at the end of the spec
+file:
+
+ address: 192.0.2.2
+
+Then the last two lines of the output file will be as follows:
+
+ # Address=192.0.2.2
+ c0000202
+
+In some cases we would rather specify malformed data for tests. This
+script has the ability to specify broken parameters for many types of
+data. For example, we can generate data that would look like an A RR
+but the RDLEN is 3 by adding the following line to the spec file:
+
+ rdlen: 3
+
+Then the first two lines of the output file will be as follows:
+
+ # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=3)
+ 076578616d706c6503636f6d00 0001 0001 00015180 0003
+
+** USAGE **
+
+ gen_wiredata.py [-o output_file] spec_file
+
+If the -o option is missing, and if the spec_file has a suffix (such as
+in the form of "data.spec"), the output file name will be the prefix
+part of it (as in "data"); if -o is missing and the spec_file does not
+have a suffix, the script will fail.
+
+** SPEC FILE SYNTAX **
+
+A spec file accepted in this script should be in the form of a
+configuration file that is parsable by the Python's standard
+configparser module. In short, it consists of sections; each section
+is identified in the form of [section_name] followed by "name: value"
+entries. Lines beginning with # or ; will be treated as comments.
+Refer to the configparser module documentation for further details of
+the general syntax.
+
+This script has two major modes: the custom mode and the DNS query
+mode. The former generates an arbitrary combination of DNS message
+header, question section, RDATAs or RRs. It is mainly intended to
+generate a test data for a single type of RDATA or RR, or for
+complicated complete DNS messages. The DNS query mode is actually a
+special case of the custom mode, which is a shortcut to generate a
+simple DNS query message (with or without EDNS).
+
+* Custom mode syntax *
+
+By default this script assumes the DNS query mode. To specify the
+custom mode, there must be a special "custom" section in the spec
+file, which should contain 'sections' entry. This value of this
+entryis colon-separated string fields, each of which is either
+"header", "question", "edns", "name", or a string specifying an RR
+type. For RR types the string is lower-cased string mnemonic that
+identifies the type: 'a' for type A, 'ns' for type NS, and so on
+(note: in the current implementation it's case sensitive, and must be
+lower cased).
+
+Each of these fields is interpreted as a section name of the spec
+(configuration), and in that section parameters specific to the
+semantics of the field can be configured.
+
+A "header" section specifies the content of a DNS message header.
+See the documentation of the DNSHeader class of this module for
+configurable parameters.
+
+A "question" section specifies the content of a single question that
+is normally to be placed in the Question section of a DNS message.
+See the documentation of the DNSQuestion class of this module for
+configurable parameters.
+
+An "edns" section specifies the content of an EDNS OPT RR. See the
+documentation of the EDNS class of this module for configurable
+parameters.
+
+A "name" section specifies a domain name with or without compression.
+This is specifically intended to be used for testing name related
+functionalities and would rarely be used with other sections. See the
+documentation of the Name class of this module for configurable
+parameters.
+
+In a specific section for an RR or RDATA, possible entries depend on
+the type. But there are some common configurable entries. See the
+description of the RR class. The most important one would be "as_rr".
+It controls whether the entry should be treated as an RR (with name,
+type, class and TTL) or only as an RDATA. By default as_rr is
+"False", so if an entry is to be intepreted as an RR, an as_rr entry
+must be explicitly specified with a value of "True".
+
+Another common entry is "rdlen". It specifies the RDLEN field value
+of the RR (note: this is included when the entry is interpreted as
+RDATA, too). By default this value is automatically determined by the
+RR type and (it has a variable length) from other fields of RDATA, but
+as shown in the above example, it can be explicitly set, possibly to a
+bogus value for testing against invalid data.
+
+For type specific entries (and their defaults when provided), see the
+documentation of the corresponding Python class defined in this
+module. In general, there should be a class named the same mnemonic
+of the corresponding RR type for each supported type, and they are a
+subclass of the RR class. For example, the "NS" class is defined for
+RR type NS.
+
+Look again at the A RR example shown at the beginning of this
+description. There's a "custom" section, which consists of a
+"sections" entry whose value is a single "a", which means the data to
+be generated is an A RR or RDATA. There's a corresponding "a"
+section, which only specifies that it should be interpreted as an RR
+(all field values of the RR are derived from the default).
+
+If you want to generate a data sequence for two or more RRs or
+RDATAs, you can specify them in the form of colon-separated fields for
+the "sections" entry. For example, to generate a sequence of A and NS
+RRs in that order, the "custom" section would be something like this:
+
+ [custom]
+ sections: a:ns
+
+and there must be an "ns" section in addtion to "a".
+
+If a sequence of two or more RRs/RDATAs of the same RR type should be
+generated, these should be uniquely indexed with the "/" separator.
+For example, to generate two A RRs, the "custom" section would be as
+follows:
+
+ [custom]
+ sections: a/1:a/2
+
+and there must be "a/1" and "a/2" sections.
+
+Another practical example that would be used for many tests is to
+generate data for a complete DNS response message. The spec file for
+such an example configuration would look as follows:
+
+ [custom]
+ sections: header:question:a
+ [header]
+ qr: 1
+ ancount: 1
+ [question]
+ [a]
+ as_rr: True
+
+With this configuration, this script will generate test data for a DNS
+response to a query for example.com/IN/A containing one corresponding
+A RR in the answer section.
+
+* DNS query mode syntax *
+
+If the spec file does not contain a "custom" section (that has a
+"sections" entry), this script assumes the DNS query mode. This mode
+is actually a special case of custom mode; it implicitly assumes the
+"sections" entry whose value is "header:question:edns".
+
+In this mode it is expected that the spec file also contains at least
+a "header" and "question" sections, and optionally an "edns" section.
+But the script does not warn or fail even if the expected sections are
+missing.
+
+* Entry value types *
+
+As described above, a section of the spec file accepts entries
+specific to the semantics of the section. They generally correspond
+to DNS message or RR fields.
+
+Many of them are expected to be integral values, for which either decimal or
+hexadecimal representation is accepted, for example:
+
+ rr_ttl: 3600
+ tag: 0x1234
+
+Some others are expected to be strings. A string value does not have
+to be quoted:
+
+ address: 192.0.2.2
+
+but can also be quoted with single quotes:
+
+ address: '192.0.2.2'
+
+Note 1: a string that can be interpreted as an integer must be quoted.
+For example, if you want to set a "string" entry to "3600", it should
+be:
+
+ string: '3600'
+
+instead of
+
+ string: 3600
+
+Note 2: a string enclosed with double quotes is not accepted:
+
+ # This doesn't work:
+ address: "192.0.2.2"
+
+In general, string values are converted to hexadecimal sequences
+according to the semantics of the entry. For instance, a textual IPv4
+address in the above example will be converted to a hexadecimal
+sequence corresponding to a 4-byte integer. So, in many cases, the
+acceptable syntax for a particular string entry value should be
+obvious from the context. There are still some exceptional cases
+especially for complicated RR field values, for which the
+corresponding class documentation should be referenced.
+
+One special string syntax that would be worth noting is domain names,
+which would natually be used in many kinds of entries. The simplest
+form of acceptable syntax is a textual representation of domain names
+such as "example.com" (note: names are always assumed to be
+"absolute", so the trailing dot can be omitted). But a domain name in
+the wire format can also contain a compression pointer. This script
+provides a simple support for name compression with a special notation
+of "ptr=nn" where nn is the numeric pointer value (decimal). For example,
+if the NSDNAME field of an NS RDATA is specified as follows:
+
+ nsname: ns.ptr=12
+
+this script will generate the following output:
+
+ # NS name=ns.ptr=12
+ 026e73c00c
+
+** EXTEND THE SCRIPT **
+
+This script is expected to be extended as we add more support for
+various types of RR. It is encouraged to add support for a new type
+of RR to this script as we see the need for testing that type. Here
+is a brief description of how to do that.
+
+Assume you are adding support for "FOO" RR. Also assume that the FOO
+RDATA contains a single field named "value".
+
+What you are expected to do is as follows:
+
+- Define a new class named "FOO" inherited from the RR class. Also
+ define a class variable named "value" for the FOO RDATA field (the
+ variable name can be different from the field name, but it's
+ convenient if it can be easily identifiable.) with an appropriate
+ default value (if possible):
+
+ class FOO(RR):
+ value = 10
+
+ The name of the variable will be (automatically) used as the
+ corresponding entry name in the spec file. So, a spec file that
+ sets this field to 20 would look like this:
+
+ [foo]
+ value: 20
+
+- Define the "dump()" method for class FOO. It must call
+ self.dump_header() (which is derived from class RR) at the
+ beginning. It then prints the RDATA field values in an appropriate
+ way. Assuming the value is a 16-bit integer field, a complete
+ dump() method would look like this:
+
+ def dump(self, f):
+ if self.rdlen is None:
+ self.rdlen = 2
+ self.dump_header(f, self.rdlen)
+ f.write('# Value=%d\\n' % (self.value))
+ f.write('%04x\\n' % (self.value))
+
+ The first f.write() call is not mandatory, but is encouraged to
+ be provided so that the generated files will be more human readable.
+ Depending on the complexity of the RDATA fields, the dump()
+ implementation would be more complicated. In particular, if the
+ RDATA length is variable and the RDLEN field value is not specified
+ in the spec file, the dump() method is normally expected to
+ calculate the correct length and pass it to dump_header(). See the
+ implementation of various derived classes of class RR for actual
+ examples.
+"""
+
+import configparser, re, time, socket, sys
+from datetime import datetime
+from optparse import OptionParser
+
+re_hex = re.compile(r'^0x[0-9a-fA-F]+')
+re_decimal = re.compile(r'^\d+$')
+re_string = re.compile(r"\'(.*)\'$")
+
+dnssec_timefmt = '%Y%m%d%H%M%S'
+
+dict_qr = { 'query' : 0, 'response' : 1 }
+dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
+ 'update' : 5 }
+rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
+dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
+ 'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
+ 'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
+rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
+dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
+ 'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
+ 'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
+ 'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
+ 'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
+ 'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
+ 'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
+ 'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
+ 'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
+ 'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
+ 'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
+ 'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
+ 'maila' : 254, 'any' : 255 }
+rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
+dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
+rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
+ dict_rrclass.keys()])
+dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
+ 'rsasha1' : 5 }
+dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
+rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
+ dict_algorithm.keys()])
+rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
+ dict_nsec3_algorithm.keys()])
+
+header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
+ 'rcode' : dict_rcode }
+question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
+
+def parse_value(value, xtable = {}):
+ if re.search(re_hex, value):
+ return int(value, 16)
+ if re.search(re_decimal, value):
+ return int(value)
+ m = re.match(re_string, value)
+ if m:
+ return m.group(1)
+ lovalue = value.lower()
+ if lovalue in xtable:
+ return xtable[lovalue]
+ return value
+
+def code_totext(code, dict):
+ if code in dict.keys():
+ return dict[code] + '(' + str(code) + ')'
+ return str(code)
+
+def encode_name(name, absolute=True):
+ # make sure the name is dot-terminated. duplicate dots will be ignored
+ # below.
+ name += '.'
+ labels = name.split('.')
+ wire = ''
+ for l in labels:
+ if len(l) > 4 and l[0:4] == 'ptr=':
+ # special meta-syntax for compression pointer
+ wire += '%04x' % (0xc000 | int(l[4:]))
+ break
+ if absolute or len(l) > 0:
+ wire += '%02x' % len(l)
+ wire += ''.join(['%02x' % ord(ch) for ch in l])
+ if len(l) == 0:
+ break
+ return wire
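+# Illustrative only (not part of the original script): encode_name()
+# renders a name as hexadecimal label data, e.g.
+#   encode_name('a.example')  -> '0161076578616d706c6500'
+#   encode_name('ptr=12')     -> 'c00c'  (compression pointer to offset 12)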
+
+def encode_string(name, len=None):
+ if type(name) is int and len is not None:
+ return '%0.*x' % (len * 2, name)
+ return ''.join(['%02x' % ord(ch) for ch in name])
+
+def count_namelabels(name):
+ if name == '.': # special case
+ return 0
+ m = re.match(r'^(.*)\.$', name)
+ if m:
+ name = m.group(1)
+ return len(name.split('.'))
+
+def get_config(config, section, configobj, xtables = {}):
+ try:
+ for field in config.options(section):
+ value = config.get(section, field)
+ if field in xtables.keys():
+ xtable = xtables[field]
+ else:
+ xtable = {}
+ configobj.__dict__[field] = parse_value(value, xtable)
+ except configparser.NoSectionError:
+ return False
+ return True
+
+def print_header(f, input_file):
+ f.write('''###
+### This data file was auto-generated from ''' + input_file + '''
+###
+''')
+
+class Name:
+ '''Implements rendering a single domain name in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - name (string): A textual representation of the name, such as
+ 'example.com'.
+ - pointer (int): If specified, compression pointer will be
+ prepended to the generated data with the offset being the value
+ of this parameter.
+ '''
+
+ name = 'example.com'
+ pointer = None # no compression by default
+ def dump(self, f):
+ name = self.name
+ if self.pointer is not None:
+ if len(name) > 0 and name[-1] != '.':
+ name += '.'
+ name += 'ptr=%d' % self.pointer
+ name_wire = encode_name(name)
+ f.write('\n# DNS Name: %s' % self.name)
+ if self.pointer is not None:
+ f.write(' + compression pointer: %d' % self.pointer)
+ f.write('\n')
+ f.write('%s' % name_wire)
+ f.write('\n')
+
+class DNSHeader:
+ '''Implements rendering a DNS Header section in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - id (16-bit int):
+ - qr, aa, tc, rd, ra, ad, cd (0 or 1): Standard header bits as
+ defined in RFC1035 and RFC4035. If set to 1, the corresponding
+ bit will be set; if set to 0, it will be cleared.
+ - mbz (0-3): The reserved field of the 3rd and 4th octets of the
+ header.
+ - rcode (4-bit int or string): The RCODE field. If specified as a
+ string, it must be the commonly used textual mnemonic of the RCODEs
+ (NOERROR, FORMERR, etc, case insensitive).
+ - opcode (4-bit int or string): The OPCODE field. If specified as
+ a string, it must be the commonly used textual mnemonic of the
+ OPCODEs (QUERY, NOTIFY, etc, case insensitive).
+ - qdcount, ancount, nscount, arcount (16-bit int): The QD/AN/NS/AR
+ COUNT fields, respectively.
+ '''
+
+ id = 0x1035
+ (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
+ mbz = 0
+ rcode = 0 # noerror
+ opcode = 0 # query
+ (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
+
+ def dump(self, f):
+ f.write('\n# Header Section\n')
+ f.write('# ID=' + str(self.id))
+ f.write(' QR=' + ('Response' if self.qr else 'Query'))
+ f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
+ f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
+ f.write('%s' % (' AA' if self.aa else ''))
+ f.write('%s' % (' TC' if self.tc else ''))
+ f.write('%s' % (' RD' if self.rd else ''))
+ f.write('%s' % (' AD' if self.ad else ''))
+ f.write('%s' % (' CD' if self.cd else ''))
+ f.write('\n')
+ f.write('%04x ' % self.id)
+ flag_and_code = 0
+ flag_and_code |= (self.qr << 15 | self.opcode << 11 | self.aa << 10 |
+ self.tc << 9 | self.rd << 8 | self.ra << 7 |
+ self.mbz << 6 | self.ad << 5 | self.cd << 4 |
+ self.rcode)
+ f.write('%04x\n' % flag_and_code)
+ f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
+ (self.qdcount, self.ancount, self.nscount, self.arcount))
+ f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
+ self.nscount, self.arcount))
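+ # Illustrative only (not part of the original script): with the default
+ # parameters above, the comment lines are followed by the data lines
+ #   1035 0000
+ #   0001 0000 0000 0000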
+
+class DNSQuestion:
+ '''Implements rendering a DNS question in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - name (string): The QNAME. The string must be interpreted as a
+ valid domain name.
+ - rrtype (int or string): The question type. If specified as an
+ integer, it must be the 16-bit RR type value. If specified as a
+ string, it must be the textual mnemonic of the type.
+ - rrclass (int or string): The question class. If specified as an
+ integer, it must be the 16-bit RR class value. If specified as a
+ string, it must be the textual mnemonic of the class.
+ '''
+ name = 'example.com.'
+ rrtype = parse_value('A', dict_rrtype)
+ rrclass = parse_value('IN', dict_rrclass)
+
+ def dump(self, f):
+ f.write('\n# Question Section\n')
+ f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
+ (self.name,
+ code_totext(self.rrtype, rdict_rrtype),
+ code_totext(self.rrclass, rdict_rrclass)))
+ f.write(encode_name(self.name))
+ f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
+
+class EDNS:
+ '''Implements rendering EDNS OPT RR in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - name (string): The owner name of the OPT RR. The string must be
+ interpreted as a valid domain name.
+ - udpsize (16-bit int): The UDP payload size (set as the RR class)
+ - extrcode (8-bit int): The upper 8 bits of the extended RCODE.
+ - version (8-bit int): The EDNS version.
+ - do (int): The DNSSEC DO bit. The bit will be set if this value
+ is 1; otherwise the bit will be unset.
+ - mbz (15-bit int): The rest of the flags field.
+ - rdlen (16-bit int): The RDLEN field. Note: right now specifying
+ a non-zero value (other than to make bogus data) doesn't make sense
+ because there is no way to configure RDATA.
+ '''
+ name = '.'
+ udpsize = 4096
+ extrcode = 0
+ version = 0
+ do = 0
+ mbz = 0
+ rdlen = 0
+ def dump(self, f):
+ f.write('\n# EDNS OPT RR\n')
+ f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
+ (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
+ self.udpsize, self.extrcode, self.version,
+ 1 if self.do else 0))
+
+ code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
+ extflags = (self.do << 15) | (self.mbz & ~0x8000)
+ f.write('%s %04x %04x %04x %04x\n' %
+ (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
+ code_vers, extflags))
+ f.write('# RDLEN=%d\n' % self.rdlen)
+ f.write('%04x\n' % self.rdlen)
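+ # Illustrative only (not part of the original script): with the default
+ # parameters above, the generated data lines are
+ #   00 0029 1000 0000 0000
+ #   0000
+ # i.e. root owner name, type OPT (41), UDP size 4096, zero extended
+ # RCODE/version/flags, and RDLEN 0.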
+
+class RR:
+ '''This is a base class for various types of RR test data.
+ For each RR type (A, AAAA, NS, etc), we define a derived class of RR
+ to dump type specific RDATA parameters. This class defines parameters
+ common to all types of RDATA, namely the owner name, RR class and TTL.
+ The dump() methods of derived classes are expected to call dump_header(),
+ whose default implementation is provided in this class. This method
+ decides whether to dump the test data as an RR (with name, type, class)
+ or only as RDATA (with its length), and dumps the corresponding data
+ via the specified file object.
+
+ By convention we assume derived classes are named after the common
+ standard mnemonic of the corresponding RR types. For example, the
+ derived class for the RR type SOA should be named "SOA".
+
+ Configurable parameters are as follows:
+ - as_rr (bool): Whether or not the data is to be dumped as an RR.
+ False by default.
+ - rr_name (string): The owner name of the RR. The string must be
+ interpreted as a valid domain name (compression pointer can be
+ contained). Default is 'example.com.'
+ - rr_class (string): The RR class of the data. Only meaningful
+ when the data is dumped as an RR. Default is 'IN'.
+ - rr_ttl (int): The TTL value of the RR. Only meaningful when
+ the data is dumped as an RR. Default is 86400 (1 day).
+ - rdlen (int): 16-bit RDATA length. It can be None (i.e. omitted
+ in the spec file), in which case the actual length of the
+ generated RDATA is automatically determined and used; if
+ negative, the RDLEN field will be omitted from the output data.
+ (Note that omitting RDLEN with as_rr being True is mostly
+ meaningless, although the script doesn't complain about it).
+ Default is None.
+ '''
+
+ def __init__(self):
+ self.as_rr = False
+ # only when as_rr is True, same for class/TTL:
+ self.rr_name = 'example.com'
+ self.rr_class = 'IN'
+ self.rr_ttl = 86400
+ self.rdlen = None
+
+ def dump_header(self, f, rdlen):
+ type_txt = self.__class__.__name__
+ type_code = parse_value(type_txt, dict_rrtype)
+ rdlen_spec = ''
+ rdlen_data = ''
+ if rdlen >= 0:
+ rdlen_spec = ', RDLEN=%d' % rdlen
+ rdlen_data = '%04x' % rdlen
+ if self.as_rr:
+ rrclass = parse_value(self.rr_class, dict_rrclass)
+ f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d%s)\n' %
+ (type_txt, self.rr_name,
+ code_totext(rrclass, rdict_rrclass), self.rr_ttl,
+ rdlen_spec))
+ f.write('%s %04x %04x %08x %s\n' %
+ (encode_name(self.rr_name), type_code, rrclass,
+ self.rr_ttl, rdlen_data))
+ else:
+ f.write('\n# %s RDATA%s\n' % (type_txt, rdlen_spec))
+ f.write('%s\n' % rdlen_data)
+
+class A(RR):
+ '''Implements rendering A RDATA (of class IN) in the test data format.
+
+ Configurable parameter is as follows (see the description of the
+ same name of attribute for the default value):
+ - address (string): The address field. This must be a valid textual
+ IPv4 address.
+ '''
+ RDLEN_DEFAULT = 4 # fixed by default
+ address = '192.0.2.1'
+
+ def dump(self, f):
+ if self.rdlen is None:
+ self.rdlen = self.RDLEN_DEFAULT
+ self.dump_header(f, self.rdlen)
+ f.write('# Address=%s\n' % (self.address))
+ bin_address = socket.inet_aton(self.address)
+ f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
+ bin_address[2], bin_address[3]))
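+ # Illustrative only (not part of the original script): with the default
+ # parameters (as_rr False, rdlen auto-set to 4), the generated test data
+ # looks like
+ #   # A RDATA, RDLEN=4
+ #   0004
+ #   # Address=192.0.2.1
+ #   c0000201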
+
+class AAAA(RR):
+ '''Implements rendering AAAA RDATA (of class IN) in the test data
+ format.
+
+ Configurable parameter is as follows (see the description of the
+ same name of attribute for the default value):
+ - address (string): The address field. This must be a valid textual
+ IPv6 address.
+ '''
+ RDLEN_DEFAULT = 16 # fixed by default
+ address = '2001:db8::1'
+
+ def dump(self, f):
+ if self.rdlen is None:
+ self.rdlen = self.RDLEN_DEFAULT
+ self.dump_header(f, self.rdlen)
+ f.write('# Address=%s\n' % (self.address))
+ bin_address = socket.inet_pton(socket.AF_INET6, self.address)
+ [f.write('%02x' % x) for x in bin_address]
+ f.write('\n')
+
+class NS(RR):
+ '''Implements rendering NS RDATA in the test data format.
+
+ Configurable parameter is as follows (see the description of the
+ same name of attribute for the default value):
+ - nsname (string): The NSDNAME field. The string must be
+ interpreted as a valid domain name.
+ '''
+
+ nsname = 'ns.example.com'
+
+ def dump(self, f):
+ nsname_wire = encode_name(self.nsname)
+ if self.rdlen is None:
+ self.rdlen = int(len(nsname_wire) / 2)
+ self.dump_header(f, self.rdlen)
+ f.write('# NS name=%s\n' % (self.nsname))
+ f.write('%s\n' % nsname_wire)
+
+class SOA(RR):
+ '''Implements rendering SOA RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - mname/rname (string): The MNAME/RNAME fields, respectively. The
+ string must be interpreted as a valid domain name.
+ - serial (32-bit int): The SERIAL field
+ - refresh (32-bit int): The REFRESH field
+ - retry (32-bit int): The RETRY field
+ - expire (32-bit int): The EXPIRE field
+ - minimum (32-bit int): The MINIMUM field
+ '''
+
+ mname = 'ns.example.com'
+ rname = 'root.example.com'
+ serial = 2010012601
+ refresh = 3600
+ retry = 300
+ expire = 3600000
+ minimum = 1200
+ def dump(self, f):
+ mname_wire = encode_name(self.mname)
+ rname_wire = encode_name(self.rname)
+ if self.rdlen is None:
+ self.rdlen = int(20 + len(mname_wire) / 2 + len(rname_wire) / 2)
+ self.dump_header(f, self.rdlen)
+ f.write('# MNAME=%s RNAME=%s\n' % (self.mname, self.rname))
+ f.write('%s %s\n' % (mname_wire, rname_wire))
+ f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
+ (self.serial, self.refresh, self.retry, self.expire,
+ self.minimum))
+ f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
+ self.retry, self.expire,
+ self.minimum))
+
+class TXT(RR):
+ '''Implements rendering TXT RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - nstring (int): number of character-strings
+ - stringlenN (int, N = 0, ..., nstring-1): the length of the
+ N-th character-string.
+ - stringN (string, N = 0, ..., nstring-1): the N-th
+ character-string.
+ - stringlen (int): the default string length. If nstring >= 1 and
+ the corresponding stringlenN isn't specified in the spec file,
+ this value will be used. If this parameter isn't specified
+ either, the actual length of the string will be used. Note that
+ this means this parameter (or any stringlenN) doesn't have to be
+ specified unless you want to intentionally build a broken
+ character string.
+ - string (string): the default string. If nstring >= 1 and the
+ corresponding stringN isn't specified in the spec file, this
+ string will be used.
+ '''
+
+ nstring = 1
+ stringlen = None
+ string = 'Test String'
+
+ def dump(self, f):
+ stringlen_list = []
+ string_list = []
+ wirestring_list = []
+ for i in range(0, self.nstring):
+ key_string = 'string' + str(i)
+ if key_string in self.__dict__:
+ string_list.append(self.__dict__[key_string])
+ else:
+ string_list.append(self.string)
+ wirestring_list.append(encode_string(string_list[-1]))
+ key_stringlen = 'stringlen' + str(i)
+ if key_stringlen in self.__dict__:
+ stringlen_list.append(self.__dict__[key_stringlen])
+ else:
+ stringlen_list.append(self.stringlen)
+ if stringlen_list[-1] is None:
+ stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
+ if self.rdlen is None:
+ self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
+ self.dump_header(f, self.rdlen)
+ for i in range(0, self.nstring):
+ f.write('# String Len=%d, String=\"%s\"\n' %
+ (stringlen_list[i], string_list[i]))
+ f.write('%02x%s%s\n' % (stringlen_list[i],
+ ' ' if len(wirestring_list[i]) > 0 else '',
+ wirestring_list[i]))
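+ # Illustrative only (a hypothetical spec file excerpt, not part of the
+ # original script): a TXT RDATA with two character-strings could be
+ # described as
+ #   [custom]
+ #   sections: txt
+ #   [txt]
+ #   nstring: 2
+ #   string0: 'foo'
+ #   string1: 'bar'
+ # leaving rdlen and the stringlenN values to be calculated automatically.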
+
+class RP(RR):
+ '''Implements rendering RP RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - mailbox (string): The mailbox field.
+ - text (string): The text field.
+ These strings must be interpreted as a valid domain name.
+ '''
+ mailbox = 'root.example.com'
+ text = 'rp-text.example.com'
+ def dump(self, f):
+ mailbox_wire = encode_name(self.mailbox)
+ text_wire = encode_name(self.text)
+ if self.rdlen is None:
+ self.rdlen = int((len(mailbox_wire) + len(text_wire)) / 2)
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
+ f.write('%s %s\n' % (mailbox_wire, text_wire))
+
+class MINFO(RR):
+ '''Implements rendering MINFO RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - rmailbox (string): The rmailbox field.
+ - emailbox (string): The emailbox field.
+ These strings must be interpreted as a valid domain name.
+ '''
+ rmailbox = 'rmailbox.example.com'
+ emailbox = 'emailbox.example.com'
+ def dump(self, f):
+ rmailbox_wire = encode_name(self.rmailbox)
+ emailbox_wire = encode_name(self.emailbox)
+ if self.rdlen is None:
+ self.rdlen = int((len(rmailbox_wire) + len(emailbox_wire)) / 2)
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# RMAILBOX=%s EMAILBOX=%s\n' % (self.rmailbox, self.emailbox))
+ f.write('%s %s\n' % (rmailbox_wire, emailbox_wire))
+
+class AFSDB(RR):
+ '''Implements rendering AFSDB RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - subtype (16 bit int): The subtype field.
+ - server (string): The server field.
+ The string must be interpreted as a valid domain name.
+ '''
+ subtype = 1
+ server = 'afsdb.example.com'
+ def dump(self, f):
+ server_wire = encode_name(self.server)
+ if self.rdlen is None:
+ self.rdlen = int(2 + len(server_wire) / 2)
+ else:
+ self.rdlen = int(self.rdlen)
+ self.dump_header(f, self.rdlen)
+ f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
+ f.write('%04x %s\n' % (self.subtype, server_wire))
+
+class NSECBASE(RR):
+ '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
+ these RRs. The NSEC and NSEC3 classes will be inherited from this
+ class.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - nbitmap (int): The number of type bitmaps.
+ The following three define the bitmaps. If suffixed with "N"
+ (0 <= N < nbitmap), it means the definition for the N-th bitmap.
+ If there is no suffix (e.g., just "block"), it means the default
+ for any unspecified values.
+ - block[N] (8-bit int): The Window Block.
+ - maplen[N] (8-bit int): The Bitmap Length. The default "maplen"
+ can also be unspecified (i.e. left set to None), in which case
+ the corresponding length will be calculated from the bitmap.
+ - bitmap[N] (string): The Bitmap. This must be the hexadecimal
+ representation of the bitmap field. For example, for a bitmap
+ where the 7th and 15th bits (and only these bits) are set, it
+ must be '0101'. Note also that the value must be quoted with
+ single quotation marks because it could otherwise be interpreted
+ as an integer.
+ '''
+ nbitmap = 1 # number of bitmaps
+ block = 0
+ maplen = None # default bitmap length, auto-calculate
+ bitmap = '040000000003' # an arbitrarily chosen bitmap sample
+ def dump(self, f):
+ # first, construct the bitmap data
+ block_list = []
+ maplen_list = []
+ bitmap_list = []
+ for i in range(0, self.nbitmap):
+ key_bitmap = 'bitmap' + str(i)
+ if key_bitmap in self.__dict__:
+ bitmap_list.append(self.__dict__[key_bitmap])
+ else:
+ bitmap_list.append(self.bitmap)
+ key_maplen = 'maplen' + str(i)
+ if key_maplen in self.__dict__:
+ maplen_list.append(self.__dict__[key_maplen])
+ else:
+ maplen_list.append(self.maplen)
+ if maplen_list[-1] is None: # calculate it if not specified
+ maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
+ key_block = 'block' + str(i)
+ if key_block in self.__dict__:
+ block_list.append(self.__dict__[key_block])
+ else:
+ block_list.append(self.block)
+
+ # dump RR-type specific part (NSEC or NSEC3)
+ self.dump_fixedpart(f, 2 * self.nbitmap + \
+ int(len(''.join(bitmap_list)) / 2))
+
+ # dump the bitmap
+ for i in range(0, self.nbitmap):
+ f.write('# Bitmap: Block=%d, Length=%d\n' %
+ (block_list[i], maplen_list[i]))
+ f.write('%02x %02x %s\n' %
+ (block_list[i], maplen_list[i], bitmap_list[i]))
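+ # Illustrative only (not part of the original script): with the defaults
+ # above (block=0, maplen auto-calculated, bitmap='040000000003'), the
+ # generated bitmap line is
+ #   00 06 040000000003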
+
+class NSEC(NSECBASE):
+ '''Implements rendering NSEC RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - Type bitmap related parameters: see class NSECBASE
+ - nextname (string): The Next Domain Name field. The string must be
+ interpreted as a valid domain name.
+ '''
+
+ nextname = 'next.example.com'
+ def dump_fixedpart(self, f, bitmap_totallen):
+ name_wire = encode_name(self.nextname)
+ if self.rdlen is None:
+ # if rdlen needs to be calculated, it must be based on the bitmap
+ # length, because the configured maplen can be fake.
+ self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
+ self.dump_header(f, self.rdlen)
+ f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
+ int(len(name_wire) / 2)))
+ f.write('%s\n' % name_wire)
+
+class NSEC3(NSECBASE):
+ '''Implements rendering NSEC3 RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - Type bitmap related parameters: see class NSECBASE
+ - hashalg (8-bit int): The Hash Algorithm field. Note that
+ currently the only defined algorithm is SHA-1, for which a value
+ of 1 will be used, and it's the default. So this implementation
+ does not support any string representation right now.
+ - optout (bool): The Opt-Out flag of the Flags field.
+ - mbz (7-bit int): The rest of the Flags field. This value will
+ be left shifted for 1 bit and then OR-ed with optout to
+ construct the complete Flags field.
+ - iterations (16-bit int): The Iterations field.
+ - saltlen (int): The Salt Length field.
+ - salt (string): The Salt field. It is converted to a sequence of
+ ASCII codes and its hexadecimal representation will be used.
+ - hashlen (int): The Hash Length field.
+ - hash (string): The Next Hashed Owner Name field. This parameter
+ is interpreted in the same way as "salt".
+ '''
+
+ hashalg = 1 # SHA-1
+ optout = False # opt-out flag
+ mbz = 0 # other flag fields (none defined yet)
+ iterations = 1
+ saltlen = 5
+ salt = 's' * saltlen
+ hashlen = 20
+ hash = 'h' * hashlen
+ def dump_fixedpart(self, f, bitmap_totallen):
+ if self.rdlen is None:
+ # if rdlen needs to be calculated, it must be based on the bitmap
+ # length, because the configured maplen can be fake.
+ self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
+ + bitmap_totallen
+ self.dump_header(f, self.rdlen)
+ optout_val = 1 if self.optout else 0
+ f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
+ (code_totext(self.hashalg, rdict_nsec3_algorithm),
+ optout_val, self.mbz, self.iterations))
+ f.write('%02x %02x %04x\n' %
+ (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
+ f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
+ f.write('%02x%s%s\n' % (self.saltlen,
+ ' ' if len(self.salt) > 0 else '',
+ encode_string(self.salt)))
+ f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
+ f.write('%02x%s%s\n' % (self.hashlen,
+ ' ' if len(self.hash) > 0 else '',
+ encode_string(self.hash)))
+
+class RRSIG(RR):
+ '''Implements rendering RRSIG RDATA in the test data format.
+
+ Configurable parameters are as follows (see the description of the
+ same name of attribute for the default value):
+ - covered (int or string): The Type Covered field. If specified
+ as an integer, it must be the 16-bit RR type value of the
+ covered type. If specified as a string, it must be the textual
+ mnemonic of the type.
+ - algorithm (int or string): The Algorithm field. If specified
+ as an integer, it must be the 8-bit algorithm number as defined
+ in RFC4034. If specified as a string, it must be one of the keys
+ of dict_algorithm (case insensitive).
+ - labels (int): The Labels field. If omitted (the corresponding
+ variable being set to None), the number of labels of "signer"
+ (excluding the trailing null label as specified in RFC4034) will
+ be used.
+ - originalttl (32-bit int): The Original TTL field.
+ - expiration (32-bit int): The Signature Expiration field.
+ - inception (32-bit int): The Signature Inception field.
+ - tag (16-bit int): The Key Tag field.
+ - signer (string): The Signer's Name field. The string must be
+ interpreted as a valid domain name.
+ - signature (int): The Signature field. Right now only a simple
+ integer form is supported. A prefix of "0" will be prepended if
+ the resulting hexadecimal representation consists of an odd
+ number of characters.
+ '''
+
+ covered = 'A'
+ algorithm = 'RSASHA1'
+ labels = None # auto-calculate (#labels of signer)
+ originalttl = 3600
+ expiration = int(time.mktime(datetime.strptime('20100131120000',
+ dnssec_timefmt).timetuple()))
+ inception = int(time.mktime(datetime.strptime('20100101120000',
+ dnssec_timefmt).timetuple()))
+ tag = 0x1035
+ signer = 'example.com'
+ signature = 0x123456789abcdef123456789abcdef
+
+ def dump(self, f):
+ name_wire = encode_name(self.signer)
+ sig_wire = '%x' % self.signature
+ if len(sig_wire) % 2 != 0:
+ sig_wire = '0' + sig_wire
+ if self.rdlen is None:
+ self.rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
+ self.dump_header(f, self.rdlen)
+
+ if type(self.covered) is str:
+ self.covered = dict_rrtype[self.covered.lower()]
+ if type(self.algorithm) is str:
+ self.algorithm = dict_algorithm[self.algorithm.lower()]
+ if self.labels is None:
+ self.labels = count_namelabels(self.signer)
+ f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
+ (code_totext(self.covered, rdict_rrtype),
+ code_totext(self.algorithm, rdict_algorithm), self.labels,
+ self.originalttl))
+ f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
+ self.labels, self.originalttl))
+ f.write('# Expiration=%s, Inception=%s\n' %
+ (str(self.expiration), str(self.inception)))
+ f.write('%08x %08x\n' % (self.expiration, self.inception))
+ f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
+ f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
+
+class TSIG(RR):
+ '''Implements rendering TSIG RDATA in the test data format.
+
+ As a meta RR type, TSIG uses some uncommon parameters. This
+ class overrides some of the default attributes of the RR class
+ accordingly:
+ - rr_class is set to 'ANY'
+ - rr_ttl is set to 0
+ Like other derived classes these can be overridden via the spec
+ file.
+
+ Other configurable parameters are as follows (see the description
+ of the same name of attribute for the default value):
+ - algorithm (string): The Algorithm Name field. The value is
+ generally interpreted as a domain name string, and will
+ typically be one of the standard algorithm names defined in
+ RFC4635. For convenience, however, a shortcut value "hmac-md5"
+ is allowed instead of the standard "hmac-md5.sig-alg.reg.int".
+ - time_signed (48-bit int): The Time Signed field.
+ - fudge (16-bit int): The Fudge field.
+ - mac_size (int): The MAC Size field. If omitted, the common value
+ determined by the algorithm will be used.
+ - mac (int or string): The MAC field. If specified as an integer,
+ the integer value is used as the MAC, possibly with prepended
+ 0's so that the total length will be mac_size. If specified as a
+ string, it is converted to a sequence of ASCII codes and its
+ hexadecimal representation will be used. So, for example, if
+ "mac" is set to 'abc', it will be converted to '616263'. Note
+ that in this case the length of "mac" may not be equal to
+ mac_size. If unspecified, mac_size repetitions of '78' (the
+ ASCII code of 'x') will be used.
+ - original_id (16-bit int): The Original ID field.
+ - error (16-bit int): The Error field.
+ - other_len (int): The Other Len field.
+ - other_data (int or string): The Other Data field. This is
+ interpreted just like "mac" except that other_len is used
+ instead of mac_size. If unspecified this will be empty unless
+ the "error" is set to 18 (which means the "BADTIME" error), in
+ which case a hexadecimal representation of "time_signed + fudge
+ + 1" will be used.
+ '''
+
+ algorithm = 'hmac-sha256'
+ time_signed = 1286978795 # arbitrarily chosen default
+ fudge = 300
+ mac_size = None # use a common value for the algorithm
+ mac = None # use 'x' * mac_size
+ original_id = 2845 # arbitrarily chosen default
+ error = 0
+ other_len = None # 6 if error is BADTIME; otherwise 0
+ other_data = None # use time_signed + fudge + 1 for BADTIME
+ dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
+
+ # TSIG has some special defaults
+ def __init__(self):
+ super().__init__()
+ self.rr_class = 'ANY'
+ self.rr_ttl = 0
+
+ def dump(self, f):
+ if str(self.algorithm) == 'hmac-md5':
+ name_wire = encode_name('hmac-md5.sig-alg.reg.int')
+ else:
+ name_wire = encode_name(self.algorithm)
+ mac_size = self.mac_size
+ if mac_size is None:
+ if self.algorithm in self.dict_macsize.keys():
+ mac_size = self.dict_macsize[self.algorithm]
+ else:
+ raise RuntimeError('TSIG Mac Size cannot be determined')
+ mac = encode_string('x' * mac_size) if self.mac is None else \
+ encode_string(self.mac, mac_size)
+ other_len = self.other_len
+ if other_len is None:
+ # 18 = BADTIME
+ other_len = 6 if self.error == 18 else 0
+ other_data = self.other_data
+ if other_data is None:
+ other_data = '%012x' % (self.time_signed + self.fudge + 1) \
+ if self.error == 18 else ''
+ else:
+ other_data = encode_string(self.other_data, other_len)
+ if self.rdlen is None:
+ self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
+ len(other_data) / 2)
+ self.dump_header(f, self.rdlen)
+ f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
+ (self.algorithm, self.time_signed, self.fudge))
+ f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
+ f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
+ f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
+ f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
+ f.write('%04x %04x\n' % (self.original_id, self.error))
+ f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
+ f.write('%04x%s\n' % (other_len,
+ ' ' + other_data if len(other_data) > 0 else ''))
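+ # Illustrative only (a hypothetical spec file excerpt, not part of the
+ # original script): a BADTIME error case could be described as
+ #   [tsig]
+ #   as_rr: 1
+ #   error: 18
+ # in which case other_len defaults to 6 and other_data is derived from
+ # time_signed + fudge + 1 as described above.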
+
+# Build section-class mapping
+config_param = { 'name' : (Name, {}),
+ 'header' : (DNSHeader, header_xtables),
+ 'question' : (DNSQuestion, question_xtables),
+ 'edns' : (EDNS, {}) }
+for rrtype in dict_rrtype.keys():
+ # For any supported RR types add the tuple of (RR_CLASS, {}).
+ # We expect KeyError as not all the types are supported, and simply
+ # ignore them.
+ try:
+ cur_mod = sys.modules[__name__]
+ config_param[rrtype] = (cur_mod.__dict__[rrtype.upper()], {})
+ except KeyError:
+ pass
+
+def get_config_param(section):
+ s = section
+ m = re.match(r'^([^:]+)/\d+$', section)
+ if m:
+ s = m.group(1)
+ return config_param[s]
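+# Illustrative only (a hypothetical spec file, not part of the original
+# script): in the default "query" mode the spec file simply customizes the
+# header/question/edns sections, e.g.
+#   [header]
+#   qr: response
+#   rcode: nxdomain
+#   [question]
+#   name: www.example.com
+#   rrtype: aaaa
+# Sections that are omitted (here [edns]) are skipped in the output.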
+
+usage = '''usage: %prog [options] input_file'''
+
+if __name__ == "__main__":
+ parser = OptionParser(usage=usage)
+ parser.add_option('-o', '--output', action='store', dest='output',
+ default=None, metavar='FILE',
+ help='output file name [default: prefix of input_file]')
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.error('input file is missing')
+ configfile = args[0]
+
+ outputfile = options.output
+ if not outputfile:
+ m = re.match(r'(.*)\.[^.]+$', configfile)
+ if m:
+ outputfile = m.group(1)
+ else:
+ raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
+
+ config = configparser.SafeConfigParser()
+ config.read(configfile)
+
+ output = open(outputfile, 'w')
+
+ print_header(output, configfile)
+
+ # First try the 'custom' mode; if it fails assume the query mode.
+ try:
+ sections = config.get('custom', 'sections').split(':')
+ except configparser.NoSectionError:
+ sections = ['header', 'question', 'edns']
+
+ for s in sections:
+ section_param = get_config_param(s)
+ (obj, xtables) = (section_param[0](), section_param[1])
+ if get_config(config, s, obj, xtables):
+ obj.dump(output)
+
+ output.close()
diff --git a/src/lib/util/python/mkpywrapper.py.in b/src/lib/util/python/mkpywrapper.py.in
new file mode 100755
index 0000000..4bf7752
--- /dev/null
+++ b/src/lib/util/python/mkpywrapper.py.in
@@ -0,0 +1,100 @@
+#!@PYTHON@
+
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""This utility program generates a C++ header and implementation files
+that can be used as a template of C++ python binding for a C++ class.
+
+Usage: ./mkpywrapper.py ClassName
+(the script should be run in this directory)
+
+It will generate two files: classname_python.h and classname_python.cc,
+many of whose definitions are in the namespace isc::MODULE_NAME::python.
+By default MODULE_NAME will be 'dns' (because this tool is originally
+intended to be used for the C++ python binding of the DNS library), but
+can be changed via the -m command line option.
+
+The generated files contain code fragments that are commonly used in
+C++ python binding implementations. It will define a class named
+s_ClassName which is a derived class of PyObject and can meet the
+requirement of the CPPPyObjectContainer template class (see
+pycppwrapper_util.h). It also defines (and declares in the header file)
+"classname_type", which is of PyTypeObject and is intended to be used
+to define details of the python bindings for the ClassName class.
+
+In many cases the header file can be used as a starting point of the
+binding development without modification. But you may want to make
+ClassName::cppobj a constant variable (and you should if you can).
+Many definitions of classname_python.cc should also be able to be used
+just as defined, but some will need to be changed or removed. In
+particular, you should at least adjust ClassName_init(). You'll
+probably also need to add more definitions to that file to provide
+complete features of the C++ class.
+"""
+
+import datetime, string, sys
+from optparse import OptionParser
+
+# Remember the current year to produce the copyright boilerplate
+YEAR = datetime.date.today().timetuple()[0]
+
+def dump_file(out_file, temp_file, class_name, module):
+ for line in temp_file.readlines():
+ line = line.replace("@YEAR@", str(YEAR))
+ line = line.replace("@CPPCLASS at _H", class_name.upper() + "_H")
+ line = line.replace("@CPPCLASS@", class_name)
+ line = line.replace("@cppclass@", class_name.lower())
+ line = line.replace("@MODULE@", module)
+ out_file.write(line)
+
+def dump_wrappers(class_name, output, module):
+ try:
+ if output == "-":
+ header_file = sys.stdout
+ else:
+ header_file = open(output + "_python.h", "w")
+ header_template_file = open("wrapper_template.h", "r")
+ if output == "-":
+ impl_file = sys.stdout
+ else:
+ impl_file = open(output + "_python.cc", "w")
+ impl_template_file = open("wrapper_template.cc", "r")
+ except:
+ sys.stderr.write('Failed to open C++ file(s)\n')
+ sys.exit(1)
+ dump_file(header_file, header_template_file, class_name, module)
+ dump_file(impl_file, impl_template_file, class_name, module)
+
+usage = '''usage: %prog [options] class_name'''
+
+if __name__ == "__main__":
+ parser = OptionParser(usage=usage)
+ parser.add_option('-o', '--output', action='store', dest='output',
+ default=None, metavar='FILE',
+ help='prefix of output file names [default: derived from the class name]')
+ parser.add_option('-m', '--module', action='store', dest='module',
+ default='dns',
+ help='C++ module name of the wrapper (for namespaces) [default: dns]')
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.error('class name is missing')
+
+ class_name = args[0]
+ if not options.output:
+ options.output = class_name.lower()
+
+ dump_wrappers(class_name, options.output, options.module)
diff --git a/src/lib/util/python/pycppwrapper_util.h b/src/lib/util/python/pycppwrapper_util.h
new file mode 100644
index 0000000..462e715
--- /dev/null
+++ b/src/lib/util/python/pycppwrapper_util.h
@@ -0,0 +1,335 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYCPPWRAPPER_UTIL_H
+#define __PYCPPWRAPPER_UTIL_H 1
+
+#include <Python.h>
+
+#include <exceptions/exceptions.h>
+
+/**
+ * @file pycppwrapper_util.h
+ * @short Shared definitions for python/C(++) API
+ *
+ * This utility defines a set of convenient wrappers for the python C API
+ * to use it safely from our C++ bindings. The python C API has many pitfalls
+ * such as not-so-consistent reference count policies. Also, many existing
+ * examples are careless about error handling. It's easy to find on the net
+ * examples (even of "production use") of python extensions like this:
+ *
+ * \code
+ * new_exception = PyErr_NewException("mymodule.Exception", NULL, NULL);
+ * // new_exception can be NULL, in which case the call to
+ * // PyModule_AddObject will cause a surprising disruption.
+ * PyModule_AddObject(mymodule, "Exception", new_exception); \endcode
+ *
+ * When using the python C API with C++, we should also be careful about
+ * exception safety. The underlying C++ code (including standard C++ libraries
+ * and memory allocation) can throw exceptions, in which case we need to
+ * make sure any intermediate python objects are cleaned up (we also need to
+ * catch the C++ exceptions inside the binding and convert them to python
+ * errors, but that's a different subject). This is not a trivial task
+ * because the python objects are represented as bare C pointers (so there's
+ * no destructor) and we need to address the exception safety along with python
+ * reference counters (so we cannot naively apply standard smart pointers).
+ *
+ * This utility tries to help address these issues.
+ *
+ * Also, it's intentional that this is a header-only utility. This way the
+ * C++ loadable module won't depend on another C++ library (which is not
+ * necessarily wrong, but would increase management cost such as link-time
+ * troubles only for a small utility feature).
+ */
+
+namespace isc {
+namespace util {
+namespace python {
+
+/// This is thrown inside this utility when it finds a NULL pointer is passed
+/// when it should not be NULL.
+class PyCPPWrapperException : public isc::Exception {
+public:
+ PyCPPWrapperException(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+/// This helper class is similar to the standard auto_ptr and manages a
+/// PyObject pointer using RAII techniques. It is, however, customized for the
+/// python C API.
+///
+/// A PyObjectContainer object is constructed with a pointer to PyObject,
+/// which is often just created dynamically. The caller will eventually
+/// attach the object to a different python object (often a module or class)
+/// via specific methods or directly return it to the python interpreter.
+///
+/// There are two cases in destructing the object: with or without decreasing
+/// a reference to the PyObject. If the object is intended to be an argument
+/// to another python C library that increases the reference to the object for
+/// itself, we should normally release our own reference; otherwise the
+/// reference will leak and the object won't be garbage collected. Also, when
+/// an unexpected error happens in the form of C++ exception, we should
+/// release the reference to prevent resource leak.
+///
+/// In some other cases, we should simply give our reference to the caller.
+/// That is the case when the created object itself is a return value of
+/// an extended python method written in the C++ binding. Likewise, some
+/// python C library functions "steal" the reference. In these cases we
+/// should not decrease the reference; otherwise it would cause duplicate free.
+///
+/// By default, the destructor of this class releases the reference to the
+/// PyObject. If this behavior is desirable, you can extract the original
+/// bare pointer to the PyObject by the \c get() method. If you don't want
+/// the reference to be decreased, the original bare pointer should be
+/// extracted using the \c release() method.
+///
+/// In some other cases, it would be convenient if it's possible to create
+/// an "empty" container and reset it with a Python object later.
+/// For example, we may want to create a temporary Python object in the
+/// middle of a function and make sure that it's valid within the rest of
+/// the function scope, while we want to make sure its reference is released
+/// when the function returns (either normally or as a result of exception).
+/// To allow this scenario, this class defines the default constructor
+/// and the \c reset() method. The default constructor allows the class
+/// object with an "empty" (NULL) Python object, while \c reset() allows
+/// the stored object to be replaced with a new one. If a valid
+/// object was already set, \c reset() releases its reference.
+/// In general, it's safer to construct the container object with a valid
+/// Python object pointer. The use of the default constructor and
+/// \c reset() should therefore be restricted to cases where it's
+/// absolutely necessary.
+///
+/// There are two convenience methods for commonly used operations:
+/// \c installAsClassVariable() to add the PyObject as a class variable
+/// and \c installToModule to add the PyObject to a specified python module.
+/// These methods (at least to some extent) take care of the reference to
+/// the object (either release or keep) depending on the usage context so
+/// that the user doesn't have to worry about it.
+///
+/// On construction, this class checks whether the given pointer is NULL;
+/// if it is, it immediately throws a \c PyCPPWrapperException exception.
+/// This behavior is to convert failures in the python C API (such as
+/// PyObject_New() returning NULL) to C++ exceptions so that we can unify
+/// error handling in the style of C++ exceptions.
+///
+/// Example 1: To create a tuple of two python objects, do this:
+///
+/// \code
+/// try {
+/// PyObjectContainer container0(Py_BuildValue("I", 0));
+/// PyObjectContainer container1(Py_BuildValue("s", cppobj.toText().c_str()));
+/// return (Py_BuildValue("OO", container0.get(), container1.get()));
+/// } catch { ... set python exception, etc ... } \endcode
+///
+/// A commonly seen buggy implementation to achieve this would look like this:
+/// \code
+/// return (Py_BuildValue("OO", Py_BuildValue("I", 0),
+/// Py_BuildValue("s", cppobj.toText().c_str())));
+/// \endcode
+/// One clear bug of this code is that references to the element objects of
+/// the tuple will leak.
+/// (Assuming \c cppobj.toText() can throw) this code is also not exception
+/// safe; if \c cppobj.toText() throws the reference to the first object
+/// will leak, even if the code tried to do the necessary cleanup in the
+/// successful case.
+/// Further, this code naively passes the result of the first two calls to
+/// \c Py_BuildValue() to the third one even if they can be NULL.
+/// In this specific case, it happens to be okay because \c Py_BuildValue()
+/// accepts NULL and treats it as an indication of error. But not every
+/// python C library function works that way (remember, the API is so
+/// inconsistent), and we need to refer to the API manual every time we
+/// have to worry about
+/// passing a NULL object to a library function. We'd certainly like to
+/// avoid such development overhead. The code using \c PyObjectContainer
+/// addresses all these problems.
+///
+/// Example 2: Install a (constant) variable to a class.
+///
+/// \code
+/// try {
+/// // installClassVariable is a wrapper of
+/// // PyObjectContainer::installAsClassVariable. See below.
+/// installClassVariable(myclass_type, "SOME_CONSTANT",
+/// Py_BuildValue("I", 0));
+/// } catch { ... }
+/// \endcode
+///
+/// Example 3: Install a custom exception to a module.
+///
+/// \code
+/// PyObject* new_exception; // publicly visible
+/// ...
+/// try {
+/// new_exception = PyErr_NewException("mymodule.NewException",
+/// NULL, NULL);
+/// PyObjectContainer(new_exception).installToModule(mymodule,
+/// "NewException");
+/// } catch { ... }
+/// \endcode
+///
+/// Note that \c installToModule() keeps the reference to \c new_exception
+/// by default. This is a common practice when we introduce a custom
+/// exception in a python binding written in C/C++. See the code comment
+/// of the method for more details.
+struct PyObjectContainer {
+ PyObjectContainer() : obj_(NULL) {}
+ PyObjectContainer(PyObject* obj) : obj_(obj) {
+ if (obj_ == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL PyObject, "
+ "probably due to short memory");
+ }
+ }
+ ~PyObjectContainer() {
+ if (obj_ != NULL) {
+ Py_DECREF(obj_);
+ }
+ }
+ void reset(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL PyObject, "
+ "probably due to short memory");
+ }
+ if (obj_ != NULL) {
+ Py_DECREF(obj_);
+ }
+ obj_ = obj;
+ }
+ PyObject* get() {
+ return (obj_);
+ }
+ PyObject* release() {
+ PyObject* ret = obj_;
+ obj_ = NULL;
+ return (ret);
+ }
+
+ // Install the enclosed PyObject to the specified python class 'pyclass'
+ // as a variable named 'name'.
+ void installAsClassVariable(PyTypeObject& pyclass, const char* name) {
+ if (PyDict_SetItemString(pyclass.tp_dict, name, obj_) < 0) {
+ isc_throw(PyCPPWrapperException, "Failed to set a class variable, "
+ "probably due to short memory");
+ }
+ // Ownership successfully transferred to the class object. We'll let
+ // it be released in the destructor.
+ }
+
+ // Install the enclosed PyObject to the specified module 'mod' as an
+ // object named 'name'.
+ // By default, this method explicitly keeps the reference to the object
+ // even after the module "steals" it. To cancel this behavior and give
+ // the reference to the module completely, the third parameter 'keep_ref'
+ // should be set to false.
+ void installToModule(PyObject* mod, const char* name,
+ bool keep_ref = true)
+ {
+ if (PyModule_AddObject(mod, name, obj_) < 0) {
+ isc_throw(PyCPPWrapperException, "Failed to add an object to "
+ "module, probably due to short memory");
+ }
+ // PyModule_AddObject has "stolen" the reference, so unless we
+ // have to retain it ourselves we don't (shouldn't) decrease it.
+ // However, we actually often need to keep our own reference because
+ // objects added to a module are often referenced via non local
+ // C/C++ variables in various places of the C/C++ code. In order
+ // for the code to run safely even if some buggy/evil python program
+ // performs 'del mod.obj', we need the extra reference. See, e.g.:
+ // http://docs.python.org/py3k/c-api/init.html#Py_Initialize
+ // http://mail.python.org/pipermail/python-dev/2005-June/054238.html
+ if (keep_ref) {
+ Py_INCREF(obj_);
+ }
+ obj_ = NULL;
+ }
+
+protected:
+ PyObject* obj_;
+};
+
+/// This templated class is a derived class of \c PyObjectContainer and
+/// manages C++-class based python objects.
+///
+/// The template parameter \c PYSTRUCT must be a derived class (structure) of
+/// \c PyObject that has a member variable named \c cppobj, which must be
+/// a pointer to \c CPPCLASS (the second template parameter).
+///
+/// For example, to define a custom python class based on a C++ class, MyClass,
+/// we'd define a class (struct) named \c s_MyClass like this:
+/// \code
+/// class s_MyClass : public PyObject {
+/// public:
+/// s_MyClass() : cppobj(NULL) {}
+/// MyClass* cppobj;
+/// };
+/// \endcode
+///
+/// And, to build and return a python version of MyClass object, write the
+/// following C++ code:
+/// \code
+/// typedef CPPPyObjectContainer<s_MyClass, MyClass> MyContainer;
+/// try {
+/// // below, myclass_type is of \c PyTypeObject that defines
+/// // a python class (type) for MyClass
+/// MyContainer container(PyObject_New(s_MyClass, myclass_type));
+/// container.set(new MyClass());
+/// return (container.release()); // give the reference to the caller
+/// } catch { ... }
+/// \endcode
+///
+/// This code prevents bugs like NULL pointer dereference when \c PyObject_New
+/// fails or resource leaks when new'ing \c MyClass results in an exception.
+/// Note that we use \c release() (derived from the base class) instead of
+/// \c get(); in this case we should simply pass the reference generated in
+/// \c PyObject_New() to the caller.
+template <typename PYSTRUCT, typename CPPCLASS>
+struct CPPPyObjectContainer : public PyObjectContainer {
+ explicit CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
+
+ // This method associates a C++ object with the corresponding python
+ // object enclosed in this class.
+ void set(CPPCLASS* value) {
+ if (value == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
+ }
+ static_cast<PYSTRUCT*>(obj_)->cppobj = value;
+ }
+
+ // This is a convenience short cut to associate a C++ object with the
+ // python object and install it to the specified python class \c pyclass
+ // as a variable named \c name.
+ void installAsClassVariable(PyTypeObject& pyclass, const char* name,
+ CPPCLASS* value)
+ {
+ set(value);
+ PyObjectContainer::installAsClassVariable(pyclass, name);
+ }
+};
+
+/// A shortcut function to install a python class variable.
+///
+/// It installs a python object \c obj to a specified class \c pyclass
+/// as a variable named \c name.
+inline void
+installClassVariable(PyTypeObject& pyclass, const char* name, PyObject* obj) {
+ PyObjectContainer(obj).installAsClassVariable(pyclass, name);
+}
+
+} // namespace python
+} // namespace util
+} // namespace isc
+#endif // __PYCPPWRAPPER_UTIL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/util/python/wrapper_template.cc b/src/lib/util/python/wrapper_template.cc
new file mode 100644
index 0000000..426ced5
--- /dev/null
+++ b/src/lib/util/python/wrapper_template.cc
@@ -0,0 +1,309 @@
+// Copyright (C) @YEAR@ Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include "@cppclass at _python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::@MODULE@;
+using namespace isc::@MODULE@::python;
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// @CPPCLASS@
+//
+
+// Trivial constructor.
+s_@CPPCLASS@::s_@CPPCLASS@() : cppobj(NULL) {
+}
+
+namespace {
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ at CPPCLASS@, @CPPCLASS@> @CPPCLASS at Container;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int @CPPCLASS@_init(s_@CPPCLASS@* self, PyObject* args);
+void @CPPCLASS@_destroy(s_@CPPCLASS@* self);
+
+// These are the functions we export
+// ADD/REMOVE/MODIFY THE FOLLOWING AS APPROPRIATE FOR THE ACTUAL CLASS.
+//
+PyObject* @CPPCLASS@_toText(const s_@CPPCLASS@* const self);
+PyObject* @CPPCLASS@_str(PyObject* self);
+PyObject* @CPPCLASS@_richcmp(const s_@CPPCLASS@* const self,
+ const s_@CPPCLASS@* const other, int op);
+
+// This is quite specific to pydnspp. For other wrappers this should
+// probably be removed.
+PyObject* @CPPCLASS@_toWire(const s_@CPPCLASS@* self, PyObject* args);
+
+// These are the functions we export
+// For minimal support, we don't need them.
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef @CPPCLASS@_methods[] = {
+ { "to_text", reinterpret_cast<PyCFunction>(@CPPCLASS@_toText), METH_NOARGS,
+ "Returns the text representation" },
+ // This is quite specific to pydnspp. For other wrappers this should
+ // probably be removed:
+ { "to_wire", reinterpret_cast<PyCFunction>(@CPPCLASS@_toWire), METH_VARARGS,
+ "Converts the @CPPCLASS@ object to wire format.\n"
+ "The argument can be either a MessageRenderer or an object that "
+ "implements the sequence interface. If the object is mutable "
+ "(for instance a bytearray()), the wire data is added in-place.\n"
+ "If it is not (for instance a bytes() object), a new object is "
+ "returned" },
+ { NULL, NULL, 0, NULL }
+};
+
+// This is a template of typical code logic of python class initialization
+// with C++ backend. You'll need to adjust it according to details of the
+// actual C++ class.
+int
+@CPPCLASS@_init(s_@CPPCLASS@* self, PyObject* args) {
+ try {
+ if (PyArg_ParseTuple(args, "REPLACE ME")) {
+ // YOU'LL NEED SOME VALIDATION, PREPARATION, ETC, HERE.
+ self->cppobj = new @CPPCLASS@(/*NECESSARY PARAMS*/);
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct @CPPCLASS@ object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception in constructing @CPPCLASS@");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to @CPPCLASS@ constructor");
+
+ return (-1);
+}
+
+// This is a template of typical code logic of python object destructor.
+// In many cases you can use it without modification, but check that carefully.
+void
+@CPPCLASS@_destroy(s_@CPPCLASS@* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// This should be able to be used without modification as long as the
+// underlying C++ class has toText().
+PyObject*
+@CPPCLASS@_toText(const s_@CPPCLASS@* const self) {
+ try {
+ // toText() could throw, so we need to catch any exceptions below.
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert @CPPCLASS@ object to text: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting @CPPCLASS@ object to text");
+ }
+ return (NULL);
+}
+
+PyObject*
+@CPPCLASS@_str(PyObject* self) {
+ // Simply call the to_text method we already defined
+ return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
+ const_cast<char*>("")));
+}
+
+PyObject*
+@CPPCLASS@_richcmp(const s_@CPPCLASS@* const self,
+ const s_@CPPCLASS@* const other,
+ const int op)
+{
+ bool c = false;
+
+ // Check for null and if the types match. If different type,
+ // simply return False
+ if (other == NULL || (self->ob_type != other->ob_type)) {
+ Py_RETURN_FALSE;
+ }
+
+ // Only equals and not equals here, unorderable type
+ switch (op) {
+ case Py_LT:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; @CPPCLASS@");
+ return (NULL);
+ case Py_LE:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; @CPPCLASS@");
+ return (NULL);
+ case Py_EQ:
+ c = (*self->cppobj == *other->cppobj);
+ break;
+ case Py_NE:
+ c = (*self->cppobj != *other->cppobj);
+ break;
+ case Py_GT:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; @CPPCLASS@");
+ return (NULL);
+ case Py_GE:
+ PyErr_SetString(PyExc_TypeError, "Unorderable type; @CPPCLASS@");
+ return (NULL);
+ }
+ if (c) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace @MODULE@ {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_@CPPCLASS@
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject @cppclass@_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "@MODULE@.@CPPCLASS@",
+ sizeof(s_@CPPCLASS@), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(@CPPCLASS@_destroy), // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ // THIS MAY HAVE TO BE CHANGED TO NULL:
+ @CPPCLASS@_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The @CPPCLASS@ class objects is...(COMPLETE THIS)",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ // THIS MAY HAVE TO BE CHANGED TO NULL:
+ reinterpret_cast<richcmpfunc>(@CPPCLASS@_richcmp), // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ @CPPCLASS@_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(@CPPCLASS@_init), // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+// Module Initialization, all statics are initialized here
+bool
+initModulePart_@CPPCLASS@(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&@cppclass@_type) < 0) {
+ return (false);
+ }
+ void* p = &@cppclass@_type;
+ if (PyModule_AddObject(mod, "@CPPCLASS@", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&@cppclass@_type);
+
+ // The following template is the typical procedure for installing class
+ // variables. If the class doesn't have a class variable, remove the
+ // entire try-catch clause.
+ try {
+ // Constant class variables
+ installClassVariable(@cppclass@_type, "REPLACE_ME",
+ Py_BuildValue("REPLACE ME"));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure in @CPPCLASS@ initialization: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in @CPPCLASS@ initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+PyObject*
+create@CPPCLASS@Object(const @CPPCLASS@& source) {
+ @CPPCLASS@Container container(PyObject_New(s_@CPPCLASS@,
+ &@cppclass@_type));
+ container.set(new @CPPCLASS@(source));
+ return (container.release());
+}
+} // namespace python
+} // namespace @MODULE@
+} // namespace isc
diff --git a/src/lib/util/python/wrapper_template.h b/src/lib/util/python/wrapper_template.h
new file mode 100644
index 0000000..be701e1
--- /dev/null
+++ b/src/lib/util/python/wrapper_template.h
@@ -0,0 +1,59 @@
+// Copyright (C) @YEAR@ Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_@CPPCLASS@_H
+#define __PYTHON_@CPPCLASS@_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace @MODULE@ {
+class @CPPCLASS@;
+
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_@CPPCLASS@ : public PyObject {
+public:
+ s_@CPPCLASS@();
+ @CPPCLASS@* cppobj;
+};
+
+extern PyTypeObject @cppclass@_type;
+
+bool initModulePart_@CPPCLASS@(PyObject* mod);
+
+// Note: this utility function works only when @CPPCLASS@ is copy
+// constructible.
+// Also, it is only useful when the python binding needs to create this
+// object frequently. Otherwise, it would (or should) probably be better to
+// remove the declaration and definition of this function.
+//
+/// This is a simple shortcut to create a python @CPPCLASS@ object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* create@CPPCLASS@Object(const @CPPCLASS@& source);
+
+} // namespace python
+} // namespace @MODULE@
+} // namespace isc
+#endif // __PYTHON_@CPPCLASS@_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/util/pyunittests/Makefile.am b/src/lib/util/pyunittests/Makefile.am
new file mode 100644
index 0000000..dd2d39a
--- /dev/null
+++ b/src/lib/util/pyunittests/Makefile.am
@@ -0,0 +1,22 @@
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+noinst_LTLIBRARIES = pyunittests_util.la
+
+pyunittests_util_la_SOURCES = pyunittests_util.cc
+pyunittests_util_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+pyunittests_util_la_LDFLAGS = $(PYTHON_LDFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+pyunittests_util_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+# Python prefers .so, while some OSes (specifically MacOS) use a different
+# suffix for dynamic objects. -module is necessary to work around this.
+pyunittests_util_la_LDFLAGS += -module
+pyunittests_util_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+pyunittests_util_la_LIBADD += $(PYTHON_LIB)
+
+# hack to trigger libtool to not create a convenience archive,
+# resulting in shared modules
+pyunittests_util_la_LDFLAGS += -rpath /nowhere
diff --git a/src/lib/util/pyunittests/pyunittests_util.cc b/src/lib/util/pyunittests/pyunittests_util.cc
new file mode 100644
index 0000000..d266c84
--- /dev/null
+++ b/src/lib/util/pyunittests/pyunittests_util.cc
@@ -0,0 +1,84 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <stdint.h>
+
+// see util/time_utilities.h
+namespace isc {
+namespace util {
+namespace detail {
+extern int64_t (*gettimeFunction)();
+}
+}
+}
+
+namespace {
+int64_t fake_current_time;
+
+int64_t
+getFakeTime() {
+ return (fake_current_time);
+}
+
+PyObject*
+fixCurrentTime(PyObject*, PyObject* args) {
+ PyObject* maybe_none;
+ if (PyArg_ParseTuple(args, "L", &fake_current_time)) {
+ isc::util::detail::gettimeFunction = getFakeTime;
+ } else if (PyArg_ParseTuple(args, "O", &maybe_none) &&
+ maybe_none == Py_None) {
+ isc::util::detail::gettimeFunction = NULL;
+ } else {
+ PyErr_SetString(PyExc_TypeError, "Invalid arguments to "
+ "pyunittests_util.fix_current_time");
+ return (NULL);
+ }
+
+ PyErr_Clear();
+ Py_RETURN_NONE;
+}
+
+PyMethodDef PyUnittestsUtilMethods[] = {
+ { "fix_current_time", fixCurrentTime, METH_VARARGS,
+ "Fix the current system time at the specified (fake) value.\n\n"
+ "This is useful for testing modules that depend on the current time.\n"
+ "Note that it only affects C++ modules that use gettimeWrapper() "
+ "defined in libutil, which allows a hook for testing.\n"
+ "If an integer (signed 64bit) is given, the current time will be fixed "
+ "to that value; if None is specified (which is the default) the use of "
+ "faked time will be canceled."
+ },
+ { NULL, NULL, 0, NULL}
+};
+
+PyModuleDef pyunittests_util = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "pyunittests_util",
+ "This module is a collection of utilities useful for testing "
+ "the BIND 10 C++ binding modules.",
+ -1,
+ PyUnittestsUtilMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+PyMODINIT_FUNC
+PyInit_pyunittests_util(void) {
+ return (PyModule_Create(&pyunittests_util));
+}
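As a rough usage sketch of the module defined above (assuming the extension has been built and is importable as pyunittests_util; the timestamp is an arbitrary example value):

--
import pyunittests_util

# Freeze the time seen by C++ code that reads the clock through libutil's
# gettimeWrapper() hook (arbitrary example epoch value)
pyunittests_util.fix_current_time(1299923401)

# ... exercise the code under test that depends on the current time ...

# Passing None cancels the faked time and restores the real clock
pyunittests_util.fix_current_time(None)
--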
diff --git a/src/lib/util/strutil.cc b/src/lib/util/strutil.cc
index 161f9ac..ed7fc9b 100644
--- a/src/lib/util/strutil.cc
+++ b/src/lib/util/strutil.cc
@@ -132,6 +132,17 @@ format(const std::string& format, const std::vector<std::string>& args) {
return (result);
}
+std::string
+getToken(std::istringstream& iss) {
+ string token;
+ iss >> token;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(StringTokenError, "could not read token from string");
+ }
+ return (token);
+}
+
+
} // namespace str
} // namespace util
} // namespace isc
diff --git a/src/lib/util/strutil.h b/src/lib/util/strutil.h
index e044c15..021c236 100644
--- a/src/lib/util/strutil.h
+++ b/src/lib/util/strutil.h
@@ -18,7 +18,10 @@
#include <algorithm>
#include <cctype>
#include <string>
+#include <sstream>
#include <vector>
+#include <exceptions/exceptions.h>
+#include <boost/lexical_cast.hpp>
namespace isc {
namespace util {
@@ -26,6 +29,16 @@ namespace str {
/// \brief A Set of C++ Utilities for Manipulating Strings
+///
+/// \brief A standard string util exception that is thrown if getToken or
+/// tokenToNum are called with bad input data
+///
+class StringTokenError : public Exception {
+public:
+ StringTokenError(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
/// \brief Normalize Backslash
///
/// Only relevant to Windows, this replaces all "\" in a string with "/" and
@@ -140,6 +153,55 @@ std::string format(const std::string& format,
const std::vector<std::string>& args);
+/// \brief Returns one token from the given stringstream
+///
+/// Using the >> operator, with basic error checking
+///
+/// \exception StringTokenError if the token cannot be read from the stream
+///
+/// \param iss stringstream to read one token from
+///
+/// \return the first token read from the stringstream
+std::string getToken(std::istringstream& iss);
+
+/// \brief Converts a string token to an *unsigned* integer.
+///
+/// The value is converted using a lexical cast, with error and bounds
+/// checking.
+///
+/// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
+/// wide to store resulting integers.
+///
+/// BitSize is the maximum number of bits that the resulting integer can take.
+/// This function first checks whether the given token can be converted to
+/// an integer of NumType type. It then confirms the conversion result is
+/// within the valid range, i.e., [0, 2^BitSize - 1]. The second check is
+/// necessary because lexical_cast<T> where T is an unsigned integer type
+/// doesn't correctly reject negative numbers when compiled with SunStudio.
+///
+/// \exception StringTokenError if the value is out of range, or if it
+/// could not be converted
+///
+/// \param num_token the string token to convert
+///
+/// \return the converted value, of type NumType
+template <typename NumType, int BitSize>
+NumType
+tokenToNum(const std::string& num_token) {
+ NumType num;
+ try {
+ num = boost::lexical_cast<NumType>(num_token);
+ } catch (const boost::bad_lexical_cast& ex) {
+ isc_throw(StringTokenError, "Invalid SRV numeric parameter: " <<
+ num_token);
+ }
+ if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
+ isc_throw(StringTokenError, "Numeric SRV parameter out of range: " <<
+ num);
+ }
+ return (num);
+}
+
} // namespace str
} // namespace util
} // namespace isc
diff --git a/src/lib/util/tests/Makefile.am b/src/lib/util/tests/Makefile.am
index 7b97202..47243f8 100644
--- a/src/lib/util/tests/Makefile.am
+++ b/src/lib/util/tests/Makefile.am
@@ -1,8 +1,6 @@
SUBDIRS = .
AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/exceptions -I$(top_builddir)/src/lib/exceptions
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
@@ -15,26 +13,30 @@ CLEANFILES = *.gcno *.gcda
TESTS =
if HAVE_GTEST
TESTS += run_unittests
-run_unittests_SOURCES =
-run_unittests_SOURCES += filename_unittest.cc
-run_unittests_SOURCES += strutil_unittest.cc
-run_unittests_SOURCES += run_unittests.cc
+run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += base32hex_unittest.cc
run_unittests_SOURCES += base64_unittest.cc
-run_unittests_SOURCES += hex_unittest.cc
-run_unittests_SOURCES += sha1_unittest.cc
run_unittests_SOURCES += buffer_unittest.cc
-run_unittests_SOURCES += time_utilities_unittest.cc
-run_unittests_SOURCES += random_number_generator_unittest.cc
-run_unittests_SOURCES += lru_list_unittest.cc
+run_unittests_SOURCES += fd_share_tests.cc
+run_unittests_SOURCES += fd_tests.cc
+run_unittests_SOURCES += filename_unittest.cc
+run_unittests_SOURCES += hex_unittest.cc
run_unittests_SOURCES += io_utilities_unittest.cc
+run_unittests_SOURCES += lru_list_unittest.cc
run_unittests_SOURCES += qid_gen_unittest.cc
+run_unittests_SOURCES += random_number_generator_unittest.cc
+run_unittests_SOURCES += sha1_unittest.cc
+run_unittests_SOURCES += strutil_unittest.cc
+run_unittests_SOURCES += time_utilities_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/io/libutil_io.la
+run_unittests_LDADD += \
+ $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
endif
diff --git a/src/lib/util/tests/base32hex_unittest.cc b/src/lib/util/tests/base32hex_unittest.cc
index bf79627..fa4a290 100644
--- a/src/lib/util/tests/base32hex_unittest.cc
+++ b/src/lib/util/tests/base32hex_unittest.cc
@@ -66,7 +66,7 @@ decodeCheck(const string& input_string, vector<uint8_t>& output,
const string& expected)
{
decodeBase32Hex(input_string, output);
- EXPECT_EQ(expected, string(&output[0], &output[0] + output.size()));
+ EXPECT_EQ(expected, string(output.begin(), output.end()));
}
TEST_F(Base32HexTest, decode) {
@@ -79,6 +79,11 @@ TEST_F(Base32HexTest, decode) {
// whitespace should be allowed
decodeCheck("CP NM\tUOG=", decoded_data, "foob");
decodeCheck("CPNMU===\n", decoded_data, "foo");
+ decodeCheck(" CP NM\tUOG=", decoded_data, "foob");
+ decodeCheck(" ", decoded_data, "");
+
+ // Incomplete input
+ EXPECT_THROW(decodeBase32Hex("CPNMUOJ", decoded_data), BadValue);
// invalid number of padding characters
EXPECT_THROW(decodeBase32Hex("CPNMU0==", decoded_data), BadValue);
diff --git a/src/lib/util/tests/base64_unittest.cc b/src/lib/util/tests/base64_unittest.cc
index c2b2785..b0c926d 100644
--- a/src/lib/util/tests/base64_unittest.cc
+++ b/src/lib/util/tests/base64_unittest.cc
@@ -52,7 +52,7 @@ decodeCheck(const string& input_string, vector<uint8_t>& output,
const string& expected)
{
decodeBase64(input_string, output);
- EXPECT_EQ(expected, string(&output[0], &output[0] + output.size()));
+ EXPECT_EQ(expected, string(output.begin(), output.end()));
}
TEST_F(Base64Test, decode) {
@@ -66,6 +66,12 @@ TEST_F(Base64Test, decode) {
decodeCheck("Zm 9v\tYmF\ny", decoded_data, "foobar");
decodeCheck("Zm9vYg==", decoded_data, "foob");
decodeCheck("Zm9vYmE=\n", decoded_data, "fooba");
+ decodeCheck(" Zm9vYmE=\n", decoded_data, "fooba");
+ decodeCheck(" ", decoded_data, "");
+ decodeCheck("\n\t", decoded_data, "");
+
+ // incomplete input
+ EXPECT_THROW(decodeBase64("Zm9vYmF", decoded_data), BadValue);
// only up to 2 padding characters are allowed
EXPECT_THROW(decodeBase64("A===", decoded_data), BadValue);
diff --git a/src/lib/util/tests/buffer_unittest.cc b/src/lib/util/tests/buffer_unittest.cc
index 0cd1823..666924e 100644
--- a/src/lib/util/tests/buffer_unittest.cc
+++ b/src/lib/util/tests/buffer_unittest.cc
@@ -239,4 +239,36 @@ TEST_F(BufferTest, outputBufferZeroSize) {
});
}
+TEST_F(BufferTest, readVectorAll) {
+ std::vector<uint8_t> vec;
+
+ // check that vector can read the whole buffer
+ ibuffer.readVector(vec, 5);
+
+ ASSERT_EQ(5, vec.size());
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 5));
+
+ // ibuffer is 5 bytes long. Can't read past it.
+ EXPECT_THROW(
+ ibuffer.readVector(vec, 1),
+ isc::util::InvalidBufferPosition
+ );
+}
+
+TEST_F(BufferTest, readVectorChunks) {
+ std::vector<uint8_t> vec;
+
+ // read the first chunk of the buffer into the vector
+ ibuffer.readVector(vec, 3);
+ EXPECT_EQ(3, vec.size());
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata, 3));
+
+ EXPECT_NO_THROW(
+ ibuffer.readVector(vec, 2)
+ );
+
+ EXPECT_EQ(0, memcmp(&vec[0], testdata+3, 2));
+}
+
}
diff --git a/src/lib/util/tests/fd_share_tests.cc b/src/lib/util/tests/fd_share_tests.cc
new file mode 100644
index 0000000..cc92e47
--- /dev/null
+++ b/src/lib/util/tests/fd_share_tests.cc
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/io/fd.h>
+#include <util/io/fd_share.h>
+
+#include <util/unittests/fork.h>
+
+#include <gtest/gtest.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <cstdio>
+
+using namespace isc::util::io;
+using namespace isc::util::unittests;
+
+namespace {
+
+// We test that we can transfer a pipe over another pipe
+TEST(FDShare, transfer) {
+ // Get a pipe and fork
+ int pipes[2];
+ ASSERT_NE(-1, socketpair(AF_UNIX, SOCK_STREAM, 0, pipes));
+ pid_t sender(fork());
+ ASSERT_NE(-1, sender);
+ if(sender) { // We are in parent
+ // Close the other side of the pipe, we want only the writable one
+ EXPECT_NE(-1, close(pipes[0]));
+ // Get a process to check data
+ int fd(0);
+ pid_t checker(check_output(&fd, "data", 4));
+ ASSERT_NE(-1, checker);
+ // Now, send the file descriptor, close it and close the pipe
+ EXPECT_NE(-1, send_fd(pipes[1], fd));
+ EXPECT_NE(-1, close(pipes[1]));
+ EXPECT_NE(-1, close(fd));
+ // Check both subprocesses ended well
+ EXPECT_TRUE(process_ok(sender));
+ EXPECT_TRUE(process_ok(checker));
+ } else { // We are in child. We do not use ASSERT here
+ // Close the write end, we only read
+ if(close(pipes[1])) {
+ exit(1);
+ }
+ // Get the file descriptor
+ int fd(recv_fd(pipes[0]));
+ if(fd == -1) {
+ exit(1);
+ }
+ // This pipe is not needed
+ if(close(pipes[0])) {
+ exit(1);
+ }
+ // Send "data" trough the received fd, close it and be done
+ if(!write_data(fd, "data", 4) || close(fd) == -1) {
+ exit(1);
+ }
+ exit(0);
+ }
+}
+
+}
diff --git a/src/lib/util/tests/fd_tests.cc b/src/lib/util/tests/fd_tests.cc
new file mode 100644
index 0000000..6ba2766
--- /dev/null
+++ b/src/lib/util/tests/fd_tests.cc
@@ -0,0 +1,66 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/io/fd.h>
+
+#include <util/unittests/fork.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::util::io;
+using namespace isc::util::unittests;
+
+namespace {
+
+// Make sure the test data is large enough that it does not fit into one
+// read or write request
+const size_t TEST_DATA_SIZE = 8 * 1024 * 1024;
+
+class FDTest : public ::testing::Test {
+ public:
+ unsigned char *data, *buffer;
+ FDTest() :
+ // We do not care what is inside, we just need it to be the same
+ data(new unsigned char[TEST_DATA_SIZE]),
+ buffer(NULL)
+ { }
+ ~ FDTest() {
+ delete[] data;
+ delete[] buffer;
+ }
+};
+
+// Test we read what was sent
+TEST_F(FDTest, read) {
+ int read_pipe(0);
+ buffer = new unsigned char[TEST_DATA_SIZE];
+ pid_t feeder(provide_input(&read_pipe, data, TEST_DATA_SIZE));
+ ASSERT_GE(feeder, 0);
+ ssize_t received(read_data(read_pipe, buffer, TEST_DATA_SIZE));
+ EXPECT_TRUE(process_ok(feeder));
+ EXPECT_EQ(TEST_DATA_SIZE, received);
+ EXPECT_EQ(0, memcmp(data, buffer, received));
+}
+
+// Test we write the correct thing
+TEST_F(FDTest, write) {
+ int write_pipe(0);
+ pid_t checker(check_output(&write_pipe, data, TEST_DATA_SIZE));
+ ASSERT_GE(checker, 0);
+ EXPECT_TRUE(write_data(write_pipe, data, TEST_DATA_SIZE));
+ EXPECT_EQ(0, close(write_pipe));
+ EXPECT_TRUE(process_ok(checker));
+}
+
+}
diff --git a/src/lib/util/tests/filename_unittest.cc b/src/lib/util/tests/filename_unittest.cc
index 33e6456..07f3525 100644
--- a/src/lib/util/tests/filename_unittest.cc
+++ b/src/lib/util/tests/filename_unittest.cc
@@ -51,42 +51,49 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/alpha/beta/", fname.directory());
EXPECT_EQ("gamma", fname.name());
EXPECT_EQ(".delta", fname.extension());
+ EXPECT_EQ("gamma.delta", fname.nameAndExtension());
// Directory only
fname.setName("/gamma/delta/");
EXPECT_EQ("/gamma/delta/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Filename only
fname.setName("epsilon");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("epsilon", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("epsilon", fname.nameAndExtension());
// Extension only
fname.setName(".zeta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".zeta", fname.extension());
+ EXPECT_EQ(".zeta", fname.nameAndExtension());
// Missing directory
fname.setName("eta.theta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("eta", fname.name());
EXPECT_EQ(".theta", fname.extension());
+ EXPECT_EQ("eta.theta", fname.nameAndExtension());
// Missing filename
fname.setName("/iota/.kappa");
EXPECT_EQ("/iota/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".kappa", fname.extension());
+ EXPECT_EQ(".kappa", fname.nameAndExtension());
// Missing extension
fname.setName("lambda/mu/nu");
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Check that the decomposition can occur in the presence of leading and
// trailing spaces
@@ -94,18 +101,21 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Empty string
fname.setName("");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// ... and just spaces
fname.setName(" ");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Check corner cases - where separators are present, but strings are
// absent.
@@ -113,16 +123,19 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
fname.setName(".");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
fname.setName("/.");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
// Note that the space is a valid filename here; only leading and trailing
// spaces should be trimmed.
@@ -130,11 +143,13 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
fname.setName(" / . ");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
}
// Check that the expansion with a default works.
@@ -177,3 +192,40 @@ TEST_F(FilenameTest, UseAsDefault) {
EXPECT_EQ("/s/t/u", fname.useAsDefault("/s/t/u"));
EXPECT_EQ("/a/b/c", fname.useAsDefault(""));
}
+
+TEST_F(FilenameTest, setDirectory) {
+ Filename fname("a.b");
+ EXPECT_EQ("", fname.directory());
+ EXPECT_EQ("a.b", fname.fullName());
+ EXPECT_EQ("a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir/");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/");
+ EXPECT_EQ("/", fname.directory());
+ EXPECT_EQ("/a.b", fname.fullName());
+ EXPECT_EQ("/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("");
+ EXPECT_EQ("", fname.directory());
+ EXPECT_EQ("a.b", fname.fullName());
+ EXPECT_EQ("a.b", fname.expandWithDefault(""));
+
+ fname = Filename("/first/a.b");
+ EXPECT_EQ("/first/", fname.directory());
+ EXPECT_EQ("/first/a.b", fname.fullName());
+ EXPECT_EQ("/first/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+}
diff --git a/src/lib/util/tests/io_utilities_unittest.cc b/src/lib/util/tests/io_utilities_unittest.cc
index 4aad560..4293c7e 100644
--- a/src/lib/util/tests/io_utilities_unittest.cc
+++ b/src/lib/util/tests/io_utilities_unittest.cc
@@ -19,6 +19,7 @@
#include <cstddef>
+#include <arpa/inet.h>
#include <gtest/gtest.h>
#include <util/buffer.h>
@@ -71,3 +72,48 @@ TEST(asioutil, writeUint16) {
EXPECT_EQ(ref[1], test[1]);
}
}
+
+// test data shared among the readUint32 and writeUint32 tests
+const static uint32_t test32[] = {
+ 0,
+ 1,
+ 2000,
+ 0x80000000,
+ 0xffffffff
+};
+
+TEST(asioutil, readUint32) {
+ uint8_t data[8];
+
+ // make sure that we can read data, regardless of
+ // the memory alignment. That's why we need to repeat
+ // it 4 times.
+ for (int offset=0; offset < 4; offset++) {
+ for (int i=0; i < sizeof(test32)/sizeof(uint32_t); i++) {
+ uint32_t tmp = htonl(test32[i]);
+ memcpy(&data[offset], &tmp, sizeof(uint32_t));
+
+ EXPECT_EQ(test32[i], readUint32(&data[offset]));
+ }
+ }
+}
+
+
+TEST(asioutil, writeUint32) {
+ uint8_t data[8];
+
+ // make sure that we can write data, regardless of
+ // the memory alignment. That's why we need to repeat
+ // it 4 times.
+ for (int offset=0; offset < 4; offset++) {
+ for (int i=0; i < sizeof(test32)/sizeof(uint32_t); i++) {
+ uint8_t* ptr = writeUint32(test32[i], &data[offset]);
+
+ EXPECT_EQ(&data[offset]+sizeof(uint32_t), ptr);
+
+ uint32_t tmp = htonl(test32[i]);
+
+ EXPECT_EQ(0, memcmp(&tmp, &data[offset], sizeof(uint32_t)));
+ }
+ }
+}
diff --git a/src/lib/util/tests/run_unittests.cc b/src/lib/util/tests/run_unittests.cc
index bd3c4c9..a2181cf 100644
--- a/src/lib/util/tests/run_unittests.cc
+++ b/src/lib/util/tests/run_unittests.cc
@@ -13,9 +13,11 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+
+ return (isc::util::unittests::run_all());
}
diff --git a/src/lib/util/tests/strutil_unittest.cc b/src/lib/util/tests/strutil_unittest.cc
index cd3a9ca..74bc17d 100644
--- a/src/lib/util/tests/strutil_unittest.cc
+++ b/src/lib/util/tests/strutil_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdint.h>
+
#include <string>
#include <gtest/gtest.h>
@@ -22,17 +24,9 @@ using namespace isc;
using namespace isc::util;
using namespace std;
-class StringUtilTest : public ::testing::Test {
-protected:
- StringUtilTest()
- {
- }
-};
-
-
// Check for slash replacement
-TEST_F(StringUtilTest, Slash) {
+TEST(StringUtilTest, Slash) {
string instring = "";
isc::util::str::normalizeSlash(instring);
@@ -49,7 +43,7 @@ TEST_F(StringUtilTest, Slash) {
// Check that leading and trailing space trimming works
-TEST_F(StringUtilTest, Trim) {
+TEST(StringUtilTest, Trim) {
// Empty and full string.
EXPECT_EQ("", isc::util::str::trim(""));
@@ -71,7 +65,7 @@ TEST_F(StringUtilTest, Trim) {
// returned vector; if not as expected, the following references may be invalid
// so should not be used.
-TEST_F(StringUtilTest, Tokens) {
+TEST(StringUtilTest, Tokens) {
vector<string> result;
// Default delimiters
@@ -157,7 +151,7 @@ TEST_F(StringUtilTest, Tokens) {
// Changing case
-TEST_F(StringUtilTest, ChangeCase) {
+TEST(StringUtilTest, ChangeCase) {
string mixed("abcDEFghiJKLmno123[]{=+--+]}");
string upper("ABCDEFGHIJKLMNO123[]{=+--+]}");
string lower("abcdefghijklmno123[]{=+--+]}");
@@ -173,7 +167,7 @@ TEST_F(StringUtilTest, ChangeCase) {
// Formatting
-TEST_F(StringUtilTest, Formatting) {
+TEST(StringUtilTest, Formatting) {
vector<string> args;
args.push_back("arg1");
@@ -213,3 +207,63 @@ TEST_F(StringUtilTest, Formatting) {
string format9 = "%s %s";
EXPECT_EQ(format9, isc::util::str::format(format9, args));
}
+
+TEST(StringUtilTest, getToken) {
+ string s("a b c");
+ istringstream ss(s);
+ EXPECT_EQ("a", isc::util::str::getToken(ss));
+ EXPECT_EQ("b", isc::util::str::getToken(ss));
+ EXPECT_EQ("c", isc::util::str::getToken(ss));
+ EXPECT_THROW(isc::util::str::getToken(ss), isc::util::str::StringTokenError);
+}
+
+int32_t tokenToNumCall_32_16(const string& token) {
+ return isc::util::str::tokenToNum<int32_t, 16>(token);
+}
+
+int16_t tokenToNumCall_16_8(const string& token) {
+ return isc::util::str::tokenToNum<int16_t, 8>(token);
+}
+
+TEST(StringUtilTest, tokenToNum) {
+ uint32_t num32 = tokenToNumCall_32_16("0");
+ EXPECT_EQ(0, num32);
+ num32 = tokenToNumCall_32_16("123");
+ EXPECT_EQ(123, num32);
+ num32 = tokenToNumCall_32_16("65535");
+ EXPECT_EQ(65535, num32);
+
+ EXPECT_THROW(tokenToNumCall_32_16(""),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("a"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("-1"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("65536"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("1234567890"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("-1234567890"),
+ isc::util::str::StringTokenError);
+
+ uint16_t num16 = tokenToNumCall_16_8("123");
+ EXPECT_EQ(123, num16);
+ num16 = tokenToNumCall_16_8("0");
+ EXPECT_EQ(0, num16);
+ num16 = tokenToNumCall_16_8("255");
+ EXPECT_EQ(255, num16);
+
+ EXPECT_THROW(tokenToNumCall_16_8(""),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("a"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("-1"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("256"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("1234567890"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("-1234567890"),
+ isc::util::str::StringTokenError);
+
+}
diff --git a/src/lib/util/unittests/Makefile.am b/src/lib/util/unittests/Makefile.am
index 340cd1f..bbb0d49 100644
--- a/src/lib/util/unittests/Makefile.am
+++ b/src/lib/util/unittests/Makefile.am
@@ -1,10 +1,22 @@
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CXXFLAGS = $(B10_CXXFLAGS)
-lib_LTLIBRARIES = libutil_unittests.la
+noinst_LTLIBRARIES = libutil_unittests.la
libutil_unittests_la_SOURCES = fork.h fork.cc resolver.h
libutil_unittests_la_SOURCES += newhook.h newhook.cc
libutil_unittests_la_SOURCES += testdata.h testdata.cc
+if HAVE_GTEST
+libutil_unittests_la_SOURCES += run_all.h run_all.cc
libutil_unittests_la_SOURCES += textdata.h
+endif
+
+libutil_unittests_la_CPPFLAGS = $(AM_CPPFLAGS)
+if HAVE_GTEST
+libutil_unittests_la_CPPFLAGS += $(GTEST_INCLUDES)
+endif
+
+libutil_unittests_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
+libutil_unittests_la_LIBADD += $(top_builddir)/src/lib/util/io/libutil_io.la
+libutil_unittests_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
CLEANFILES = *.gcno *.gcda
diff --git a/src/lib/util/unittests/run_all.cc b/src/lib/util/unittests/run_all.cc
new file mode 100644
index 0000000..5f50f77
--- /dev/null
+++ b/src/lib/util/unittests/run_all.cc
@@ -0,0 +1,95 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdlib.h>
+#include <string.h>   // for strcmp()
+
+#include <iostream>
+#include <iomanip>
+
+#include <gtest/gtest.h>
+#include <exceptions/exceptions.h>
+#include <util/unittests/run_all.h>
+
+namespace isc {
+namespace util {
+namespace unittests {
+
+int
+run_all() {
+ int ret = 0;
+
+ // The catching of exceptions generated in tests is controlled by the
+ // B10TEST_CATCH_EXCEPTION environment variable. Setting this to
+ // 1 enables the catching of exceptions; setting it to 0 disables it.
+ // Anything else causes a message to be printed to stderr and the default
+ // taken. (The default is to catch exceptions when compiling with clang,
+ // and not to catch them otherwise.)
+#ifdef __clang__
+ bool catch_exception = true;
+#else
+ bool catch_exception = false;
+#endif
+
+ const char* b10test_catch_exception = getenv("B10TEST_CATCH_EXCEPTION");
+ if (b10test_catch_exception != NULL) {
+ if (strcmp(b10test_catch_exception, "1") == 0) {
+ catch_exception = true;
+ } else if (strcmp(b10test_catch_exception, "0") == 0) {
+ catch_exception = false;
+ } else {
+ std::cerr << "***ERROR: B10TEST_CATCH_EXCEPTION is '"
+ << b10test_catch_exception
+ << "': allowed values are '1' or '0'.\n"
+ << " The default value of "
+ << (catch_exception ?
+ "1 (exception catching enabled)":
+ "0 (exception catching disabled)")
+ << " will be used.\n";
+ }
+ }
+
+ // Actually run the code
+ if (catch_exception) {
+ try {
+ ret = RUN_ALL_TESTS();
+ } catch (const isc::Exception& ex) {
+ // Could output more information with typeid(), but there is no
+ // guarantee that all compilers will support it without an explicit
+ // flag on the command line.
+ std::cerr << "*** Exception derived from isc::exception thrown:\n"
+ << " file: " << ex.getFile() << "\n"
+ << " line: " << ex.getLine() << "\n"
+ << " what: " << ex.what() << std::endl;
+ throw;
+ } catch (const std::exception& ex) {
+ std::cerr << "*** Exception derived from std::exception thrown:\n"
+ << " what: " << ex.what() << std::endl;
+ throw;
+ }
+ } else {
+ // This is a separate path for the case where the exception is not
+ // being caught. Although the other code path re-throws the exception
+ // after catching it, there is no guarantee that the state of the
+ // stack is preserved - a compiler might have unwound the stack to
+ // the point at which the exception is caught. This would prove
+ // awkward if trying to debug the program using a debugger.
+ ret = RUN_ALL_TESTS();
+ }
+
+ return (ret);
+}
+
+} // namespace unittests
+} // namespace util
+} // namespace isc
diff --git a/src/lib/util/unittests/run_all.h b/src/lib/util/unittests/run_all.h
new file mode 100644
index 0000000..94c7cb0
--- /dev/null
+++ b/src/lib/util/unittests/run_all.h
@@ -0,0 +1,52 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+
+#ifndef __RUN_ALL_H
+#define __RUN_ALL_H
+
+// gtest/gtest.h is included here to avoid the need for the user to include it.
+
+#include <gtest/gtest.h>
+
+namespace isc {
+namespace util {
+namespace unittests {
+
+/// \brief Run All Tests
+///
+/// A wrapper for the Google Test RUN_ALL_TESTS() macro: this calls the macro
+/// but wraps the call in a try...catch block when exception catching is
+/// enabled (controlled by the B10TEST_CATCH_EXCEPTION environment variable;
+/// the default depends on the compiler), and calls the macro directly
+/// otherwise.
+///
+/// The catch block catches exceptions of types isc::Exception and
+/// std::exception and prints some information about them to stderr. (In the
+/// case of isc::Exception, this includes the file and line number from which
+/// the exception was raised.) It then re-throws the exception.
+///
+/// See: https://lists.isc.org/pipermail/bind10-dev/2011-January/001867.html
+/// for some context.
+///
+/// \return Return value from RUN_ALL_TESTS().
+
+int run_all();
+
+} // namespace unittests
+} // namespace util
+} // namespace isc
+
+
+
+#endif // __RUN_ALL_H
diff --git a/tests/lettuce/README b/tests/lettuce/README
new file mode 100644
index 0000000..21a57c7
--- /dev/null
+++ b/tests/lettuce/README
@@ -0,0 +1,127 @@
+BIND10 system testing with Lettuce
+or: to BDD or not to BDD
+
+In this directory, we define a set of behavioral tests for BIND 10. Currently,
+these tests are specific to BIND 10, but we are keeping in mind that RFC-related
+tests could be separated, so that we can test other systems as well.
+
+Prerequisites:
+- Installed version of BIND 10 (but see below how to run it from source tree)
+- dig
+- lettuce (http://lettuce.it)
+
+To install lettuce, if you have the python pip installation tool, simply do
+pip install lettuce
+See http://lettuce.it/intro/install.html
+
+Most systems have the pip tool in a separate package; on Debian-based systems
+it is called python-pip. On FreeBSD the port is devel/py-pip.
+
+Running the tests
+-----------------
+
+At this moment, we have a fixed port for local tests in our setups, port 47806.
+This port must be free. (TODO: can we make this run-time discovered?).
+Port 47805 is used for cmdctl, and must also be available.
+(note, we will need to extend this to a range, or if possible, we will need to
+do some on-the-fly available port finding)
+
+The bind10 main program, bindctl, and dig must all be in the default search
+path of your environment, and BIND 10 must not be running if you use the
+installed version when you run the tests.
+
+If you want to test an installed version of bind 10, just run 'lettuce' in
+this directory.
+
+We have provided a script that sets up the shell environment to run the tests
+with the build tree version of bind. If your shell uses export to set
+environment variables, you can source the script setup_intree_bind10.sh, then
+run lettuce.
+
+Due to the default way lettuce prints its output, it is advisable to run it
+in a terminal that is wider than the default. If you see a lot of lines twice
+in different colors, the terminal is not wide enough.
+
+If you just want to run one specific feature test, use
+lettuce features/<feature file>
+
+To run a specific scenario from a feature, use
+lettuce features/<feature file> -s <scenario number>
+
+We have set up the tests to assume that lettuce is run from this directory,
+so even if you specify a specific feature file, you should do it from this
+directory.
+
+What to do when a test fails
+----------------------------
+
+First of all, look at the error it printed and see what step it occurred in.
+If written well, the output should explain most of what went wrong.
+
+The stacktrace that is printed is *not* of bind10, but of the testing
+framework; this helps in finding more information about what exactly the test
+tried to achieve when it failed (as well as helping to debug the tests themselves).
+
+Furthermore, if any scenario fails, the output from long-running processes
+will be stored in the directory output/. The name of the files will be
+<Feature name>-<Scenario name>-<Process name>.stdout and
+<Feature name>-<Scenario name>-<Process name>.stderr
+where spaces and other non-standard characters are replaced by underscores.
+The process name is either the standard name for said process (e.g. 'bind10'),
+or the name given to it by the test ('when i run bind10 as <name>').
+
+These files *will* be overwritten or deleted if the same scenarios are run
+again, so if you want to inspect them after a failed test, either do so
+immediately or move the files.
+
+Adding and extending tests
+--------------------------
+
+If you want to add tests, it is advisable to first go through the examples to
+see what is possible, and read the documentation on http://www.lettuce.it
+
+There is also a README.tutorial file here.
+
+We have a couple of conventions to keep things manageable.
+
+Configuration files go into the configurations/ directory.
+Data files go into the data/ directory.
+Step definitions go into the features/terrain/ directory (the name terrain is
+chosen for the same reason Lettuce chose terrain.py, this is the place the
+tests 'live' in).
+Feature definitions go directly into the features/ directory.
+
+These directories are currently not divided further; we may want to consider
+this as the set grows. Due to a (current?) limitation of Lettuce, for
+feature files this is currently not possible; the python files containing
+steps and terrain must be below or at the same level as the feature files.
+
+Long-running processes should be started through the world.RunningProcesses
+instance. If you want to add a process (e.g. bind9), create start, stop and
+control steps in terrain/<base_name>_control.py, and let it use the
+RunningProcesses API (defined in terrain.py). See bind10_control.py for an
+example.
+
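To sketch the shape of such a control step (illustration only: the process name is invented, and the world.processes attribute and its add_process method are placeholders for whatever the RunningProcesses API in terrain.py actually provides):

--
from lettuce import step, world

@step(r'start myproc with configuration ([\S]+)(?: as ([\S]+))?')
def start_myproc(step, config_file, name):
    # Hand the command line over to the shared RunningProcesses instance;
    # both the attribute and the method name below are hypothetical.
    world.processes.add_process(step, name or 'myproc',
                                ['myproc', '-c', config_file])
--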
+For sending queries and checking the results, steps have been defined in
+terrain/querying.py. These use dig and store the results split up into text
+strings. This is intentionally not parsed through our own library (as that way
+we might run into a 'symmetric bug'). If you need something more advanced from
+query results, define it here.
+
+Some very general steps are defined in terrain/steps.py.
+Initialization code, cleanup code, and helper classes are defined in
+terrain/terrain.py.
+
+To find the right steps, case-insensitive matching is used. Parameters taken
+from the steps are case-sensitive though. So a step defined as
+'do foo with value (bar)' will be matched when using
+'Do Foo with value xyz', but xyz will be taken as given.
+
+If you need to add steps that are very particular to one test, create a new
+file with a name relevant for that test in terrain. We may want to consider
+creating a specific subdirectory for these, but at this moment it is unclear
+whether we need to.
+
+We should try to keep steps as general as possible, while not making them too
+complex and error-prone.
+
diff --git a/tests/lettuce/README.tutorial b/tests/lettuce/README.tutorial
new file mode 100644
index 0000000..18c94cf
--- /dev/null
+++ b/tests/lettuce/README.tutorial
@@ -0,0 +1,157 @@
+Quick tutorial and overview
+---------------------------
+
+Lettuce is a framework for doing Behaviour Driven Development (BDD).
+
+The idea behind BDD is that you first write down your requirements in
+the form of scenarios, then implement their behaviour.
+
+We do not plan on doing full BDD, but such a system should also help
+us make system tests and, hopefully, better identify
+what exactly is going wrong when a test fails.
+
+Lettuce is a python implementation of the Cucumber framework, which is
+a ruby system. So far we chose lettuce because we already need python
+anyway, so chances are higher that any system we want to run it on
+supports it. It only supports a subset of cucumber, but more cucumber
+features are planned. As I do not know the details of cucumber well, I
+can't really say what is there and what is not.
+
+A slight letdown is that the current version does not support python 3.
+However, as long as the tool-calling glue is python2, this should not
+cause any problems, since these aren't unit tests; we do not plan to use
+our libraries directly, but only through the runnable scripts and
+executables.
+
+-----
+
+Features, Scenarios, Steps.
+
+Lettuce makes a distinction between features, scenarios, and steps.
+
+Features are general, well, features. Each 'feature' has its own file
+ending in .feature. A feature file contains a description and a number
+of scenarios. Each scenario tests one or more particular parts of the
+feature. Each scenario consists of a number of steps.
+
+So let's open up a simple one.
+
+-- example.feature
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+ # steps go here
+--
+
+I have predefined a number of steps we can use; as we build tests we
+will need to expand these, but we will look at them shortly.
+
+This file defines a feature; just under the feature name we can
+provide a description of the feature.
+
+The one scenario we have now has no steps, so if we run it we should
+see something like:
+
+-- output
+> lettuce
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+
+1 feature (1 passed)
+1 scenario (1 passed)
+0 step (0 passed)
+--
+
+Let's first add some steps that send queries.
+
+--
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+--
+
+Since we didn't start any bind10, dig will time out and the result
+should be an error saying it got no answer. Errors are in the
+form of stack traces (triggered by failed assertions), so we can find
+out easily where in the tests they occurred. Especially when the total
+set of steps gets bigger we might need that.
+
+So let's add a step that starts bind10.
+
+--
+ When I start bind10 with configuration example.org.config
+--
+
+This is not good enough; it will fire off the process, but setting up
+b10-auth may take a few moments, so we need to add a step to wait for
+it to be started before we continue.
+
+--
+ Then wait for bind10 auth to start
+--
+
+And let's run the tests again.
+
+--
+> lettuce
+
+Feature: showing off BIND 10
+ This is to show BIND 10 running and that it answers queries
+
+ Scenario: Starting bind10
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+1 feature (1 passed)
+1 scenario (1 passed)
+4 steps (4 passed)
+(finished within 2 seconds)
+--
+
+So let's take a look at one of those steps, say the first one.
+
+A step is defined through a python decorator, which in essence is a regular
+expression; lettuce searches through all defined steps to find one that
+matches. These are 'partial' matches (unless specified otherwise in the
+regular expression itself), so if the step is defined with "do foo bar", the
+scenario can add words for readability "When I do foo bar".
+
+Each captured group will be passed as an argument to the function we define.
+For bind10, I defined a configuration file, a cmdctl port, and a process
+name. The first two should be self-evident, and the process name is an
+optional name we give it, should we want to address it in the rest of the
+tests. This is most useful if we want to start multiple instances. In the
+next step (the wait for auth to start), I added a 'of <instance>'. So if we
+define the bind10 'as b10_second_instance', we can specify that one here as
+'of b10_second_instance'.
+
+--
+ When I start bind10 with configuration second.config
+ with cmdctl port 12345 as b10_second_instance
+--
+(line wrapped for readability)
+
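To make the mechanics concrete, the definition side of a query step might look roughly like the sketch below (simplified assumptions only; the project's real query steps live in features/terrain/querying.py and do considerably more):

--
from lettuce import step
import subprocess

@step(r'A query for ([\S]+) should have rcode ([\w]+)')
def query_rcode(step, name, rcode):
    # Captured groups arrive as function arguments; run dig against the
    # local test server and check the status field in its output.
    output = subprocess.check_output(['dig', '@127.0.0.1', '-p', '47806',
                                      name])
    assert 'status: ' + rcode in str(output)
--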
+But notice how we needed two steps which we will almost always need
+together? We can also combine steps; for instance:
+
+--
+@step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+--
+
+Now we can replace the two steps with one:
+
+--
+ Given I have bind10 running
+--
+
+That's it for the quick overview. For some more examples, with comments,
+take a look at features/example.feature. You can read more about lettuce and
+its features on http://www.lettuce.it, and if you plan on adding tests and
+scenarios, please consult the last section of the main README first.
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
new file mode 100644
index 0000000..642f2dd
--- /dev/null
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -0,0 +1,17 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
new file mode 100644
index 0000000..1a40d1b
--- /dev/null
+++ b/tests/lettuce/configurations/example2.org.config
@@ -0,0 +1,18 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "auth",
+ "debuglevel": 99
+ }
+ ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47807,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
new file mode 100644
index 0000000..f865354
--- /dev/null
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -0,0 +1,10 @@
+{
+ "version": 2,
+ "Auth": {
+ "database_file": "data/test_nonexistent_db.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/data/empty_db.sqlite3 b/tests/lettuce/data/empty_db.sqlite3
new file mode 100644
index 0000000..f27a8b8
Binary files /dev/null and b/tests/lettuce/data/empty_db.sqlite3 differ
diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3
new file mode 100644
index 0000000..070012f
Binary files /dev/null and b/tests/lettuce/data/example.org.sqlite3 differ
diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature
new file mode 100644
index 0000000..d1ed6b3
--- /dev/null
+++ b/tests/lettuce/features/example.feature
@@ -0,0 +1,142 @@
+Feature: Example feature
+ This is an example Feature set. It is mainly intended to show
+ our use of the lettuce tool and our own framework for it
+ The first scenario is to show what a simple test would look like, and
+ is intentionally uncommented.
+ The later scenarios have comments to show what the test steps do and
+ support
+
+ Scenario: A simple example
+ Given I have bind10 running with configuration example.org.config
+ A query for www.example.org should have rcode NOERROR
+ A query for www.doesnotexist.org should have rcode REFUSED
+ The SOA serial for example.org should be 1234
+
+ Scenario: New database
+ # This test checks whether a database file is automatically created
+ # Under the hood, we take advantage of our initialization routines so
+ # that we are sure this file does not exist, see
+ # features/terrain/terrain.py
+
+ # Standard check to test (non-)existence of a file
+ # This file is actually automatically
+ The file data/test_nonexistent_db.sqlite3 should not exist
+
+ # In the first scenario, we used 'given I have bind10 running', which
+ # is actually a compound step consisting of the following two
+ # one to start the server
+ When I start bind10 with configuration no_db_file.config
+ # And one to wait until it reports that b10-auth has started
+ Then wait for bind10 auth to start
+
+ # This is a general step to stop a named process. By convention,
+ # the default name for any process is the same as the one we
+ # use in the start step (for bind 10, that is 'I start bind10 with')
+ # See scenario 'Multiple instances' for more.
+ Then stop process bind10
+
+ # Now we use the first step again to see if the file has been created
+ The file data/test_nonexistent_db.sqlite3 should exist
+
+ Scenario: example.org queries
+ # This scenario performs a number of queries and inspects the results
+ # Simple queries have already been shown, but after we have sent a query,
+ # we can also do more extensive checks on the result.
+ # See querying.py for more information on these steps.
+
+ # note: lettuce can group similar checks by using tables, but we
+ # intentionally do not make use of that here
+
+ # This is a compound statement that starts and waits for the
+ # started message
+ Given I have bind10 running with configuration example.org.config
+
+ # Some simple queries that are not examined further
+ A query for www.example.com should have rcode REFUSED
+ A query for www.example.org should have rcode NOERROR
+
+ # A query where we look at some of the result properties
+ A query for www.example.org should have rcode NOERROR
+ The last query response should have qdcount 1
+ The last query response should have ancount 1
+ The last query response should have nscount 3
+ The last query response should have adcount 0
+ # The answer section can be inspected in its entirety; in the future
+ # we may add more granular inspection steps
+ The answer section of the last query response should be
+ """
+ www.example.org. 3600 IN A 192.0.2.1
+ """
+
+ A query for example.org type NS should have rcode NOERROR
+ The answer section of the last query response should be
+ """
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN NS ns3.example.org.
+ """
+
+ # We have a specific step for checking SOA serial numbers
+ The SOA serial for example.org should be 1234
+
+ # Another query where we look at some of the result properties
+ A query for doesnotexist.example.org should have rcode NXDOMAIN
+ The last query response should have qdcount 1
+ The last query response should have ancount 0
+ The last query response should have nscount 1
+ The last query response should have adcount 0
+ # When checking flags, we must pass them exactly as they appear in
+ # the output of dig.
+ The last query response should have flags qr aa rd
+
+ A query for www.example.org type TXT should have rcode NOERROR
+ The last query response should have ancount 0
+
+ # Some queries where we specify more details about what to send and
+ # where
+ A query for www.example.org class CH should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org type A class IN to 127.0.0.1:47806 should have rcode NOERROR
+
+ Scenario: changing database
+ # This scenario contains a lot of 'wait for' steps.
+ # If those are not present, the asynchronous nature of the application
+ # can cause some of the things we send to be handled out of order;
+ # for instance, auth could still be serving the old zone when we send
+ # the new query, or it could already be responding from the new database.
+ # Therefore we wait for specific log messages after each operation
+ #
+ # This scenario outlines every single step, and does not use
+ # 'steps of steps' (e.g. Given I have bind10 running)
+ # We could do that, but as an example this is probably better for learning
+ # the system
+
+ When I start bind10 with configuration example.org.config
+ Then wait for bind10 auth to start
+ Wait for bind10 stderr message CMDCTL_STARTED
+ A query for www.example.org should have rcode NOERROR
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode REFUSED
+ Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+ Then set bind10 configuration Auth/database_file to data/example.org.sqlite3
+ And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+ A query for www.example.org should have rcode NOERROR
+
+ Scenario: two bind10 instances
+ # This is more a test of the test system itself: it starts two bind10 instances
+ When I start bind10 with configuration example.org.config as bind10_one
+ And I start bind10 with configuration example2.org.config with cmdctl port 47804 as bind10_two
+
+ Then wait for bind10 auth of bind10_one to start
+ Then wait for bind10 auth of bind10_two to start
+ A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+
+ Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+ And wait for bind10_one stderr message DATASRC_SQLITE_OPEN
+
+ A query for www.example.org to 127.0.0.1:47806 should have rcode REFUSED
+ A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
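For readers new to lettuce: each plain-English line in the scenarios above is
matched against a regular expression attached to a Python step function, and
the step definitions follow in the terrain/ files below. As a minimal
illustrative sketch, assuming only the standard re module, this is roughly how
the 'A query for ...' pattern from querying.py captures the pieces of one
scenario line; lettuce performs this matching internally:

    import re

    # Step pattern as defined in tests/lettuce/features/terrain/querying.py
    QUERY_STEP = re.compile(r'A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?'
                            r'(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')

    line = 'A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR'
    print(QUERY_STEP.match(line).groups())
    # -> ('www.example.org', None, None, '127.0.0.1', '47806', 'NOERROR')
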
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
new file mode 100644
index 0000000..e104a81
--- /dev/null
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from lettuce import *
+import subprocess
+import re
+
+@step('start bind10(?: with configuration (\S+))?' +\
+ '(?: with cmdctl port (\d+))?(?: as (\S+))?')
+def start_bind10(step, config_file, cmdctl_port, process_name):
+ """
+ Start BIND 10 with the given optional config file, cmdctl port, and
+ store the running process in world with the given process name.
+ Parameters:
+ config_file ('with configuration <file>', optional): this configuration
+ will be used. The path is relative to the base lettuce
+ directory.
+ cmdctl_port ('with cmdctl port <portnr>', optional): The port on which
+ b10-cmdctl listens for bindctl commands. Defaults to 47805.
+ process_name ('as <name>', optional). This is the name that can be used
+ in the following steps of the scenario to refer to this
+ BIND 10 instance. Defaults to 'bind10'.
+ This call will block until BIND10_STARTUP_COMPLETE or BIND10_STARTUP_ERROR
+ is logged. In the case of the latter, or if it times out, the step (and
+ scenario) will fail.
+ It will also fail if there is a running process with the given process_name
+ already.
+ """
+ args = [ 'bind10', '-v' ]
+ if config_file is not None:
+ args.append('-p')
+ args.append("configurations/")
+ args.append('-c')
+ args.append(config_file)
+ if cmdctl_port is None:
+ args.append('--cmdctl-port=47805')
+ else:
+ args.append('--cmdctl-port=' + cmdctl_port)
+ if process_name is None:
+ process_name = "bind10"
+ else:
+ args.append('-m')
+ args.append(process_name + '_msgq.socket')
+
+ world.processes.add_process(step, process_name, args)
+
+ # check output to know when startup has been completed
+ message = world.processes.wait_for_stderr_str(process_name,
+ ["BIND10_STARTUP_COMPLETE",
+ "BIND10_STARTUP_ERROR"])
+ assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(message)
+
+@step('wait for bind10 auth (?:of (\w+) )?to start')
+def wait_for_auth(step, process_name):
+ """Wait for b10-auth to run. This is done by blocking until the message
+ AUTH_SERVER_STARTED is logged.
+ Parameters:
+ process_name ('of <name>', optional): The name of the BIND 10 instance
+ to wait for. Defaults to 'bind10'.
+ """
+ if process_name is None:
+ process_name = "bind10"
+ world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
+ False)
+
+@step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+ """
+ Compound convenience step for running bind10, which consists of
+ start_bind10 and wait_for_auth.
+ Currently only supports the 'with configuration' option.
+ """
+ step.given('start bind10 with configuration ' + config_file)
+ step.given('wait for bind10 auth to start')
+
+@step('set bind10 configuration (\S+) to (.*)(?: with cmdctl port (\d+))?')
+def set_config_command(step, name, value, cmdctl_port):
+ """
+ Run bindctl, set the given configuration to the given value, and commit it.
+ Parameters:
+ name ('configuration <name>'): Identifier of the configuration to set
+ value ('to <value>'): value to set it to.
+ cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+ the command to. Defaults to 47805.
+ Fails if cmdctl does not exit with status code 0.
+ """
+ if cmdctl_port is None:
+ cmdctl_port = '47805'
+ args = ['bindctl', '-p', cmdctl_port]
+ bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ subprocess.PIPE, None)
+ bindctl.stdin.write("config set " + name + " " + value + "\n")
+ bindctl.stdin.write("config commit\n")
+ bindctl.stdin.write("quit\n")
+ result = bindctl.wait()
+ assert result == 0, "bindctl exit code: " + str(result)
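As have_bind10_running above illustrates, compound steps are simply chains of
step.given() calls. A minimal sketch of how a further compound step could be
built from the same primitives; the step text 'restart bind10 with
configuration <file>' is hypothetical and not part of this branch:

    from lettuce import step

    @step(r'restart bind10 with configuration (\S+)')
    def restart_bind10(step, config_file):
        # Reuses steps defined in this branch (bind10_control.py and steps.py):
        # stop the default instance, start it again, and wait for b10-auth.
        step.given('stop process bind10')
        step.given('start bind10 with configuration ' + config_file)
        step.given('wait for bind10 auth to start')
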
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
new file mode 100644
index 0000000..ea89b18
--- /dev/null
+++ b/tests/lettuce/features/terrain/querying.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This script provides querying functionality
+# The most important step is
+#
+# query for <name> [type X] [class X] [to <addr>[:port]] should have rcode <rc>
+#
+# By default, it will send queries to 127.0.0.1:47806 unless specified
+# otherwise. The rcode is always checked. If the result is not NO_ANSWER,
+# the result will be stored in last_query_result, which can then be inspected
+# more closely, for instance with the step
+#
+# "the last query response should have <property> <value>"
+#
+# Also see example.feature for some examples
+
+from lettuce import *
+import subprocess
+import re
+
+#
+# define a class to easily access different parts of a dig response
+# We may consider using our full library for this, but for now
+# simply store several parts of the response as text values in
+# this structure.
+# (this actually has the advantage of not relying on our own libraries
+# to test our own, well, libraries)
+#
+# The following attributes are 'parsed' from the response, all as strings,
+# and end up as direct attributes of the QueryResult object:
+# opcode, rcode, id, flags, qdcount, ancount, nscount, adcount
+# (flags is one string with all flags, in the order they appear in the
+# response packet.)
+#
+# This will set 'rcode' to the result code; we 'define' one additional
+# rcode, "NO_ANSWER", for the case where the dig process itself returned an
+# error code. In that case none of the other attributes will be set.
+#
+# The different sections will be lists of strings, one for each RR in the
+# section. The question section will start with ';', as per dig output
+#
+# See server_from_sqlite3.feature for various examples of performing queries
+class QueryResult(object):
+ status_re = re.compile("opcode: ([A-Z]+), status: ([A-Z]+), id: ([0-9]+)")
+ flags_re = re.compile("flags: ([a-z ]+); QUERY: ([0-9]+), ANSWER: " +
+ "([0-9]+), AUTHORITY: ([0-9]+), ADDITIONAL: ([0-9]+)")
+
+ def __init__(self, name, qtype, qclass, address, port):
+ """
+ Constructor. This fires off a query using dig.
+ Parameters:
+ name: The domain name to query
+ qtype: The RR type to query. Defaults to A if it is None.
+ qclass: The RR class to query. Defaults to IN if it is None.
+ address: The IP address to send the query to.
+ port: The port number to send the query to.
+ All parameters must be either strings or have the correct string
+ representation.
+ Only one query attempt will be made.
+ """
+ args = [ 'dig', '+tries=1', '@' + str(address), '-p', str(port) ]
+ if qtype is not None:
+ args.append('-t')
+ args.append(str(qtype))
+ if qclass is not None:
+ args.append('-c')
+ args.append(str(qclass))
+ args.append(name)
+ dig_process = subprocess.Popen(args, 1, None, None, subprocess.PIPE,
+ None)
+ result = dig_process.wait()
+ if result != 0:
+ self.rcode = "NO_ANSWER"
+ else:
+ self.rcode = None
+ parsing = "HEADER"
+ self.question_section = []
+ self.answer_section = []
+ self.authority_section = []
+ self.additional_section = []
+ self.line_handler = self.parse_header
+ for out in dig_process.stdout:
+ self.line_handler(out)
+
+ def _check_next_header(self, line):
+ """
+ Returns true if we found a next header, and sets the internal
+ line handler to the appropriate value.
+ """
+ if line == ";; ANSWER SECTION:\n":
+ self.line_handler = self.parse_answer
+ elif line == ";; AUTHORITY SECTION:\n":
+ self.line_handler = self.parse_authority
+ elif line == ";; ADDITIONAL SECTION:\n":
+ self.line_handler = self.parse_additional
+ elif line.startswith(";; Query time"):
+ self.line_handler = self.parse_footer
+ else:
+ return False
+ return True
+
+ def parse_header(self, line):
+ """
+ Parse the header lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ status_match = self.status_re.search(line)
+ flags_match = self.flags_re.search(line)
+ if status_match is not None:
+ self.opcode = status_match.group(1)
+ self.rcode = status_match.group(2)
+ elif flags_match is not None:
+ self.flags = flags_match.group(1)
+ self.qdcount = flags_match.group(2)
+ self.ancount = flags_match.group(3)
+ self.nscount = flags_match.group(4)
+ self.adcount = flags_match.group(5)
+
+ def parse_question(self, line):
+ """
+ Parse the question section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.question_section.append(line.strip())
+
+ def parse_answer(self, line):
+ """
+ Parse the answer section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.answer_section.append(line.strip())
+
+ def parse_authority(self, line):
+ """
+ Parse the authority section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.authority_section.append(line.strip())
+
+ def parse_additional(self, line):
+ """
+ Parse the additional section lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ if not self._check_next_header(line):
+ if line != "\n":
+ self.additional_section.append(line.strip())
+
+ def parse_footer(self, line):
+ """
+ Parse the footer lines of the query response.
+ Parameters:
+ line: The current line of the response.
+ """
+ pass
+
+@step('A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?' +
+ '(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')
+def query(step, query_name, qtype, qclass, addr, port, rcode):
+ """
+ Run a query, check the rcode of the response, and store the query
+ result in world.last_query_result.
+ Parameters:
+ query_name ('query for <name>'): The domain name to query.
+ qtype ('type <type>', optional): The RR type to query. Defaults to A.
+ qclass ('class <class>', optional): The RR class to query. Defaults to IN.
+ addr ('to <address>', optional): The IP address of the nameserver to query.
+ Defaults to 127.0.0.1.
+ port (':<port>', optional): The port number of the nameserver to query.
+ Defaults to 47806.
+ rcode ('should have rcode <rcode>'): The expected rcode of the answer.
+ """
+ if qtype is None:
+ qtype = "A"
+ if qclass is None:
+ qclass = "IN"
+ if addr is None:
+ addr = "127.0.0.1"
+ if port is None:
+ port = 47806
+ query_result = QueryResult(query_name, qtype, qclass, addr, port)
+ assert query_result.rcode == rcode,\
+ "Expected: " + rcode + ", got " + query_result.rcode
+ world.last_query_result = query_result
+
+@step('The SOA serial for ([\w.]+) should be ([0-9]+)')
+def query_soa(step, query_name, serial):
+ """
+ Convenience function to check the SOA SERIAL value of the given zone at
+ the nameserver at the default address (127.0.0.1:47806).
+ Parameters:
+ query_name ('for <name>'): The zone to find the SOA record for.
+ serial ('should be <number>'): The expected value of the SOA SERIAL.
+ If the rcode is not NOERROR, or the answer section does not contain the
+ SOA record, this step fails.
+ """
+ query_result = QueryResult(query_name, "SOA", "IN", "127.0.0.1", "47806")
+ assert "NOERROR" == query_result.rcode,\
+ "Got " + query_result.rcode + ", expected NOERROR"
+ assert len(query_result.answer_section) == 1,\
+ "Too few or too many answers in SOA response"
+ soa_parts = query_result.answer_section[0].split()
+ assert serial == soa_parts[6],\
+ "Got SOA serial " + soa_parts[6] + ", expected " + serial
+
+@step('last query response should have (\S+) (.+)')
+def check_last_query(step, item, value):
+ """
+ Check a specific value in the response from the last successful query sent.
+ Parameters:
+ item: The item to check the value of
+ value: The expected value.
+ This performs a very simple direct string comparison of the QueryResult
+ member with the given item name and the given value.
+ Fails if the item is unknown, or if its value does not match the expected
+ value.
+ """
+ assert world.last_query_result is not None
+ assert item in world.last_query_result.__dict__
+ lq_val = world.last_query_result.__dict__[item]
+ assert str(value) == str(lq_val),\
+ "Got: " + str(lq_val) + ", expected: " + str(value)
+
+@step('([a-zA-Z]+) section of the last query response should be')
+def check_last_query_section(step, section):
+ """
+ Check the entire contents of the given section of the response of the last
+ query.
+ Parameters:
+ section ('<section> section'): The name of the section (QUESTION, ANSWER,
+ AUTHORITY or ADDITIONAL).
+ The expected response is taken from the multiline part of the step in the
+ scenario. Differing whitespace is ignored, but currently the order is
+ significant.
+ Fails if they do not match.
+ """
+ response_string = None
+ if section.lower() == 'question':
+ response_string = "\n".join(world.last_query_result.question_section)
+ elif section.lower() == 'answer':
+ response_string = "\n".join(world.last_query_result.answer_section)
+ elif section.lower() == 'authority':
+ response_string = "\n".join(world.last_query_result.authority_section)
+ elif section.lower() == 'additional':
+ response_string = "\n".join(world.last_query_result.additional_section)
+ else:
+ assert False, "Unknown section " + section
+ # replace whitespace of any length by one space
+ response_string = re.sub("[ \t]+", " ", response_string)
+ expect = re.sub("[ \t]+", " ", step.multiline)
+ assert response_string.strip() == expect.strip(),\
+ "Got:\n'" + response_string + "'\nExpected:\n'" + step.multiline +"'"
+
+
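The QueryResult class above can also be exercised by hand, outside lettuce,
which is a quick way to debug a failing query step. A minimal sketch, assuming
dig is on the PATH and a server is answering on 127.0.0.1:47806; the import
path is illustrative only, since in the tree the class lives in
tests/lettuce/features/terrain/querying.py:

    from querying import QueryResult  # illustrative import; adjust to your layout

    result = QueryResult('www.example.org', 'A', 'IN', '127.0.0.1', 47806)
    print(result.rcode)           # 'NOERROR', or 'NO_ANSWER' if dig itself failed
    print(result.flags)           # e.g. 'qr aa rd' (only set when dig succeeded)
    print(result.answer_section)  # answer RRs as stripped text lines
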
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
new file mode 100644
index 0000000..4050940
--- /dev/null
+++ b/tests/lettuce/features/terrain/steps.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This file contains a number of common steps that are general and may be used
+# by a lot of feature files.
+#
+
+from lettuce import *
+import os
+
+@step('stop process (\w+)')
+def stop_a_named_process(step, process_name):
+ """
+ Stop the process with the given name.
+ Parameters:
+ process_name ('process <name>'): Name of the process to stop.
+ """
+ world.processes.stop_process(process_name)
+
+@step('wait for (new )?(\w+) stderr message (\w+)')
+def wait_for_message(step, new, process_name, message):
+ """
+ Block until the given message is printed to the given process's stderr
+ output.
+ Parameter:
+ new: (' new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stderr'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stderr_str(process_name, [message], new)
+
+@step('wait for (new )?(\w+) stdout message (\w+)')
+def wait_for_stdout_message(step, new, process_name, message):
+ """
+ Block until the given message is printed to the given process's stdout
+ output.
+ Parameter:
+ new: (' new', optional): Only check the output printed since last time
+ this step was used for this process.
+ process_name ('<name> stdout'): Name of the process to check the output of.
+ message ('message <message>'): Output (part) to wait for.
+ Fails if the message is not found after 10 seconds.
+ """
+ world.processes.wait_for_stdout_str(process_name, [message], new)
+
+@step('the file (\S+) should (not )?exist')
+def check_existence(step, file_name, should_not_exist):
+ """
+ Check the existence of the given file.
+ Parameters:
+ file_name ('file <name>'): File to check existence of.
+ should_not_exist ('not', optional): Whether it should or should not exist.
+ Fails if the file should exist and does not, or vice versa.
+ """
+ if should_not_exist is None:
+ assert os.path.exists(file_name), file_name + " does not exist"
+ else:
+ assert not os.path.exists(file_name), file_name + " exists"
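New generic steps follow the same pattern as those above: a regular
expression, possibly with optional groups, and a function whose extra
arguments receive the captured values (None when a group did not match). A
sketch of one more such step in the same style; the step text and function
are hypothetical and not part of this branch:

    from lettuce import step

    @step(r'the file (\S+) should contain (\S+)')
    def check_file_contains(step, file_name, text):
        # Fails if the file is missing or does not contain the given string.
        with open(file_name) as f:
            assert text in f.read(), file_name + ' does not contain ' + text
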
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
new file mode 100644
index 0000000..634d2fb
--- /dev/null
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This is the 'terrain' in which the lettuce lives. By convention, this is
+# where global setup and teardown is defined.
+#
+# We declare some attributes of the global 'world' variable here, so the
+# tests can safely assume they are present.
+#
+# We also use it to provide scenario invariants, such as resetting data.
+#
+
+from lettuce import *
+import subprocess
+import os.path
+import shutil
+import re
+import time
+
+# In order to make sure we start all tests with a 'clean' environment,
+# we perform a number of initialization steps, like restoring configuration
+# files, and removing generated data files.
+
+# This approach may not scale; if so we should probably provide specific
+# initialization steps for scenarios. But until that is shown to be a problem,
+# it will keep the scenarios cleaner.
+
+# This is a list of files that are freshly copied before each scenario
+# The first element is the original, the second is the target that will be
+# used by the tests that need them
+copylist = [
+["configurations/example.org.config.orig", "configurations/example.org.config"]
+]
+
+# This is a list of files that, if present, will be removed before a scenario
+removelist = [
+"data/test_nonexistent_db.sqlite3"
+]
+
+# When waiting for output data of a running process, use OUTPUT_WAIT_INTERVAL
+# as the interval in which to check again if it has not been found yet.
+# If we have waited OUTPUT_WAIT_MAX_INTERVALS times, we will abort with an
+# error (so as not to hang indefinitely)
+OUTPUT_WAIT_INTERVAL = 0.5
+OUTPUT_WAIT_MAX_INTERVALS = 20
+
+# class that keeps track of one running process and the files
+# we created for it.
+class RunningProcess:
+ def __init__(self, step, process_name, args):
+ # set it to none first so destructor won't error if initializer did
+ """
+ Initialize the long-running process structure, and start the process.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ """
+ self.process = None
+ self.step = step
+ self.process_name = process_name
+ self.remove_files_on_exit = True
+ self._check_output_dir()
+ self._create_filenames()
+ self._start_process(args)
+
+ def _start_process(self, args):
+ """
+ Start the process.
+ Parameters:
+ args:
+ Array of arguments to pass to Popen().
+ """
+ stderr_write = open(self.stderr_filename, "w")
+ stdout_write = open(self.stdout_filename, "w")
+ self.process = subprocess.Popen(args, 1, None, subprocess.PIPE,
+ stdout_write, stderr_write)
+ # open them again, this time for reading
+ self.stderr = open(self.stderr_filename, "r")
+ self.stdout = open(self.stdout_filename, "r")
+
+ def mangle_filename(self, filebase, extension):
+ """
+ Remove whitespace and non-default characters from a base string,
+ and return the substituted value. Whitespace is replaced by an
+ underscore. Any other character that is not an ASCII letter, a
+ number, a dot, or a hyphen or underscore is removed.
+ Parameter:
+ filebase: The string to perform the substitution and removal on
+ extension: An extension to append to the result value
+ Returns the modified filebase with the given extension
+ """
+ filebase = re.sub("\s+", "_", filebase)
+ filebase = re.sub("[^a-zA-Z0-9.\-_]", "", filebase)
+ return filebase + "." + extension
+
+ def _check_output_dir(self):
+ # We may want to make this overridable by the user, perhaps
+ # through an environment variable. Since we currently expect
+ # lettuce to be run from our lettuce dir, we shall just use
+ # the relative path 'output/'
+ """
+ Make sure the output directory for stdout/stderr redirection
+ exists.
+ Fails if it exists but is not a directory, or if it does not
+ and we are unable to create it.
+ """
+ self._output_dir = os.getcwd() + os.sep + "output"
+ if not os.path.exists(self._output_dir):
+ os.mkdir(self._output_dir)
+ assert os.path.isdir(self._output_dir),\
+ self._output_dir + " is not a directory."
+
+ def _create_filenames(self):
+ """
+ Derive the filenames for stdout/stderr redirection from the
+ feature, scenario, and process name. The base will be
+ "<Feature>-<Scenario>-<process name>.[stdout|stderr]"
+ """
+ filebase = self.step.scenario.feature.name + "-" +\
+ self.step.scenario.name + "-" + self.process_name
+ self.stderr_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stderr")
+ self.stdout_filename = self._output_dir + os.sep +\
+ self.mangle_filename(filebase, "stdout")
+
+ def stop_process(self):
+ """
+ Stop this process by calling terminate(). Blocks until process has
+ exited. If remove_files_on_exit is True, redirected output files
+ are removed.
+ """
+ if self.process is not None:
+ self.process.terminate()
+ self.process.wait()
+ self.process = None
+ if self.remove_files_on_exit:
+ self._remove_files()
+
+ def _remove_files(self):
+ """
+ Remove the files created for redirection of stdout/stderr output.
+ """
+ os.remove(self.stderr_filename)
+ os.remove(self.stdout_filename)
+
+ def _wait_for_output_str(self, filename, running_file, strings, only_new):
+ """
+ Wait for a line of output in this process. This will (if only_new is
+ False) first check all previous output from the process, and if not
+ found, check all output since the last time this method was called.
+ For each line in the output, the given strings array is checked. If
+ any output lines checked contains one of the strings in the strings
+ array, that string (not the line!) is returned.
+ Parameters:
+ filename: The filename to read previous output from, if applicable.
+ running_file: The open file to read new output from.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ if not only_new:
+ full_file = open(filename, "r")
+ for line in full_file:
+ for string in strings:
+ if line.find(string) != -1:
+ full_file.close()
+ return string
+ wait_count = 0
+ while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
+ where = running_file.tell()
+ line = running_file.readline()
+ if line:
+ for string in strings:
+ if line.find(string) != -1:
+ return string
+ else:
+ wait_count += 1
+ time.sleep(OUTPUT_WAIT_INTERVAL)
+ running_file.seek(where)
+ assert False, "Timeout waiting for process output: " + str(strings)
+
+ def wait_for_stderr_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stderr output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stderr_filename, self.stderr,
+ strings, only_new)
+
+ def wait_for_stdout_str(self, strings, only_new = True):
+ """
+ Wait for one of the given strings in this process's stdout output.
+ Parameters:
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ """
+ return self._wait_for_output_str(self.stdout_filename, self.stdout,
+ strings, only_new)
+
+# Container class for a number of running processes
+# i.e. servers like bind10, etc
+# one-shot programs like dig or bindctl are started and closed separately
+class RunningProcesses:
+ def __init__(self):
+ """
+ Initialize with no running processes.
+ """
+ self.processes = {}
+
+ def add_process(self, step, process_name, args):
+ """
+ Start a process with the given arguments, and store it under the given
+ name.
+ Parameters:
+ step: The scenario step it was called from. This is used for
+ determining the output files for redirection of stdout
+ and stderr.
+ process_name: The name to refer to this running process later.
+ args: Array of arguments to pass to Popen().
+ Fails if a process with the given name is already running.
+ """
+ assert process_name not in self.processes,\
+ "Process " + name + " already running"
+ self.processes[process_name] = RunningProcess(step, process_name, args)
+
+ def get_process(self, process_name):
+ """
+ Return the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + name + " unknown"
+ return self.processes[process_name]
+
+ def stop_process(self, process_name):
+ """
+ Stop the Process with the given process name.
+ Parameters:
+ process_name: The name of the process to return.
+ Fails if the process is not running.
+ """
+ assert process_name in self.processes,\
+ "Process " + name + " unknown"
+ self.processes[process_name].stop_process()
+ del self.processes[process_name]
+
+ def stop_all_processes(self):
+ """
+ Stop all running processes.
+ """
+ for process in self.processes.values():
+ process.stop_process()
+
+ def keep_files(self):
+ """
+ Keep the redirection files for stdout/stderr output of all processes
+ instead of removing them when they are stopped later.
+ """
+ for process in self.processes.values():
+ process.remove_files_on_exit = False
+
+ def wait_for_stderr_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stderr output.
+ Parameters:
+ process_name: The name of the process to check the stderr output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stderr_str(strings,
+ only_new)
+
+ def wait_for_stdout_str(self, process_name, strings, only_new = True):
+ """
+ Wait for one of the given strings in the given process's stdout output.
+ Parameters:
+ process_name: The name of the process to check the stdout output of.
+ strings: Array of strings to look for.
+ only_new: If true, only check output since last time this method was
+ called. If false, first check earlier output.
+ Returns the matched string.
+ Fails if none of the strings was read after 10 seconds
+ (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+ Fails if the process is unknown.
+ """
+ assert process_name in self.processes,\
+ "Process " + process_name + " unknown"
+ return self.processes[process_name].wait_for_stdout_str(strings,
+ only_new)
+
+@before.each_scenario
+def initialize(scenario):
+ """
+ Global initialization for each scenario.
+ """
+ # Keep track of running processes
+ world.processes = RunningProcesses()
+
+ # Convenience variable to access the last query result from querying.py
+ world.last_query_result = None
+
+ # Some tests can modify the settings. If the tests fail half-way, or
+ # don't clean up, this can leave configurations or data in a bad state,
+ # so we copy them from originals before each scenario
+ for item in copylist:
+ shutil.copy(item[0], item[1])
+
+ for item in removelist:
+ if os.path.exists(item):
+ os.remove(item)
+
+@after.each_scenario
+def cleanup(scenario):
+ """
+ Global cleanup for each scenario.
+ """
+ # Keep output files if the scenario failed
+ if not scenario.passed:
+ world.processes.keep_files()
+ # Stop any running processes we may have had around
+ world.processes.stop_all_processes()
+
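The redirection files created by RunningProcess follow the naming scheme
described in mangle_filename() and _create_filenames():
"<Feature>-<Scenario>-<process name>.stderr" (or .stdout) with whitespace
replaced by underscores, written under output/. A small worked example of
that naming, using the feature and scenario names from example.feature above:

    import re

    filebase = 'Example feature' + '-' + 'A simple example' + '-' + 'bind10'
    filebase = re.sub(r'\s+', '_', filebase)              # whitespace -> underscore
    filebase = re.sub(r'[^a-zA-Z0-9.\-_]', '', filebase)  # drop anything else
    print(filebase + '.stderr')
    # -> Example_feature-A_simple_example-bind10.stderr
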
diff --git a/tests/lettuce/setup_intree_bind10.sh.in b/tests/lettuce/setup_intree_bind10.sh.in
new file mode 100755
index 0000000..40fd82d
--- /dev/null
+++ b/tests/lettuce/setup_intree_bind10.sh.in
@@ -0,0 +1,46 @@
+#! /bin/sh
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
+export PYTHON_EXEC
+
+BIND10_PATH=@abs_top_builddir@/src/bin/bind10
+
+PATH=@abs_top_builddir@/src/bin/bind10:@abs_top_builddir@/src/bin/bindctl:@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
+export PATH
+
+PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
+export PYTHONPATH
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+# TODO: We need to do this feature based (ie. no general from_source)
+# But right now we need a second one because some spec files are
+# generated and hence end up under builddir
+B10_FROM_BUILD=@abs_top_builddir@
+export B10_FROM_BUILD
+
+BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
+export BIND10_MSGQ_SOCKET_FILE
diff --git a/tests/system/README b/tests/system/README
index a43d49e..a1c0a97 100644
--- a/tests/system/README
+++ b/tests/system/README
@@ -5,48 +5,49 @@ See COPYRIGHT in the source root or http://isc.org/copyright.html for terms.
This is a simple test environment for running BIND 10 system tests
involving multiple name servers. It was originally developed for BIND
9, and has been ported to test BIND 10 implementations. Ideally we
-should share the same framework for both versions, so some part of
-the original setup are kept, even though they are BIND 9 specific and
-not currently used.
+should share the same framework for both versions, so some parts of the
+original setup are kept, even though they are BIND 9 specific and not
+currently used.
-Also, these tests generally rely on BIND 9 programs, most commonly its
-dig, and will sometimes be its name server (named). So, the test
+Also, these tests generally rely on BIND 9 programs, most commonly
+its dig, and will sometimes be its name server (named). So, the test
environment assumes that there's a source tree of BIND 9 where its
-programs are built, and that an environment variable "BIND9_TOP" is
-set to point to the top directory of the source tree.
+programs are built, and that an environment variable "BIND9_TOP" is set
+to point to the top directory of the source tree.
There are multiple test suites, each in a separate subdirectory and
involving a different DNS setup. They are:
bindctl/ Some basic management operations using the bindctl tool
- glue/ Glue handling tests
+ glue/ Glue handling tests
+ ixfr/ Incremental transfer tests
+
(the following tests are planned to be added soon)
- dnssec/ DNSSEC tests
+ dnssec/ DNSSEC tests
masterfile/ Master file parser
- xfer/ Zone transfer tests
+ axfr/ Full-transfer tests
Typically each test suite sets up 2-5 instances of BIND 10 (or BIND 9
-named) and then performs one or more tests against them. Within the
-test suite subdirectory, each instance has a separate subdirectory
-containing its configuration data. By convention, these
-subdirectories are named "nsx1", "nsx2", etc for BIND 10 ("x" means
-BIND 10), and "ns1", "ns2", etc. for BIND 9.
+named) and then performs one or more tests against them. Within the test
+suite subdirectory, each instance has a separate subdirectory containing
+its configuration data. By convention, these subdirectories are named
+"nsx1", "nsx2", etc for BIND 10 ("x" means BIND 10), and "ns1", "ns2",
+etc. for BIND 9.
The tests are completely self-contained and do not require access to
-the real DNS. Generally, one of the test servers (ns[x]1) is set up
-as a root name server and is listed in the hints file of the others.
+the real DNS. Generally, one of the test servers (ns[x]1) is set up as
+a root name server and is listed in the hints file of the others.
-To enable all servers to run on the same machine, they bind to
-separate virtual IP address on the loopback interface. ns[x]1 runs on
-10.53.0.1, ns[x]2 on 10.53.0.2, etc. Before running any tests, you
-must set up these addresses by running "ifconfig.sh up" as root.
+To enable all servers to run on the same machine, they bind to separate
+virtual IP addresses on the loopback interface. ns[x]1 runs on 10.53.0.1,
+ns[x]2 on 10.53.0.2, etc. Before running any tests, you must set up
+these addresses by running "ifconfig.sh up" as root.
Mac OS X:
-If you wish to make the interfaces survive across reboots
-copy org.isc.bind.system and org.isc.bind.system to
-/Library/LaunchDaemons then run
-"launchctl load /Library/LaunchDaemons/org.isc.bind.system.plist" as
-root.
+If you wish to make the interfaces survive across reboots, copy
+org.isc.bind.system and org.isc.bind.system.plist to /Library/LaunchDaemons
+then run "launchctl load /Library/LaunchDaemons/org.isc.bind.system.plist"
+as root.
The servers use port 53210 instead of the usual port 53, so they can be
run without root privileges once the interfaces have been set up.
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..49ef0f1 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
status=0
n=0
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
echo "I:Checking b10-auth is working by default ($n)"
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
@@ -40,8 +44,8 @@ echo 'Stats show
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# the server should have received 1 UDP and 1 TCP queries (TCP query was
# sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -73,8 +77,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -97,8 +101,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
index 17c3d4a..434c6b1 100755
--- a/tests/system/cleanall.sh
+++ b/tests/system/cleanall.sh
@@ -27,7 +27,10 @@ find . -type f \( \
status=0
-for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+for d in ./.* ./* ./*/*
do
+ case $d in ./.|./..) continue ;; esac
+ test -d $d || continue
+
test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
done
diff --git a/tests/system/common/rndc.conf b/tests/system/common/rndc.conf
new file mode 100644
index 0000000..a897548
--- /dev/null
+++ b/tests/system/common/rndc.conf
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+options {
+ default-key "rndc_key";
+};
+
+key rndc_key {
+ algorithm hmac-md5;
+ secret "1234abcd8765";
+};
diff --git a/tests/system/common/rndc.key b/tests/system/common/rndc.key
new file mode 100644
index 0000000..c2c3457
--- /dev/null
+++ b/tests/system/common/rndc.key
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id: rndc.key,v 1.3 2011-03-12 04:59:47 tbox Exp $ */
+
+key rndc_key {
+ secret "1234abcd8765";
+ algorithm hmac-md5;
+};
diff --git a/tests/system/conf.sh.in b/tests/system/conf.sh.in
index 66aa3f5..92f72fa 100755
--- a/tests/system/conf.sh.in
+++ b/tests/system/conf.sh.in
@@ -35,23 +35,36 @@ if [ -z $BIND9_TOP ]; then
fi
# Find the top of the source and test trees.
-TOP=@abs_top_srcdir@
-TEST_TOP=@abs_builddir@
-
-RUN_BIND10=$TOP/src/bin/bind10/run_bind10.sh
-RUN_BINDCTL=$TOP/src/bin/bindctl/run_bindctl.sh
-BINDCTL_CSV_DIR=@abs_srcdir@/common/
-B10_LOADZONE=$TOP/src/bin/loadzone/run_loadzone.sh
-BIND9_NAMED=$BIND9_TOP/bin/named/named
-DIG=$BIND9_TOP/bin/dig/dig
+export TOP=@abs_top_srcdir@
+export TEST_TOP=@abs_builddir@
+
+# Programs
+export RUN_BIND10=$TOP/src/bin/bind10/run_bind10.sh
+export RUN_BINDCTL=$TOP/src/bin/bindctl/run_bindctl.sh
+export BINDCTL_CSV_DIR=@abs_srcdir@/common/
+export B10_LOADZONE=$TOP/src/bin/loadzone/run_loadzone.sh
+export BIND9_NAMED=$BIND9_TOP/bin/named/named
+export DIG=$BIND9_TOP/bin/dig/dig
+export RNDC=$BIND9_TOP/bin/rndc/rndc
+
# Test tools borrowed from BIND 9's system test (without change).
-TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
-DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
+export TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
+export DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
+
+export SUBDIRS="bindctl glue ixfr/in-2"
+# Add appropriate subdirectories to the above statement as the tests become
+# available.
+#SUBDIRS="dnssec masterfile ixfr/in-1 ixfr/in-2 ixfr/in-4"
-SUBDIRS="bindctl glue"
-#SUBDIRS="dnssec masterfile xfer"
+# PERL will be an empty string if no perl interpreter was found. A similar
+# comment applies to AWK.
+export PERL=@PERL@
+export AWK=@AWK@
-# PERL will be an empty string if no perl interpreter was found.
-PERL=@PERL@
+# Other constants
+export RNDC_PORT=9953
+export DNS_PORT=53210
-export RUN_BIND10 BIND9_NAMED DIG SUBDIRS PERL TESTSOCK
+export TESTS_TOP=$TOP/tests
+export SYSTEM_TOP=$TESTS_TOP/system
+export IXFR_TOP=$SYSTEM_TOP/ixfr
diff --git a/tests/system/ixfr/README b/tests/system/ixfr/README
new file mode 100644
index 0000000..51cba8a
--- /dev/null
+++ b/tests/system/ixfr/README
@@ -0,0 +1,86 @@
+Introduction
+============
+The directories in-1 to in-4 implement the following tests of the IXFR-in
+capability of BIND 10.
+
+in-1: Check that BIND 10 can receive IXFR in a single UDP packet.
+in-2: Check that BIND 10 can receive IXFR via TCP.
+in-3: Check that BIND 10 will request AXFR if the server does not support IXFR.
+in-4: Check that BIND 10 will request IXFR when its SOA refresh times out
+
+The tests are described more fully in the document:
+
+http://bind10.isc.org/wiki/IxfrSystemTests
+
+Overview
+========
+All the tests use two nameservers:
+
+* A BIND 9 nameserver acting as the IXFR server (using the nomenclature
+of RFC 1995).
+* A BIND 10 nameserver acting as the IXFR client.
+
+In general, the tests attempt to set up the server and client independently.
+Communication is established between the systems by updating their
+configurations and sending a notification to the client. This should cause the
+client to request an IXFR from the server. (The exception is test 4, where the
+request is a result of the expiration of the SOA refresh time.)
+
+A check of zone files - or in these tests, of SOA serial number - can only
+reveal that a transfer has taken place. To check what has happened,
+e.g. whether the transfer was via UDP or whether a TCP request took place,
+the BIND 10 log file is searched for known message IDs.
+
+The searching of the log files for message IDs is one of the reasons that,
+unlike other system tests, the IXFR set of tests is broken up into separate
+tests that require the stopping and starting of nameservers (and tidying up of
+log files) between each test. Doing this means that only the existence of a
+particular message ID needs to be checked - there is no risk that another test
+produced it. The other reason is that each IXFR test requires the
+nameservers to be in a specific state at the start of the test; this is easier
+to assure if they are not updating one another as the result of configuration
+settings established in the previous test.
+
+Test Files
+==========
+
+Data Files
+----------
+(All within tests/system/ixfr. Some .in files are processed to substitute
+for build variables in the build process to give the files listed here.)
+
+db.example.nX: These files hold the RRs for a zone which should not
+fit within a single UDP packet. The files are different versions of the zone
+- the N-0 version (i.e. the latest version - "N" - the "-0" is present so
+that the files have a consistent name), N-2 etc. (See the full description
+of the tests for the meaning of N-2 etc.)
+
+db.example.common: A set of RRs to bulk out the zone to be larger than can
+be contained in a single UDP packet.
+
+db.example.n2.refresh: The N-2 version of the zone, but with a small SOA
+refresh time (for test 4).
+
+named_xxxx.conf: Various BIND 9 configuration files with NOTIFYs and/or
+IXFR enabled or disabled.
+
+Directories
+-----------
+The tests/system/ixfr directory holds the IXFR tests. Within that
+directory are subdirectories in-1 through in-4 for each test. And within
+each test directory are the directories ns1 (for the BIND 9 nameserver)
+and nsx2 (for the BIND 10 nameserver).
+
+Shell Scripts
+-------------
+The IXFR tests use the same framework as the rest of the system tests,
+being based around shell scripts. Many have a ".in" form as they require
+substitution of build variables before they can be used, and so are
+listed in configure.ac. The files specific to the IXFR tests are:
+
+tests/system/ixfr/ixfr_init.sh.in: defines environment variables and shell
+subroutines used in the tests. (This references system/conf.sh.in which
+defines most of them.)
+
+tests/system/ixfr/common_tests.sh.in: tests in-1 and in-2 are virtually
+identical - this holds the common code.
diff --git a/tests/system/ixfr/b10-config.db.in b/tests/system/ixfr/b10-config.db.in
new file mode 100644
index 0000000..946d80f
--- /dev/null
+++ b/tests/system/ixfr/b10-config.db.in
@@ -0,0 +1,23 @@
+{"version": 2,
+ "Xfrin": {
+ "zones": [{
+ "master_addr": "10.53.0.1",
+ "master_port": 53210,
+ "name": "example.",
+ "use_ixfr": true
+ }]
+ },
+ "Auth": {
+ "listen_on": [{
+ "address": "10.53.0.2",
+ "port": 53210
+ }],
+ "database_file": "@abs_builddir@/zone.sqlite3"
+ },
+ "Zonemgr": {
+ "secondary_zones": [{
+ "name": "example.",
+ "class": "IN"
+ }]
+ }
+}
diff --git a/tests/system/ixfr/clean_ns.sh b/tests/system/ixfr/clean_ns.sh
new file mode 100644
index 0000000..88f4ff1
--- /dev/null
+++ b/tests/system/ixfr/clean_ns.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Clean up nameserver directories after zone transfer tests.
+
+rm -f ns1/named.conf
+rm -f ns1/db.example*
+rm -f ns1/named.memstats
+
+rm -f nsx2/bind10.run
+rm -f nsx2/b10-config.db
+rm -f ../zone.sqlite3
+
+rm -f client.dig
+rm -f server.dig
diff --git a/tests/system/ixfr/common_tests.sh.in b/tests/system/ixfr/common_tests.sh.in
new file mode 100644
index 0000000..90d0284
--- /dev/null
+++ b/tests/system/ixfr/common_tests.sh.in
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script is used in a couple of IXFR tests.
+#
+# Preconditions:\n
+# The BIND 9 nameserver (ns1, 10.53.0.1, acting as the IXFR server) is loaded
+# with the N-4 version of the zone. (It may hold prior versions as well.)
+# Notifications are disabled.
+#
+# The BIND 10 nameserver (nsx2, 10.53.0.2, acting as the IXFR client) is loaded
+# with an earlier (unspecified) version of the zone.
+#
+# Actions:\n
+# This script updates the IXFR server with the N-2 and N-0 versions of the zone.
+# It then updates the BIND 10 configuration so that it looks for IXFRs from
+# the IXFR server and causes the server to send the client a NOTIFY. After
+# waiting for the client to update from the server, it compares the zones of
+# the two systems, reporting an error if they are different.
+#
+# Caller Actions:\n
+# The caller can pre-load the BIND 10 IXFR client with whatever version of the
+# zone it requires. It can also load the BIND 9 IXFR server with zones earlier
+# than N-4.
+#
+# After this test has finished, it is up to the caller to check the logs
+# to see if they report the expected behavior.
+#
+# \return 0 if the script executed successfully, non-zero otherwise
+
+# Set up variables etc.
+. @abs_top_builddir@/tests/system/conf.sh
+. $IXFR_TOP/ixfr_init.sh
+
+set -e
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:$CLIENT_NAME SOA serial of IXFR client is $old_client_serial"
+
+# Load the BIND 9 system (the IXFR server) with the "n - 2" and "n" versions
+# of the zone. With ixfr-from-differences set to "yes", the nameserver should
+# generate the differences between them.
+echo "I:$SERVER_NAME updating IXFR-server for ixfr-in tests"
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n2
+
+# Wait a bit - it seems that if two updates are loaded in quick succession,
+# the second sometimes gets lost.
+sleep 5
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n0
+
+echo "I:$CLIENT_NAME forcing IXFR client to retrieve new version of the zone"
+$RUN_BINDCTL << .
+Xfrin retransfer zone_name="example"
+.
+
+# Wait for the client to update itself.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+
+# Now that the client has updated, compare the client's and server's versions
+# of the zone - they should be the same.
+compare_zones $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+
+set +e
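
The helpers used above - update_server_zone, wait_for_update and compare_zones - are defined in ixfr_init.sh and conf.sh rather than in this file, so they do not appear in this patch. As a rough sketch (assuming the helpers keep the interfaces shown above, and not part of the patch itself), wait_for_update can be thought of as a polling loop that re-reads the client's SOA serial until it differs from the value saved at the start of the test:

    wait_for_update_sketch() {
        name=$1; ip=$2; old_serial=$3
        tries=0
        while [ $tries -lt 10 ]; do
            # Re-read the client's SOA serial with the same extraction used above.
            serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
            if [ "$serial" != "$old_serial" ]; then
                echo "I:$name updated to SOA serial $serial"
                return 0
            fi
            tries=`expr $tries + 1`
            sleep 5
        done
        echo "I:$name did not update from serial $old_serial"
        return 1
    }

The db.example.n0, .n2 and .n4 files added below carry SOA serials 100, 98 and 96, which correspond to the N, N-2 and N-4 versions of the zone referred to in the comments above.
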
diff --git a/tests/system/ixfr/db.example.common b/tests/system/ixfr/db.example.common
new file mode 100644
index 0000000..90435ce
--- /dev/null
+++ b/tests/system/ixfr/db.example.common
@@ -0,0 +1,1556 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+; This file holds a number of AAAA records to bulk out a zone file beyond
+; 16kB. It is used in tests where it is required that the contents of a zone
+; do not fit into a single UDP packet.
+
+aaaa-000 IN AAAA 2001:db8::0000
+aaaa-001 IN AAAA 2001:db8::0001
+aaaa-002 IN AAAA 2001:db8::0002
+aaaa-003 IN AAAA 2001:db8::0003
+aaaa-004 IN AAAA 2001:db8::0004
+aaaa-005 IN AAAA 2001:db8::0005
+aaaa-006 IN AAAA 2001:db8::0006
+aaaa-007 IN AAAA 2001:db8::0007
+aaaa-008 IN AAAA 2001:db8::0008
+aaaa-009 IN AAAA 2001:db8::0009
+aaaa-010 IN AAAA 2001:db8::000a
+aaaa-011 IN AAAA 2001:db8::000b
+aaaa-012 IN AAAA 2001:db8::000c
+aaaa-013 IN AAAA 2001:db8::000d
+aaaa-014 IN AAAA 2001:db8::000e
+aaaa-015 IN AAAA 2001:db8::000f
+aaaa-016 IN AAAA 2001:db8::0010
+aaaa-017 IN AAAA 2001:db8::0011
+aaaa-018 IN AAAA 2001:db8::0012
+aaaa-019 IN AAAA 2001:db8::0013
+aaaa-020 IN AAAA 2001:db8::0014
+aaaa-021 IN AAAA 2001:db8::0015
+aaaa-022 IN AAAA 2001:db8::0016
+aaaa-023 IN AAAA 2001:db8::0017
+aaaa-024 IN AAAA 2001:db8::0018
+aaaa-025 IN AAAA 2001:db8::0019
+aaaa-026 IN AAAA 2001:db8::001a
+aaaa-027 IN AAAA 2001:db8::001b
+aaaa-028 IN AAAA 2001:db8::001c
+aaaa-029 IN AAAA 2001:db8::001d
+aaaa-030 IN AAAA 2001:db8::001e
+aaaa-031 IN AAAA 2001:db8::001f
+aaaa-032 IN AAAA 2001:db8::0020
+aaaa-033 IN AAAA 2001:db8::0021
+aaaa-034 IN AAAA 2001:db8::0022
+aaaa-035 IN AAAA 2001:db8::0023
+aaaa-036 IN AAAA 2001:db8::0024
+aaaa-037 IN AAAA 2001:db8::0025
+aaaa-038 IN AAAA 2001:db8::0026
+aaaa-039 IN AAAA 2001:db8::0027
+aaaa-040 IN AAAA 2001:db8::0028
+aaaa-041 IN AAAA 2001:db8::0029
+aaaa-042 IN AAAA 2001:db8::002a
+aaaa-043 IN AAAA 2001:db8::002b
+aaaa-044 IN AAAA 2001:db8::002c
+aaaa-045 IN AAAA 2001:db8::002d
+aaaa-046 IN AAAA 2001:db8::002e
+aaaa-047 IN AAAA 2001:db8::002f
+aaaa-048 IN AAAA 2001:db8::0030
+aaaa-049 IN AAAA 2001:db8::0031
+aaaa-050 IN AAAA 2001:db8::0032
+aaaa-051 IN AAAA 2001:db8::0033
+aaaa-052 IN AAAA 2001:db8::0034
+aaaa-053 IN AAAA 2001:db8::0035
+aaaa-054 IN AAAA 2001:db8::0036
+aaaa-055 IN AAAA 2001:db8::0037
+aaaa-056 IN AAAA 2001:db8::0038
+aaaa-057 IN AAAA 2001:db8::0039
+aaaa-058 IN AAAA 2001:db8::003a
+aaaa-059 IN AAAA 2001:db8::003b
+aaaa-060 IN AAAA 2001:db8::003c
+aaaa-061 IN AAAA 2001:db8::003d
+aaaa-062 IN AAAA 2001:db8::003e
+aaaa-063 IN AAAA 2001:db8::003f
+aaaa-064 IN AAAA 2001:db8::0040
+aaaa-065 IN AAAA 2001:db8::0041
+aaaa-066 IN AAAA 2001:db8::0042
+aaaa-067 IN AAAA 2001:db8::0043
+aaaa-068 IN AAAA 2001:db8::0044
+aaaa-069 IN AAAA 2001:db8::0045
+aaaa-070 IN AAAA 2001:db8::0046
+aaaa-071 IN AAAA 2001:db8::0047
+aaaa-072 IN AAAA 2001:db8::0048
+aaaa-073 IN AAAA 2001:db8::0049
+aaaa-074 IN AAAA 2001:db8::004a
+aaaa-075 IN AAAA 2001:db8::004b
+aaaa-076 IN AAAA 2001:db8::004c
+aaaa-077 IN AAAA 2001:db8::004d
+aaaa-078 IN AAAA 2001:db8::004e
+aaaa-079 IN AAAA 2001:db8::004f
+aaaa-080 IN AAAA 2001:db8::0050
+aaaa-081 IN AAAA 2001:db8::0051
+aaaa-082 IN AAAA 2001:db8::0052
+aaaa-083 IN AAAA 2001:db8::0053
+aaaa-084 IN AAAA 2001:db8::0054
+aaaa-085 IN AAAA 2001:db8::0055
+aaaa-086 IN AAAA 2001:db8::0056
+aaaa-087 IN AAAA 2001:db8::0057
+aaaa-088 IN AAAA 2001:db8::0058
+aaaa-089 IN AAAA 2001:db8::0059
+aaaa-090 IN AAAA 2001:db8::005a
+aaaa-091 IN AAAA 2001:db8::005b
+aaaa-092 IN AAAA 2001:db8::005c
+aaaa-093 IN AAAA 2001:db8::005d
+aaaa-094 IN AAAA 2001:db8::005e
+aaaa-095 IN AAAA 2001:db8::005f
+aaaa-096 IN AAAA 2001:db8::0060
+aaaa-097 IN AAAA 2001:db8::0061
+aaaa-098 IN AAAA 2001:db8::0062
+aaaa-099 IN AAAA 2001:db8::0063
+aaaa-100 IN AAAA 2001:db8::0064
+aaaa-101 IN AAAA 2001:db8::0065
+aaaa-102 IN AAAA 2001:db8::0066
+aaaa-103 IN AAAA 2001:db8::0067
+aaaa-104 IN AAAA 2001:db8::0068
+aaaa-105 IN AAAA 2001:db8::0069
+aaaa-106 IN AAAA 2001:db8::006a
+aaaa-107 IN AAAA 2001:db8::006b
+aaaa-108 IN AAAA 2001:db8::006c
+aaaa-109 IN AAAA 2001:db8::006d
+aaaa-110 IN AAAA 2001:db8::006e
+aaaa-111 IN AAAA 2001:db8::006f
+aaaa-112 IN AAAA 2001:db8::0070
+aaaa-113 IN AAAA 2001:db8::0071
+aaaa-114 IN AAAA 2001:db8::0072
+aaaa-115 IN AAAA 2001:db8::0073
+aaaa-116 IN AAAA 2001:db8::0074
+aaaa-117 IN AAAA 2001:db8::0075
+aaaa-118 IN AAAA 2001:db8::0076
+aaaa-119 IN AAAA 2001:db8::0077
+aaaa-120 IN AAAA 2001:db8::0078
+aaaa-121 IN AAAA 2001:db8::0079
+aaaa-122 IN AAAA 2001:db8::007a
+aaaa-123 IN AAAA 2001:db8::007b
+aaaa-124 IN AAAA 2001:db8::007c
+aaaa-125 IN AAAA 2001:db8::007d
+aaaa-126 IN AAAA 2001:db8::007e
+aaaa-127 IN AAAA 2001:db8::007f
+aaaa-128 IN AAAA 2001:db8::0080
+aaaa-129 IN AAAA 2001:db8::0081
+aaaa-130 IN AAAA 2001:db8::0082
+aaaa-131 IN AAAA 2001:db8::0083
+aaaa-132 IN AAAA 2001:db8::0084
+aaaa-133 IN AAAA 2001:db8::0085
+aaaa-134 IN AAAA 2001:db8::0086
+aaaa-135 IN AAAA 2001:db8::0087
+aaaa-136 IN AAAA 2001:db8::0088
+aaaa-137 IN AAAA 2001:db8::0089
+aaaa-138 IN AAAA 2001:db8::008a
+aaaa-139 IN AAAA 2001:db8::008b
+aaaa-140 IN AAAA 2001:db8::008c
+aaaa-141 IN AAAA 2001:db8::008d
+aaaa-142 IN AAAA 2001:db8::008e
+aaaa-143 IN AAAA 2001:db8::008f
+aaaa-144 IN AAAA 2001:db8::0090
+aaaa-145 IN AAAA 2001:db8::0091
+aaaa-146 IN AAAA 2001:db8::0092
+aaaa-147 IN AAAA 2001:db8::0093
+aaaa-148 IN AAAA 2001:db8::0094
+aaaa-149 IN AAAA 2001:db8::0095
+aaaa-150 IN AAAA 2001:db8::0096
+aaaa-151 IN AAAA 2001:db8::0097
+aaaa-152 IN AAAA 2001:db8::0098
+aaaa-153 IN AAAA 2001:db8::0099
+aaaa-154 IN AAAA 2001:db8::009a
+aaaa-155 IN AAAA 2001:db8::009b
+aaaa-156 IN AAAA 2001:db8::009c
+aaaa-157 IN AAAA 2001:db8::009d
+aaaa-158 IN AAAA 2001:db8::009e
+aaaa-159 IN AAAA 2001:db8::009f
+aaaa-160 IN AAAA 2001:db8::00a0
+aaaa-161 IN AAAA 2001:db8::00a1
+aaaa-162 IN AAAA 2001:db8::00a2
+aaaa-163 IN AAAA 2001:db8::00a3
+aaaa-164 IN AAAA 2001:db8::00a4
+aaaa-165 IN AAAA 2001:db8::00a5
+aaaa-166 IN AAAA 2001:db8::00a6
+aaaa-167 IN AAAA 2001:db8::00a7
+aaaa-168 IN AAAA 2001:db8::00a8
+aaaa-169 IN AAAA 2001:db8::00a9
+aaaa-170 IN AAAA 2001:db8::00aa
+aaaa-171 IN AAAA 2001:db8::00ab
+aaaa-172 IN AAAA 2001:db8::00ac
+aaaa-173 IN AAAA 2001:db8::00ad
+aaaa-174 IN AAAA 2001:db8::00ae
+aaaa-175 IN AAAA 2001:db8::00af
+aaaa-176 IN AAAA 2001:db8::00b0
+aaaa-177 IN AAAA 2001:db8::00b1
+aaaa-178 IN AAAA 2001:db8::00b2
+aaaa-179 IN AAAA 2001:db8::00b3
+aaaa-180 IN AAAA 2001:db8::00b4
+aaaa-181 IN AAAA 2001:db8::00b5
+aaaa-182 IN AAAA 2001:db8::00b6
+aaaa-183 IN AAAA 2001:db8::00b7
+aaaa-184 IN AAAA 2001:db8::00b8
+aaaa-185 IN AAAA 2001:db8::00b9
+aaaa-186 IN AAAA 2001:db8::00ba
+aaaa-187 IN AAAA 2001:db8::00bb
+aaaa-188 IN AAAA 2001:db8::00bc
+aaaa-189 IN AAAA 2001:db8::00bd
+aaaa-190 IN AAAA 2001:db8::00be
+aaaa-191 IN AAAA 2001:db8::00bf
+aaaa-192 IN AAAA 2001:db8::00c0
+aaaa-193 IN AAAA 2001:db8::00c1
+aaaa-194 IN AAAA 2001:db8::00c2
+aaaa-195 IN AAAA 2001:db8::00c3
+aaaa-196 IN AAAA 2001:db8::00c4
+aaaa-197 IN AAAA 2001:db8::00c5
+aaaa-198 IN AAAA 2001:db8::00c6
+aaaa-199 IN AAAA 2001:db8::00c7
+aaaa-200 IN AAAA 2001:db8::00c8
+aaaa-201 IN AAAA 2001:db8::00c9
+aaaa-202 IN AAAA 2001:db8::00ca
+aaaa-203 IN AAAA 2001:db8::00cb
+aaaa-204 IN AAAA 2001:db8::00cc
+aaaa-205 IN AAAA 2001:db8::00cd
+aaaa-206 IN AAAA 2001:db8::00ce
+aaaa-207 IN AAAA 2001:db8::00cf
+aaaa-208 IN AAAA 2001:db8::00d0
+aaaa-209 IN AAAA 2001:db8::00d1
+aaaa-210 IN AAAA 2001:db8::00d2
+aaaa-211 IN AAAA 2001:db8::00d3
+aaaa-212 IN AAAA 2001:db8::00d4
+aaaa-213 IN AAAA 2001:db8::00d5
+aaaa-214 IN AAAA 2001:db8::00d6
+aaaa-215 IN AAAA 2001:db8::00d7
+aaaa-216 IN AAAA 2001:db8::00d8
+aaaa-217 IN AAAA 2001:db8::00d9
+aaaa-218 IN AAAA 2001:db8::00da
+aaaa-219 IN AAAA 2001:db8::00db
+aaaa-220 IN AAAA 2001:db8::00dc
+aaaa-221 IN AAAA 2001:db8::00dd
+aaaa-222 IN AAAA 2001:db8::00de
+aaaa-223 IN AAAA 2001:db8::00df
+aaaa-224 IN AAAA 2001:db8::00e0
+aaaa-225 IN AAAA 2001:db8::00e1
+aaaa-226 IN AAAA 2001:db8::00e2
+aaaa-227 IN AAAA 2001:db8::00e3
+aaaa-228 IN AAAA 2001:db8::00e4
+aaaa-229 IN AAAA 2001:db8::00e5
+aaaa-230 IN AAAA 2001:db8::00e6
+aaaa-231 IN AAAA 2001:db8::00e7
+aaaa-232 IN AAAA 2001:db8::00e8
+aaaa-233 IN AAAA 2001:db8::00e9
+aaaa-234 IN AAAA 2001:db8::00ea
+aaaa-235 IN AAAA 2001:db8::00eb
+aaaa-236 IN AAAA 2001:db8::00ec
+aaaa-237 IN AAAA 2001:db8::00ed
+aaaa-238 IN AAAA 2001:db8::00ee
+aaaa-239 IN AAAA 2001:db8::00ef
+aaaa-240 IN AAAA 2001:db8::00f0
+aaaa-241 IN AAAA 2001:db8::00f1
+aaaa-242 IN AAAA 2001:db8::00f2
+aaaa-243 IN AAAA 2001:db8::00f3
+aaaa-244 IN AAAA 2001:db8::00f4
+aaaa-245 IN AAAA 2001:db8::00f5
+aaaa-246 IN AAAA 2001:db8::00f6
+aaaa-247 IN AAAA 2001:db8::00f7
+aaaa-248 IN AAAA 2001:db8::00f8
+aaaa-249 IN AAAA 2001:db8::00f9
+aaaa-250 IN AAAA 2001:db8::00fa
+aaaa-251 IN AAAA 2001:db8::00fb
+aaaa-252 IN AAAA 2001:db8::00fc
+aaaa-253 IN AAAA 2001:db8::00fd
+aaaa-254 IN AAAA 2001:db8::00fe
+aaaa-255 IN AAAA 2001:db8::00ff
+aaaa-256 IN AAAA 2001:db8::0100
+aaaa-257 IN AAAA 2001:db8::0101
+aaaa-258 IN AAAA 2001:db8::0102
+aaaa-259 IN AAAA 2001:db8::0103
+aaaa-260 IN AAAA 2001:db8::0104
+aaaa-261 IN AAAA 2001:db8::0105
+aaaa-262 IN AAAA 2001:db8::0106
+aaaa-263 IN AAAA 2001:db8::0107
+aaaa-264 IN AAAA 2001:db8::0108
+aaaa-265 IN AAAA 2001:db8::0109
+aaaa-266 IN AAAA 2001:db8::010a
+aaaa-267 IN AAAA 2001:db8::010b
+aaaa-268 IN AAAA 2001:db8::010c
+aaaa-269 IN AAAA 2001:db8::010d
+aaaa-270 IN AAAA 2001:db8::010e
+aaaa-271 IN AAAA 2001:db8::010f
+aaaa-272 IN AAAA 2001:db8::0110
+aaaa-273 IN AAAA 2001:db8::0111
+aaaa-274 IN AAAA 2001:db8::0112
+aaaa-275 IN AAAA 2001:db8::0113
+aaaa-276 IN AAAA 2001:db8::0114
+aaaa-277 IN AAAA 2001:db8::0115
+aaaa-278 IN AAAA 2001:db8::0116
+aaaa-279 IN AAAA 2001:db8::0117
+aaaa-280 IN AAAA 2001:db8::0118
+aaaa-281 IN AAAA 2001:db8::0119
+aaaa-282 IN AAAA 2001:db8::011a
+aaaa-283 IN AAAA 2001:db8::011b
+aaaa-284 IN AAAA 2001:db8::011c
+aaaa-285 IN AAAA 2001:db8::011d
+aaaa-286 IN AAAA 2001:db8::011e
+aaaa-287 IN AAAA 2001:db8::011f
+aaaa-288 IN AAAA 2001:db8::0120
+aaaa-289 IN AAAA 2001:db8::0121
+aaaa-290 IN AAAA 2001:db8::0122
+aaaa-291 IN AAAA 2001:db8::0123
+aaaa-292 IN AAAA 2001:db8::0124
+aaaa-293 IN AAAA 2001:db8::0125
+aaaa-294 IN AAAA 2001:db8::0126
+aaaa-295 IN AAAA 2001:db8::0127
+aaaa-296 IN AAAA 2001:db8::0128
+aaaa-297 IN AAAA 2001:db8::0129
+aaaa-298 IN AAAA 2001:db8::012a
+aaaa-299 IN AAAA 2001:db8::012b
+aaaa-300 IN AAAA 2001:db8::012c
+aaaa-301 IN AAAA 2001:db8::012d
+aaaa-302 IN AAAA 2001:db8::012e
+aaaa-303 IN AAAA 2001:db8::012f
+aaaa-304 IN AAAA 2001:db8::0130
+aaaa-305 IN AAAA 2001:db8::0131
+aaaa-306 IN AAAA 2001:db8::0132
+aaaa-307 IN AAAA 2001:db8::0133
+aaaa-308 IN AAAA 2001:db8::0134
+aaaa-309 IN AAAA 2001:db8::0135
+aaaa-310 IN AAAA 2001:db8::0136
+aaaa-311 IN AAAA 2001:db8::0137
+aaaa-312 IN AAAA 2001:db8::0138
+aaaa-313 IN AAAA 2001:db8::0139
+aaaa-314 IN AAAA 2001:db8::013a
+aaaa-315 IN AAAA 2001:db8::013b
+aaaa-316 IN AAAA 2001:db8::013c
+aaaa-317 IN AAAA 2001:db8::013d
+aaaa-318 IN AAAA 2001:db8::013e
+aaaa-319 IN AAAA 2001:db8::013f
+aaaa-320 IN AAAA 2001:db8::0140
+aaaa-321 IN AAAA 2001:db8::0141
+aaaa-322 IN AAAA 2001:db8::0142
+aaaa-323 IN AAAA 2001:db8::0143
+aaaa-324 IN AAAA 2001:db8::0144
+aaaa-325 IN AAAA 2001:db8::0145
+aaaa-326 IN AAAA 2001:db8::0146
+aaaa-327 IN AAAA 2001:db8::0147
+aaaa-328 IN AAAA 2001:db8::0148
+aaaa-329 IN AAAA 2001:db8::0149
+aaaa-330 IN AAAA 2001:db8::014a
+aaaa-331 IN AAAA 2001:db8::014b
+aaaa-332 IN AAAA 2001:db8::014c
+aaaa-333 IN AAAA 2001:db8::014d
+aaaa-334 IN AAAA 2001:db8::014e
+aaaa-335 IN AAAA 2001:db8::014f
+aaaa-336 IN AAAA 2001:db8::0150
+aaaa-337 IN AAAA 2001:db8::0151
+aaaa-338 IN AAAA 2001:db8::0152
+aaaa-339 IN AAAA 2001:db8::0153
+aaaa-340 IN AAAA 2001:db8::0154
+aaaa-341 IN AAAA 2001:db8::0155
+aaaa-342 IN AAAA 2001:db8::0156
+aaaa-343 IN AAAA 2001:db8::0157
+aaaa-344 IN AAAA 2001:db8::0158
+aaaa-345 IN AAAA 2001:db8::0159
+aaaa-346 IN AAAA 2001:db8::015a
+aaaa-347 IN AAAA 2001:db8::015b
+aaaa-348 IN AAAA 2001:db8::015c
+aaaa-349 IN AAAA 2001:db8::015d
+aaaa-350 IN AAAA 2001:db8::015e
+aaaa-351 IN AAAA 2001:db8::015f
+aaaa-352 IN AAAA 2001:db8::0160
+aaaa-353 IN AAAA 2001:db8::0161
+aaaa-354 IN AAAA 2001:db8::0162
+aaaa-355 IN AAAA 2001:db8::0163
+aaaa-356 IN AAAA 2001:db8::0164
+aaaa-357 IN AAAA 2001:db8::0165
+aaaa-358 IN AAAA 2001:db8::0166
+aaaa-359 IN AAAA 2001:db8::0167
+aaaa-360 IN AAAA 2001:db8::0168
+aaaa-361 IN AAAA 2001:db8::0169
+aaaa-362 IN AAAA 2001:db8::016a
+aaaa-363 IN AAAA 2001:db8::016b
+aaaa-364 IN AAAA 2001:db8::016c
+aaaa-365 IN AAAA 2001:db8::016d
+aaaa-366 IN AAAA 2001:db8::016e
+aaaa-367 IN AAAA 2001:db8::016f
+aaaa-368 IN AAAA 2001:db8::0170
+aaaa-369 IN AAAA 2001:db8::0171
+aaaa-370 IN AAAA 2001:db8::0172
+aaaa-371 IN AAAA 2001:db8::0173
+aaaa-372 IN AAAA 2001:db8::0174
+aaaa-373 IN AAAA 2001:db8::0175
+aaaa-374 IN AAAA 2001:db8::0176
+aaaa-375 IN AAAA 2001:db8::0177
+aaaa-376 IN AAAA 2001:db8::0178
+aaaa-377 IN AAAA 2001:db8::0179
+aaaa-378 IN AAAA 2001:db8::017a
+aaaa-379 IN AAAA 2001:db8::017b
+aaaa-380 IN AAAA 2001:db8::017c
+aaaa-381 IN AAAA 2001:db8::017d
+aaaa-382 IN AAAA 2001:db8::017e
+aaaa-383 IN AAAA 2001:db8::017f
+aaaa-384 IN AAAA 2001:db8::0180
+aaaa-385 IN AAAA 2001:db8::0181
+aaaa-386 IN AAAA 2001:db8::0182
+aaaa-387 IN AAAA 2001:db8::0183
+aaaa-388 IN AAAA 2001:db8::0184
+aaaa-389 IN AAAA 2001:db8::0185
+aaaa-390 IN AAAA 2001:db8::0186
+aaaa-391 IN AAAA 2001:db8::0187
+aaaa-392 IN AAAA 2001:db8::0188
+aaaa-393 IN AAAA 2001:db8::0189
+aaaa-394 IN AAAA 2001:db8::018a
+aaaa-395 IN AAAA 2001:db8::018b
+aaaa-396 IN AAAA 2001:db8::018c
+aaaa-397 IN AAAA 2001:db8::018d
+aaaa-398 IN AAAA 2001:db8::018e
+aaaa-399 IN AAAA 2001:db8::018f
+aaaa-400 IN AAAA 2001:db8::0190
+aaaa-401 IN AAAA 2001:db8::0191
+aaaa-402 IN AAAA 2001:db8::0192
+aaaa-403 IN AAAA 2001:db8::0193
+aaaa-404 IN AAAA 2001:db8::0194
+aaaa-405 IN AAAA 2001:db8::0195
+aaaa-406 IN AAAA 2001:db8::0196
+aaaa-407 IN AAAA 2001:db8::0197
+aaaa-408 IN AAAA 2001:db8::0198
+aaaa-409 IN AAAA 2001:db8::0199
+aaaa-410 IN AAAA 2001:db8::019a
+aaaa-411 IN AAAA 2001:db8::019b
+aaaa-412 IN AAAA 2001:db8::019c
+aaaa-413 IN AAAA 2001:db8::019d
+aaaa-414 IN AAAA 2001:db8::019e
+aaaa-415 IN AAAA 2001:db8::019f
+aaaa-416 IN AAAA 2001:db8::01a0
+aaaa-417 IN AAAA 2001:db8::01a1
+aaaa-418 IN AAAA 2001:db8::01a2
+aaaa-419 IN AAAA 2001:db8::01a3
+aaaa-420 IN AAAA 2001:db8::01a4
+aaaa-421 IN AAAA 2001:db8::01a5
+aaaa-422 IN AAAA 2001:db8::01a6
+aaaa-423 IN AAAA 2001:db8::01a7
+aaaa-424 IN AAAA 2001:db8::01a8
+aaaa-425 IN AAAA 2001:db8::01a9
+aaaa-426 IN AAAA 2001:db8::01aa
+aaaa-427 IN AAAA 2001:db8::01ab
+aaaa-428 IN AAAA 2001:db8::01ac
+aaaa-429 IN AAAA 2001:db8::01ad
+aaaa-430 IN AAAA 2001:db8::01ae
+aaaa-431 IN AAAA 2001:db8::01af
+aaaa-432 IN AAAA 2001:db8::01b0
+aaaa-433 IN AAAA 2001:db8::01b1
+aaaa-434 IN AAAA 2001:db8::01b2
+aaaa-435 IN AAAA 2001:db8::01b3
+aaaa-436 IN AAAA 2001:db8::01b4
+aaaa-437 IN AAAA 2001:db8::01b5
+aaaa-438 IN AAAA 2001:db8::01b6
+aaaa-439 IN AAAA 2001:db8::01b7
+aaaa-440 IN AAAA 2001:db8::01b8
+aaaa-441 IN AAAA 2001:db8::01b9
+aaaa-442 IN AAAA 2001:db8::01ba
+aaaa-443 IN AAAA 2001:db8::01bb
+aaaa-444 IN AAAA 2001:db8::01bc
+aaaa-445 IN AAAA 2001:db8::01bd
+aaaa-446 IN AAAA 2001:db8::01be
+aaaa-447 IN AAAA 2001:db8::01bf
+aaaa-448 IN AAAA 2001:db8::01c0
+aaaa-449 IN AAAA 2001:db8::01c1
+aaaa-450 IN AAAA 2001:db8::01c2
+aaaa-451 IN AAAA 2001:db8::01c3
+aaaa-452 IN AAAA 2001:db8::01c4
+aaaa-453 IN AAAA 2001:db8::01c5
+aaaa-454 IN AAAA 2001:db8::01c6
+aaaa-455 IN AAAA 2001:db8::01c7
+aaaa-456 IN AAAA 2001:db8::01c8
+aaaa-457 IN AAAA 2001:db8::01c9
+aaaa-458 IN AAAA 2001:db8::01ca
+aaaa-459 IN AAAA 2001:db8::01cb
+aaaa-460 IN AAAA 2001:db8::01cc
+aaaa-461 IN AAAA 2001:db8::01cd
+aaaa-462 IN AAAA 2001:db8::01ce
+aaaa-463 IN AAAA 2001:db8::01cf
+aaaa-464 IN AAAA 2001:db8::01d0
+aaaa-465 IN AAAA 2001:db8::01d1
+aaaa-466 IN AAAA 2001:db8::01d2
+aaaa-467 IN AAAA 2001:db8::01d3
+aaaa-468 IN AAAA 2001:db8::01d4
+aaaa-469 IN AAAA 2001:db8::01d5
+aaaa-470 IN AAAA 2001:db8::01d6
+aaaa-471 IN AAAA 2001:db8::01d7
+aaaa-472 IN AAAA 2001:db8::01d8
+aaaa-473 IN AAAA 2001:db8::01d9
+aaaa-474 IN AAAA 2001:db8::01da
+aaaa-475 IN AAAA 2001:db8::01db
+aaaa-476 IN AAAA 2001:db8::01dc
+aaaa-477 IN AAAA 2001:db8::01dd
+aaaa-478 IN AAAA 2001:db8::01de
+aaaa-479 IN AAAA 2001:db8::01df
+aaaa-480 IN AAAA 2001:db8::01e0
+aaaa-481 IN AAAA 2001:db8::01e1
+aaaa-482 IN AAAA 2001:db8::01e2
+aaaa-483 IN AAAA 2001:db8::01e3
+aaaa-484 IN AAAA 2001:db8::01e4
+aaaa-485 IN AAAA 2001:db8::01e5
+aaaa-486 IN AAAA 2001:db8::01e6
+aaaa-487 IN AAAA 2001:db8::01e7
+aaaa-488 IN AAAA 2001:db8::01e8
+aaaa-489 IN AAAA 2001:db8::01e9
+aaaa-490 IN AAAA 2001:db8::01ea
+aaaa-491 IN AAAA 2001:db8::01eb
+aaaa-492 IN AAAA 2001:db8::01ec
+aaaa-493 IN AAAA 2001:db8::01ed
+aaaa-494 IN AAAA 2001:db8::01ee
+aaaa-495 IN AAAA 2001:db8::01ef
+aaaa-496 IN AAAA 2001:db8::01f0
+aaaa-497 IN AAAA 2001:db8::01f1
+aaaa-498 IN AAAA 2001:db8::01f2
+aaaa-499 IN AAAA 2001:db8::01f3
+aaaa-500 IN AAAA 2001:db8::01f4
+aaaa-501 IN AAAA 2001:db8::01f5
+aaaa-502 IN AAAA 2001:db8::01f6
+aaaa-503 IN AAAA 2001:db8::01f7
+aaaa-504 IN AAAA 2001:db8::01f8
+aaaa-505 IN AAAA 2001:db8::01f9
+aaaa-506 IN AAAA 2001:db8::01fa
+aaaa-507 IN AAAA 2001:db8::01fb
+aaaa-508 IN AAAA 2001:db8::01fc
+aaaa-509 IN AAAA 2001:db8::01fd
+aaaa-510 IN AAAA 2001:db8::01fe
+aaaa-511 IN AAAA 2001:db8::01ff
+
+bbbb-000 IN AAAA 2001:db8::1:0000
+bbbb-001 IN AAAA 2001:db8::1:0001
+bbbb-002 IN AAAA 2001:db8::1:0002
+bbbb-003 IN AAAA 2001:db8::1:0003
+bbbb-004 IN AAAA 2001:db8::1:0004
+bbbb-005 IN AAAA 2001:db8::1:0005
+bbbb-006 IN AAAA 2001:db8::1:0006
+bbbb-007 IN AAAA 2001:db8::1:0007
+bbbb-008 IN AAAA 2001:db8::1:0008
+bbbb-009 IN AAAA 2001:db8::1:0009
+bbbb-010 IN AAAA 2001:db8::1:000a
+bbbb-011 IN AAAA 2001:db8::1:000b
+bbbb-012 IN AAAA 2001:db8::1:000c
+bbbb-013 IN AAAA 2001:db8::1:000d
+bbbb-014 IN AAAA 2001:db8::1:000e
+bbbb-015 IN AAAA 2001:db8::1:000f
+bbbb-016 IN AAAA 2001:db8::1:0010
+bbbb-017 IN AAAA 2001:db8::1:0011
+bbbb-018 IN AAAA 2001:db8::1:0012
+bbbb-019 IN AAAA 2001:db8::1:0013
+bbbb-020 IN AAAA 2001:db8::1:0014
+bbbb-021 IN AAAA 2001:db8::1:0015
+bbbb-022 IN AAAA 2001:db8::1:0016
+bbbb-023 IN AAAA 2001:db8::1:0017
+bbbb-024 IN AAAA 2001:db8::1:0018
+bbbb-025 IN AAAA 2001:db8::1:0019
+bbbb-026 IN AAAA 2001:db8::1:001a
+bbbb-027 IN AAAA 2001:db8::1:001b
+bbbb-028 IN AAAA 2001:db8::1:001c
+bbbb-029 IN AAAA 2001:db8::1:001d
+bbbb-030 IN AAAA 2001:db8::1:001e
+bbbb-031 IN AAAA 2001:db8::1:001f
+bbbb-032 IN AAAA 2001:db8::1:0020
+bbbb-033 IN AAAA 2001:db8::1:0021
+bbbb-034 IN AAAA 2001:db8::1:0022
+bbbb-035 IN AAAA 2001:db8::1:0023
+bbbb-036 IN AAAA 2001:db8::1:0024
+bbbb-037 IN AAAA 2001:db8::1:0025
+bbbb-038 IN AAAA 2001:db8::1:0026
+bbbb-039 IN AAAA 2001:db8::1:0027
+bbbb-040 IN AAAA 2001:db8::1:0028
+bbbb-041 IN AAAA 2001:db8::1:0029
+bbbb-042 IN AAAA 2001:db8::1:002a
+bbbb-043 IN AAAA 2001:db8::1:002b
+bbbb-044 IN AAAA 2001:db8::1:002c
+bbbb-045 IN AAAA 2001:db8::1:002d
+bbbb-046 IN AAAA 2001:db8::1:002e
+bbbb-047 IN AAAA 2001:db8::1:002f
+bbbb-048 IN AAAA 2001:db8::1:0030
+bbbb-049 IN AAAA 2001:db8::1:0031
+bbbb-050 IN AAAA 2001:db8::1:0032
+bbbb-051 IN AAAA 2001:db8::1:0033
+bbbb-052 IN AAAA 2001:db8::1:0034
+bbbb-053 IN AAAA 2001:db8::1:0035
+bbbb-054 IN AAAA 2001:db8::1:0036
+bbbb-055 IN AAAA 2001:db8::1:0037
+bbbb-056 IN AAAA 2001:db8::1:0038
+bbbb-057 IN AAAA 2001:db8::1:0039
+bbbb-058 IN AAAA 2001:db8::1:003a
+bbbb-059 IN AAAA 2001:db8::1:003b
+bbbb-060 IN AAAA 2001:db8::1:003c
+bbbb-061 IN AAAA 2001:db8::1:003d
+bbbb-062 IN AAAA 2001:db8::1:003e
+bbbb-063 IN AAAA 2001:db8::1:003f
+bbbb-064 IN AAAA 2001:db8::1:0040
+bbbb-065 IN AAAA 2001:db8::1:0041
+bbbb-066 IN AAAA 2001:db8::1:0042
+bbbb-067 IN AAAA 2001:db8::1:0043
+bbbb-068 IN AAAA 2001:db8::1:0044
+bbbb-069 IN AAAA 2001:db8::1:0045
+bbbb-070 IN AAAA 2001:db8::1:0046
+bbbb-071 IN AAAA 2001:db8::1:0047
+bbbb-072 IN AAAA 2001:db8::1:0048
+bbbb-073 IN AAAA 2001:db8::1:0049
+bbbb-074 IN AAAA 2001:db8::1:004a
+bbbb-075 IN AAAA 2001:db8::1:004b
+bbbb-076 IN AAAA 2001:db8::1:004c
+bbbb-077 IN AAAA 2001:db8::1:004d
+bbbb-078 IN AAAA 2001:db8::1:004e
+bbbb-079 IN AAAA 2001:db8::1:004f
+bbbb-080 IN AAAA 2001:db8::1:0050
+bbbb-081 IN AAAA 2001:db8::1:0051
+bbbb-082 IN AAAA 2001:db8::1:0052
+bbbb-083 IN AAAA 2001:db8::1:0053
+bbbb-084 IN AAAA 2001:db8::1:0054
+bbbb-085 IN AAAA 2001:db8::1:0055
+bbbb-086 IN AAAA 2001:db8::1:0056
+bbbb-087 IN AAAA 2001:db8::1:0057
+bbbb-088 IN AAAA 2001:db8::1:0058
+bbbb-089 IN AAAA 2001:db8::1:0059
+bbbb-090 IN AAAA 2001:db8::1:005a
+bbbb-091 IN AAAA 2001:db8::1:005b
+bbbb-092 IN AAAA 2001:db8::1:005c
+bbbb-093 IN AAAA 2001:db8::1:005d
+bbbb-094 IN AAAA 2001:db8::1:005e
+bbbb-095 IN AAAA 2001:db8::1:005f
+bbbb-096 IN AAAA 2001:db8::1:0060
+bbbb-097 IN AAAA 2001:db8::1:0061
+bbbb-098 IN AAAA 2001:db8::1:0062
+bbbb-099 IN AAAA 2001:db8::1:0063
+bbbb-100 IN AAAA 2001:db8::1:0064
+bbbb-101 IN AAAA 2001:db8::1:0065
+bbbb-102 IN AAAA 2001:db8::1:0066
+bbbb-103 IN AAAA 2001:db8::1:0067
+bbbb-104 IN AAAA 2001:db8::1:0068
+bbbb-105 IN AAAA 2001:db8::1:0069
+bbbb-106 IN AAAA 2001:db8::1:006a
+bbbb-107 IN AAAA 2001:db8::1:006b
+bbbb-108 IN AAAA 2001:db8::1:006c
+bbbb-109 IN AAAA 2001:db8::1:006d
+bbbb-110 IN AAAA 2001:db8::1:006e
+bbbb-111 IN AAAA 2001:db8::1:006f
+bbbb-112 IN AAAA 2001:db8::1:0070
+bbbb-113 IN AAAA 2001:db8::1:0071
+bbbb-114 IN AAAA 2001:db8::1:0072
+bbbb-115 IN AAAA 2001:db8::1:0073
+bbbb-116 IN AAAA 2001:db8::1:0074
+bbbb-117 IN AAAA 2001:db8::1:0075
+bbbb-118 IN AAAA 2001:db8::1:0076
+bbbb-119 IN AAAA 2001:db8::1:0077
+bbbb-120 IN AAAA 2001:db8::1:0078
+bbbb-121 IN AAAA 2001:db8::1:0079
+bbbb-122 IN AAAA 2001:db8::1:007a
+bbbb-123 IN AAAA 2001:db8::1:007b
+bbbb-124 IN AAAA 2001:db8::1:007c
+bbbb-125 IN AAAA 2001:db8::1:007d
+bbbb-126 IN AAAA 2001:db8::1:007e
+bbbb-127 IN AAAA 2001:db8::1:007f
+bbbb-128 IN AAAA 2001:db8::1:0080
+bbbb-129 IN AAAA 2001:db8::1:0081
+bbbb-130 IN AAAA 2001:db8::1:0082
+bbbb-131 IN AAAA 2001:db8::1:0083
+bbbb-132 IN AAAA 2001:db8::1:0084
+bbbb-133 IN AAAA 2001:db8::1:0085
+bbbb-134 IN AAAA 2001:db8::1:0086
+bbbb-135 IN AAAA 2001:db8::1:0087
+bbbb-136 IN AAAA 2001:db8::1:0088
+bbbb-137 IN AAAA 2001:db8::1:0089
+bbbb-138 IN AAAA 2001:db8::1:008a
+bbbb-139 IN AAAA 2001:db8::1:008b
+bbbb-140 IN AAAA 2001:db8::1:008c
+bbbb-141 IN AAAA 2001:db8::1:008d
+bbbb-142 IN AAAA 2001:db8::1:008e
+bbbb-143 IN AAAA 2001:db8::1:008f
+bbbb-144 IN AAAA 2001:db8::1:0090
+bbbb-145 IN AAAA 2001:db8::1:0091
+bbbb-146 IN AAAA 2001:db8::1:0092
+bbbb-147 IN AAAA 2001:db8::1:0093
+bbbb-148 IN AAAA 2001:db8::1:0094
+bbbb-149 IN AAAA 2001:db8::1:0095
+bbbb-150 IN AAAA 2001:db8::1:0096
+bbbb-151 IN AAAA 2001:db8::1:0097
+bbbb-152 IN AAAA 2001:db8::1:0098
+bbbb-153 IN AAAA 2001:db8::1:0099
+bbbb-154 IN AAAA 2001:db8::1:009a
+bbbb-155 IN AAAA 2001:db8::1:009b
+bbbb-156 IN AAAA 2001:db8::1:009c
+bbbb-157 IN AAAA 2001:db8::1:009d
+bbbb-158 IN AAAA 2001:db8::1:009e
+bbbb-159 IN AAAA 2001:db8::1:009f
+bbbb-160 IN AAAA 2001:db8::1:00a0
+bbbb-161 IN AAAA 2001:db8::1:00a1
+bbbb-162 IN AAAA 2001:db8::1:00a2
+bbbb-163 IN AAAA 2001:db8::1:00a3
+bbbb-164 IN AAAA 2001:db8::1:00a4
+bbbb-165 IN AAAA 2001:db8::1:00a5
+bbbb-166 IN AAAA 2001:db8::1:00a6
+bbbb-167 IN AAAA 2001:db8::1:00a7
+bbbb-168 IN AAAA 2001:db8::1:00a8
+bbbb-169 IN AAAA 2001:db8::1:00a9
+bbbb-170 IN AAAA 2001:db8::1:00aa
+bbbb-171 IN AAAA 2001:db8::1:00ab
+bbbb-172 IN AAAA 2001:db8::1:00ac
+bbbb-173 IN AAAA 2001:db8::1:00ad
+bbbb-174 IN AAAA 2001:db8::1:00ae
+bbbb-175 IN AAAA 2001:db8::1:00af
+bbbb-176 IN AAAA 2001:db8::1:00b0
+bbbb-177 IN AAAA 2001:db8::1:00b1
+bbbb-178 IN AAAA 2001:db8::1:00b2
+bbbb-179 IN AAAA 2001:db8::1:00b3
+bbbb-180 IN AAAA 2001:db8::1:00b4
+bbbb-181 IN AAAA 2001:db8::1:00b5
+bbbb-182 IN AAAA 2001:db8::1:00b6
+bbbb-183 IN AAAA 2001:db8::1:00b7
+bbbb-184 IN AAAA 2001:db8::1:00b8
+bbbb-185 IN AAAA 2001:db8::1:00b9
+bbbb-186 IN AAAA 2001:db8::1:00ba
+bbbb-187 IN AAAA 2001:db8::1:00bb
+bbbb-188 IN AAAA 2001:db8::1:00bc
+bbbb-189 IN AAAA 2001:db8::1:00bd
+bbbb-190 IN AAAA 2001:db8::1:00be
+bbbb-191 IN AAAA 2001:db8::1:00bf
+bbbb-192 IN AAAA 2001:db8::1:00c0
+bbbb-193 IN AAAA 2001:db8::1:00c1
+bbbb-194 IN AAAA 2001:db8::1:00c2
+bbbb-195 IN AAAA 2001:db8::1:00c3
+bbbb-196 IN AAAA 2001:db8::1:00c4
+bbbb-197 IN AAAA 2001:db8::1:00c5
+bbbb-198 IN AAAA 2001:db8::1:00c6
+bbbb-199 IN AAAA 2001:db8::1:00c7
+bbbb-200 IN AAAA 2001:db8::1:00c8
+bbbb-201 IN AAAA 2001:db8::1:00c9
+bbbb-202 IN AAAA 2001:db8::1:00ca
+bbbb-203 IN AAAA 2001:db8::1:00cb
+bbbb-204 IN AAAA 2001:db8::1:00cc
+bbbb-205 IN AAAA 2001:db8::1:00cd
+bbbb-206 IN AAAA 2001:db8::1:00ce
+bbbb-207 IN AAAA 2001:db8::1:00cf
+bbbb-208 IN AAAA 2001:db8::1:00d0
+bbbb-209 IN AAAA 2001:db8::1:00d1
+bbbb-210 IN AAAA 2001:db8::1:00d2
+bbbb-211 IN AAAA 2001:db8::1:00d3
+bbbb-212 IN AAAA 2001:db8::1:00d4
+bbbb-213 IN AAAA 2001:db8::1:00d5
+bbbb-214 IN AAAA 2001:db8::1:00d6
+bbbb-215 IN AAAA 2001:db8::1:00d7
+bbbb-216 IN AAAA 2001:db8::1:00d8
+bbbb-217 IN AAAA 2001:db8::1:00d9
+bbbb-218 IN AAAA 2001:db8::1:00da
+bbbb-219 IN AAAA 2001:db8::1:00db
+bbbb-220 IN AAAA 2001:db8::1:00dc
+bbbb-221 IN AAAA 2001:db8::1:00dd
+bbbb-222 IN AAAA 2001:db8::1:00de
+bbbb-223 IN AAAA 2001:db8::1:00df
+bbbb-224 IN AAAA 2001:db8::1:00e0
+bbbb-225 IN AAAA 2001:db8::1:00e1
+bbbb-226 IN AAAA 2001:db8::1:00e2
+bbbb-227 IN AAAA 2001:db8::1:00e3
+bbbb-228 IN AAAA 2001:db8::1:00e4
+bbbb-229 IN AAAA 2001:db8::1:00e5
+bbbb-230 IN AAAA 2001:db8::1:00e6
+bbbb-231 IN AAAA 2001:db8::1:00e7
+bbbb-232 IN AAAA 2001:db8::1:00e8
+bbbb-233 IN AAAA 2001:db8::1:00e9
+bbbb-234 IN AAAA 2001:db8::1:00ea
+bbbb-235 IN AAAA 2001:db8::1:00eb
+bbbb-236 IN AAAA 2001:db8::1:00ec
+bbbb-237 IN AAAA 2001:db8::1:00ed
+bbbb-238 IN AAAA 2001:db8::1:00ee
+bbbb-239 IN AAAA 2001:db8::1:00ef
+bbbb-240 IN AAAA 2001:db8::1:00f0
+bbbb-241 IN AAAA 2001:db8::1:00f1
+bbbb-242 IN AAAA 2001:db8::1:00f2
+bbbb-243 IN AAAA 2001:db8::1:00f3
+bbbb-244 IN AAAA 2001:db8::1:00f4
+bbbb-245 IN AAAA 2001:db8::1:00f5
+bbbb-246 IN AAAA 2001:db8::1:00f6
+bbbb-247 IN AAAA 2001:db8::1:00f7
+bbbb-248 IN AAAA 2001:db8::1:00f8
+bbbb-249 IN AAAA 2001:db8::1:00f9
+bbbb-250 IN AAAA 2001:db8::1:00fa
+bbbb-251 IN AAAA 2001:db8::1:00fb
+bbbb-252 IN AAAA 2001:db8::1:00fc
+bbbb-253 IN AAAA 2001:db8::1:00fd
+bbbb-254 IN AAAA 2001:db8::1:00fe
+bbbb-255 IN AAAA 2001:db8::1:00ff
+bbbb-256 IN AAAA 2001:db8::1:0100
+bbbb-257 IN AAAA 2001:db8::1:0101
+bbbb-258 IN AAAA 2001:db8::1:0102
+bbbb-259 IN AAAA 2001:db8::1:0103
+bbbb-260 IN AAAA 2001:db8::1:0104
+bbbb-261 IN AAAA 2001:db8::1:0105
+bbbb-262 IN AAAA 2001:db8::1:0106
+bbbb-263 IN AAAA 2001:db8::1:0107
+bbbb-264 IN AAAA 2001:db8::1:0108
+bbbb-265 IN AAAA 2001:db8::1:0109
+bbbb-266 IN AAAA 2001:db8::1:010a
+bbbb-267 IN AAAA 2001:db8::1:010b
+bbbb-268 IN AAAA 2001:db8::1:010c
+bbbb-269 IN AAAA 2001:db8::1:010d
+bbbb-270 IN AAAA 2001:db8::1:010e
+bbbb-271 IN AAAA 2001:db8::1:010f
+bbbb-272 IN AAAA 2001:db8::1:0110
+bbbb-273 IN AAAA 2001:db8::1:0111
+bbbb-274 IN AAAA 2001:db8::1:0112
+bbbb-275 IN AAAA 2001:db8::1:0113
+bbbb-276 IN AAAA 2001:db8::1:0114
+bbbb-277 IN AAAA 2001:db8::1:0115
+bbbb-278 IN AAAA 2001:db8::1:0116
+bbbb-279 IN AAAA 2001:db8::1:0117
+bbbb-280 IN AAAA 2001:db8::1:0118
+bbbb-281 IN AAAA 2001:db8::1:0119
+bbbb-282 IN AAAA 2001:db8::1:011a
+bbbb-283 IN AAAA 2001:db8::1:011b
+bbbb-284 IN AAAA 2001:db8::1:011c
+bbbb-285 IN AAAA 2001:db8::1:011d
+bbbb-286 IN AAAA 2001:db8::1:011e
+bbbb-287 IN AAAA 2001:db8::1:011f
+bbbb-288 IN AAAA 2001:db8::1:0120
+bbbb-289 IN AAAA 2001:db8::1:0121
+bbbb-290 IN AAAA 2001:db8::1:0122
+bbbb-291 IN AAAA 2001:db8::1:0123
+bbbb-292 IN AAAA 2001:db8::1:0124
+bbbb-293 IN AAAA 2001:db8::1:0125
+bbbb-294 IN AAAA 2001:db8::1:0126
+bbbb-295 IN AAAA 2001:db8::1:0127
+bbbb-296 IN AAAA 2001:db8::1:0128
+bbbb-297 IN AAAA 2001:db8::1:0129
+bbbb-298 IN AAAA 2001:db8::1:012a
+bbbb-299 IN AAAA 2001:db8::1:012b
+bbbb-300 IN AAAA 2001:db8::1:012c
+bbbb-301 IN AAAA 2001:db8::1:012d
+bbbb-302 IN AAAA 2001:db8::1:012e
+bbbb-303 IN AAAA 2001:db8::1:012f
+bbbb-304 IN AAAA 2001:db8::1:0130
+bbbb-305 IN AAAA 2001:db8::1:0131
+bbbb-306 IN AAAA 2001:db8::1:0132
+bbbb-307 IN AAAA 2001:db8::1:0133
+bbbb-308 IN AAAA 2001:db8::1:0134
+bbbb-309 IN AAAA 2001:db8::1:0135
+bbbb-310 IN AAAA 2001:db8::1:0136
+bbbb-311 IN AAAA 2001:db8::1:0137
+bbbb-312 IN AAAA 2001:db8::1:0138
+bbbb-313 IN AAAA 2001:db8::1:0139
+bbbb-314 IN AAAA 2001:db8::1:013a
+bbbb-315 IN AAAA 2001:db8::1:013b
+bbbb-316 IN AAAA 2001:db8::1:013c
+bbbb-317 IN AAAA 2001:db8::1:013d
+bbbb-318 IN AAAA 2001:db8::1:013e
+bbbb-319 IN AAAA 2001:db8::1:013f
+bbbb-320 IN AAAA 2001:db8::1:0140
+bbbb-321 IN AAAA 2001:db8::1:0141
+bbbb-322 IN AAAA 2001:db8::1:0142
+bbbb-323 IN AAAA 2001:db8::1:0143
+bbbb-324 IN AAAA 2001:db8::1:0144
+bbbb-325 IN AAAA 2001:db8::1:0145
+bbbb-326 IN AAAA 2001:db8::1:0146
+bbbb-327 IN AAAA 2001:db8::1:0147
+bbbb-328 IN AAAA 2001:db8::1:0148
+bbbb-329 IN AAAA 2001:db8::1:0149
+bbbb-330 IN AAAA 2001:db8::1:014a
+bbbb-331 IN AAAA 2001:db8::1:014b
+bbbb-332 IN AAAA 2001:db8::1:014c
+bbbb-333 IN AAAA 2001:db8::1:014d
+bbbb-334 IN AAAA 2001:db8::1:014e
+bbbb-335 IN AAAA 2001:db8::1:014f
+bbbb-336 IN AAAA 2001:db8::1:0150
+bbbb-337 IN AAAA 2001:db8::1:0151
+bbbb-338 IN AAAA 2001:db8::1:0152
+bbbb-339 IN AAAA 2001:db8::1:0153
+bbbb-340 IN AAAA 2001:db8::1:0154
+bbbb-341 IN AAAA 2001:db8::1:0155
+bbbb-342 IN AAAA 2001:db8::1:0156
+bbbb-343 IN AAAA 2001:db8::1:0157
+bbbb-344 IN AAAA 2001:db8::1:0158
+bbbb-345 IN AAAA 2001:db8::1:0159
+bbbb-346 IN AAAA 2001:db8::1:015a
+bbbb-347 IN AAAA 2001:db8::1:015b
+bbbb-348 IN AAAA 2001:db8::1:015c
+bbbb-349 IN AAAA 2001:db8::1:015d
+bbbb-350 IN AAAA 2001:db8::1:015e
+bbbb-351 IN AAAA 2001:db8::1:015f
+bbbb-352 IN AAAA 2001:db8::1:0160
+bbbb-353 IN AAAA 2001:db8::1:0161
+bbbb-354 IN AAAA 2001:db8::1:0162
+bbbb-355 IN AAAA 2001:db8::1:0163
+bbbb-356 IN AAAA 2001:db8::1:0164
+bbbb-357 IN AAAA 2001:db8::1:0165
+bbbb-358 IN AAAA 2001:db8::1:0166
+bbbb-359 IN AAAA 2001:db8::1:0167
+bbbb-360 IN AAAA 2001:db8::1:0168
+bbbb-361 IN AAAA 2001:db8::1:0169
+bbbb-362 IN AAAA 2001:db8::1:016a
+bbbb-363 IN AAAA 2001:db8::1:016b
+bbbb-364 IN AAAA 2001:db8::1:016c
+bbbb-365 IN AAAA 2001:db8::1:016d
+bbbb-366 IN AAAA 2001:db8::1:016e
+bbbb-367 IN AAAA 2001:db8::1:016f
+bbbb-368 IN AAAA 2001:db8::1:0170
+bbbb-369 IN AAAA 2001:db8::1:0171
+bbbb-370 IN AAAA 2001:db8::1:0172
+bbbb-371 IN AAAA 2001:db8::1:0173
+bbbb-372 IN AAAA 2001:db8::1:0174
+bbbb-373 IN AAAA 2001:db8::1:0175
+bbbb-374 IN AAAA 2001:db8::1:0176
+bbbb-375 IN AAAA 2001:db8::1:0177
+bbbb-376 IN AAAA 2001:db8::1:0178
+bbbb-377 IN AAAA 2001:db8::1:0179
+bbbb-378 IN AAAA 2001:db8::1:017a
+bbbb-379 IN AAAA 2001:db8::1:017b
+bbbb-380 IN AAAA 2001:db8::1:017c
+bbbb-381 IN AAAA 2001:db8::1:017d
+bbbb-382 IN AAAA 2001:db8::1:017e
+bbbb-383 IN AAAA 2001:db8::1:017f
+bbbb-384 IN AAAA 2001:db8::1:0180
+bbbb-385 IN AAAA 2001:db8::1:0181
+bbbb-386 IN AAAA 2001:db8::1:0182
+bbbb-387 IN AAAA 2001:db8::1:0183
+bbbb-388 IN AAAA 2001:db8::1:0184
+bbbb-389 IN AAAA 2001:db8::1:0185
+bbbb-390 IN AAAA 2001:db8::1:0186
+bbbb-391 IN AAAA 2001:db8::1:0187
+bbbb-392 IN AAAA 2001:db8::1:0188
+bbbb-393 IN AAAA 2001:db8::1:0189
+bbbb-394 IN AAAA 2001:db8::1:018a
+bbbb-395 IN AAAA 2001:db8::1:018b
+bbbb-396 IN AAAA 2001:db8::1:018c
+bbbb-397 IN AAAA 2001:db8::1:018d
+bbbb-398 IN AAAA 2001:db8::1:018e
+bbbb-399 IN AAAA 2001:db8::1:018f
+bbbb-400 IN AAAA 2001:db8::1:0190
+bbbb-401 IN AAAA 2001:db8::1:0191
+bbbb-402 IN AAAA 2001:db8::1:0192
+bbbb-403 IN AAAA 2001:db8::1:0193
+bbbb-404 IN AAAA 2001:db8::1:0194
+bbbb-405 IN AAAA 2001:db8::1:0195
+bbbb-406 IN AAAA 2001:db8::1:0196
+bbbb-407 IN AAAA 2001:db8::1:0197
+bbbb-408 IN AAAA 2001:db8::1:0198
+bbbb-409 IN AAAA 2001:db8::1:0199
+bbbb-410 IN AAAA 2001:db8::1:019a
+bbbb-411 IN AAAA 2001:db8::1:019b
+bbbb-412 IN AAAA 2001:db8::1:019c
+bbbb-413 IN AAAA 2001:db8::1:019d
+bbbb-414 IN AAAA 2001:db8::1:019e
+bbbb-415 IN AAAA 2001:db8::1:019f
+bbbb-416 IN AAAA 2001:db8::1:01a0
+bbbb-417 IN AAAA 2001:db8::1:01a1
+bbbb-418 IN AAAA 2001:db8::1:01a2
+bbbb-419 IN AAAA 2001:db8::1:01a3
+bbbb-420 IN AAAA 2001:db8::1:01a4
+bbbb-421 IN AAAA 2001:db8::1:01a5
+bbbb-422 IN AAAA 2001:db8::1:01a6
+bbbb-423 IN AAAA 2001:db8::1:01a7
+bbbb-424 IN AAAA 2001:db8::1:01a8
+bbbb-425 IN AAAA 2001:db8::1:01a9
+bbbb-426 IN AAAA 2001:db8::1:01aa
+bbbb-427 IN AAAA 2001:db8::1:01ab
+bbbb-428 IN AAAA 2001:db8::1:01ac
+bbbb-429 IN AAAA 2001:db8::1:01ad
+bbbb-430 IN AAAA 2001:db8::1:01ae
+bbbb-431 IN AAAA 2001:db8::1:01af
+bbbb-432 IN AAAA 2001:db8::1:01b0
+bbbb-433 IN AAAA 2001:db8::1:01b1
+bbbb-434 IN AAAA 2001:db8::1:01b2
+bbbb-435 IN AAAA 2001:db8::1:01b3
+bbbb-436 IN AAAA 2001:db8::1:01b4
+bbbb-437 IN AAAA 2001:db8::1:01b5
+bbbb-438 IN AAAA 2001:db8::1:01b6
+bbbb-439 IN AAAA 2001:db8::1:01b7
+bbbb-440 IN AAAA 2001:db8::1:01b8
+bbbb-441 IN AAAA 2001:db8::1:01b9
+bbbb-442 IN AAAA 2001:db8::1:01ba
+bbbb-443 IN AAAA 2001:db8::1:01bb
+bbbb-444 IN AAAA 2001:db8::1:01bc
+bbbb-445 IN AAAA 2001:db8::1:01bd
+bbbb-446 IN AAAA 2001:db8::1:01be
+bbbb-447 IN AAAA 2001:db8::1:01bf
+bbbb-448 IN AAAA 2001:db8::1:01c0
+bbbb-449 IN AAAA 2001:db8::1:01c1
+bbbb-450 IN AAAA 2001:db8::1:01c2
+bbbb-451 IN AAAA 2001:db8::1:01c3
+bbbb-452 IN AAAA 2001:db8::1:01c4
+bbbb-453 IN AAAA 2001:db8::1:01c5
+bbbb-454 IN AAAA 2001:db8::1:01c6
+bbbb-455 IN AAAA 2001:db8::1:01c7
+bbbb-456 IN AAAA 2001:db8::1:01c8
+bbbb-457 IN AAAA 2001:db8::1:01c9
+bbbb-458 IN AAAA 2001:db8::1:01ca
+bbbb-459 IN AAAA 2001:db8::1:01cb
+bbbb-460 IN AAAA 2001:db8::1:01cc
+bbbb-461 IN AAAA 2001:db8::1:01cd
+bbbb-462 IN AAAA 2001:db8::1:01ce
+bbbb-463 IN AAAA 2001:db8::1:01cf
+bbbb-464 IN AAAA 2001:db8::1:01d0
+bbbb-465 IN AAAA 2001:db8::1:01d1
+bbbb-466 IN AAAA 2001:db8::1:01d2
+bbbb-467 IN AAAA 2001:db8::1:01d3
+bbbb-468 IN AAAA 2001:db8::1:01d4
+bbbb-469 IN AAAA 2001:db8::1:01d5
+bbbb-470 IN AAAA 2001:db8::1:01d6
+bbbb-471 IN AAAA 2001:db8::1:01d7
+bbbb-472 IN AAAA 2001:db8::1:01d8
+bbbb-473 IN AAAA 2001:db8::1:01d9
+bbbb-474 IN AAAA 2001:db8::1:01da
+bbbb-475 IN AAAA 2001:db8::1:01db
+bbbb-476 IN AAAA 2001:db8::1:01dc
+bbbb-477 IN AAAA 2001:db8::1:01dd
+bbbb-478 IN AAAA 2001:db8::1:01de
+bbbb-479 IN AAAA 2001:db8::1:01df
+bbbb-480 IN AAAA 2001:db8::1:01e0
+bbbb-481 IN AAAA 2001:db8::1:01e1
+bbbb-482 IN AAAA 2001:db8::1:01e2
+bbbb-483 IN AAAA 2001:db8::1:01e3
+bbbb-484 IN AAAA 2001:db8::1:01e4
+bbbb-485 IN AAAA 2001:db8::1:01e5
+bbbb-486 IN AAAA 2001:db8::1:01e6
+bbbb-487 IN AAAA 2001:db8::1:01e7
+bbbb-488 IN AAAA 2001:db8::1:01e8
+bbbb-489 IN AAAA 2001:db8::1:01e9
+bbbb-490 IN AAAA 2001:db8::1:01ea
+bbbb-491 IN AAAA 2001:db8::1:01eb
+bbbb-492 IN AAAA 2001:db8::1:01ec
+bbbb-493 IN AAAA 2001:db8::1:01ed
+bbbb-494 IN AAAA 2001:db8::1:01ee
+bbbb-495 IN AAAA 2001:db8::1:01ef
+bbbb-496 IN AAAA 2001:db8::1:01f0
+bbbb-497 IN AAAA 2001:db8::1:01f1
+bbbb-498 IN AAAA 2001:db8::1:01f2
+bbbb-499 IN AAAA 2001:db8::1:01f3
+bbbb-500 IN AAAA 2001:db8::1:01f4
+bbbb-501 IN AAAA 2001:db8::1:01f5
+bbbb-502 IN AAAA 2001:db8::1:01f6
+bbbb-503 IN AAAA 2001:db8::1:01f7
+bbbb-504 IN AAAA 2001:db8::1:01f8
+bbbb-505 IN AAAA 2001:db8::1:01f9
+bbbb-506 IN AAAA 2001:db8::1:01fa
+bbbb-507 IN AAAA 2001:db8::1:01fb
+bbbb-508 IN AAAA 2001:db8::1:01fc
+bbbb-509 IN AAAA 2001:db8::1:01fd
+bbbb-510 IN AAAA 2001:db8::1:01fe
+bbbb-511 IN AAAA 2001:db8::1:01ff
+
+cccc-000 IN AAAA 2001:db8::2:0000
+cccc-001 IN AAAA 2001:db8::2:0001
+cccc-002 IN AAAA 2001:db8::2:0002
+cccc-003 IN AAAA 2001:db8::2:0003
+cccc-004 IN AAAA 2001:db8::2:0004
+cccc-005 IN AAAA 2001:db8::2:0005
+cccc-006 IN AAAA 2001:db8::2:0006
+cccc-007 IN AAAA 2001:db8::2:0007
+cccc-008 IN AAAA 2001:db8::2:0008
+cccc-009 IN AAAA 2001:db8::2:0009
+cccc-010 IN AAAA 2001:db8::2:000a
+cccc-011 IN AAAA 2001:db8::2:000b
+cccc-012 IN AAAA 2001:db8::2:000c
+cccc-013 IN AAAA 2001:db8::2:000d
+cccc-014 IN AAAA 2001:db8::2:000e
+cccc-015 IN AAAA 2001:db8::2:000f
+cccc-016 IN AAAA 2001:db8::2:0010
+cccc-017 IN AAAA 2001:db8::2:0011
+cccc-018 IN AAAA 2001:db8::2:0012
+cccc-019 IN AAAA 2001:db8::2:0013
+cccc-020 IN AAAA 2001:db8::2:0014
+cccc-021 IN AAAA 2001:db8::2:0015
+cccc-022 IN AAAA 2001:db8::2:0016
+cccc-023 IN AAAA 2001:db8::2:0017
+cccc-024 IN AAAA 2001:db8::2:0018
+cccc-025 IN AAAA 2001:db8::2:0019
+cccc-026 IN AAAA 2001:db8::2:001a
+cccc-027 IN AAAA 2001:db8::2:001b
+cccc-028 IN AAAA 2001:db8::2:001c
+cccc-029 IN AAAA 2001:db8::2:001d
+cccc-030 IN AAAA 2001:db8::2:001e
+cccc-031 IN AAAA 2001:db8::2:001f
+cccc-032 IN AAAA 2001:db8::2:0020
+cccc-033 IN AAAA 2001:db8::2:0021
+cccc-034 IN AAAA 2001:db8::2:0022
+cccc-035 IN AAAA 2001:db8::2:0023
+cccc-036 IN AAAA 2001:db8::2:0024
+cccc-037 IN AAAA 2001:db8::2:0025
+cccc-038 IN AAAA 2001:db8::2:0026
+cccc-039 IN AAAA 2001:db8::2:0027
+cccc-040 IN AAAA 2001:db8::2:0028
+cccc-041 IN AAAA 2001:db8::2:0029
+cccc-042 IN AAAA 2001:db8::2:002a
+cccc-043 IN AAAA 2001:db8::2:002b
+cccc-044 IN AAAA 2001:db8::2:002c
+cccc-045 IN AAAA 2001:db8::2:002d
+cccc-046 IN AAAA 2001:db8::2:002e
+cccc-047 IN AAAA 2001:db8::2:002f
+cccc-048 IN AAAA 2001:db8::2:0030
+cccc-049 IN AAAA 2001:db8::2:0031
+cccc-050 IN AAAA 2001:db8::2:0032
+cccc-051 IN AAAA 2001:db8::2:0033
+cccc-052 IN AAAA 2001:db8::2:0034
+cccc-053 IN AAAA 2001:db8::2:0035
+cccc-054 IN AAAA 2001:db8::2:0036
+cccc-055 IN AAAA 2001:db8::2:0037
+cccc-056 IN AAAA 2001:db8::2:0038
+cccc-057 IN AAAA 2001:db8::2:0039
+cccc-058 IN AAAA 2001:db8::2:003a
+cccc-059 IN AAAA 2001:db8::2:003b
+cccc-060 IN AAAA 2001:db8::2:003c
+cccc-061 IN AAAA 2001:db8::2:003d
+cccc-062 IN AAAA 2001:db8::2:003e
+cccc-063 IN AAAA 2001:db8::2:003f
+cccc-064 IN AAAA 2001:db8::2:0040
+cccc-065 IN AAAA 2001:db8::2:0041
+cccc-066 IN AAAA 2001:db8::2:0042
+cccc-067 IN AAAA 2001:db8::2:0043
+cccc-068 IN AAAA 2001:db8::2:0044
+cccc-069 IN AAAA 2001:db8::2:0045
+cccc-070 IN AAAA 2001:db8::2:0046
+cccc-071 IN AAAA 2001:db8::2:0047
+cccc-072 IN AAAA 2001:db8::2:0048
+cccc-073 IN AAAA 2001:db8::2:0049
+cccc-074 IN AAAA 2001:db8::2:004a
+cccc-075 IN AAAA 2001:db8::2:004b
+cccc-076 IN AAAA 2001:db8::2:004c
+cccc-077 IN AAAA 2001:db8::2:004d
+cccc-078 IN AAAA 2001:db8::2:004e
+cccc-079 IN AAAA 2001:db8::2:004f
+cccc-080 IN AAAA 2001:db8::2:0050
+cccc-081 IN AAAA 2001:db8::2:0051
+cccc-082 IN AAAA 2001:db8::2:0052
+cccc-083 IN AAAA 2001:db8::2:0053
+cccc-084 IN AAAA 2001:db8::2:0054
+cccc-085 IN AAAA 2001:db8::2:0055
+cccc-086 IN AAAA 2001:db8::2:0056
+cccc-087 IN AAAA 2001:db8::2:0057
+cccc-088 IN AAAA 2001:db8::2:0058
+cccc-089 IN AAAA 2001:db8::2:0059
+cccc-090 IN AAAA 2001:db8::2:005a
+cccc-091 IN AAAA 2001:db8::2:005b
+cccc-092 IN AAAA 2001:db8::2:005c
+cccc-093 IN AAAA 2001:db8::2:005d
+cccc-094 IN AAAA 2001:db8::2:005e
+cccc-095 IN AAAA 2001:db8::2:005f
+cccc-096 IN AAAA 2001:db8::2:0060
+cccc-097 IN AAAA 2001:db8::2:0061
+cccc-098 IN AAAA 2001:db8::2:0062
+cccc-099 IN AAAA 2001:db8::2:0063
+cccc-100 IN AAAA 2001:db8::2:0064
+cccc-101 IN AAAA 2001:db8::2:0065
+cccc-102 IN AAAA 2001:db8::2:0066
+cccc-103 IN AAAA 2001:db8::2:0067
+cccc-104 IN AAAA 2001:db8::2:0068
+cccc-105 IN AAAA 2001:db8::2:0069
+cccc-106 IN AAAA 2001:db8::2:006a
+cccc-107 IN AAAA 2001:db8::2:006b
+cccc-108 IN AAAA 2001:db8::2:006c
+cccc-109 IN AAAA 2001:db8::2:006d
+cccc-110 IN AAAA 2001:db8::2:006e
+cccc-111 IN AAAA 2001:db8::2:006f
+cccc-112 IN AAAA 2001:db8::2:0070
+cccc-113 IN AAAA 2001:db8::2:0071
+cccc-114 IN AAAA 2001:db8::2:0072
+cccc-115 IN AAAA 2001:db8::2:0073
+cccc-116 IN AAAA 2001:db8::2:0074
+cccc-117 IN AAAA 2001:db8::2:0075
+cccc-118 IN AAAA 2001:db8::2:0076
+cccc-119 IN AAAA 2001:db8::2:0077
+cccc-120 IN AAAA 2001:db8::2:0078
+cccc-121 IN AAAA 2001:db8::2:0079
+cccc-122 IN AAAA 2001:db8::2:007a
+cccc-123 IN AAAA 2001:db8::2:007b
+cccc-124 IN AAAA 2001:db8::2:007c
+cccc-125 IN AAAA 2001:db8::2:007d
+cccc-126 IN AAAA 2001:db8::2:007e
+cccc-127 IN AAAA 2001:db8::2:007f
+cccc-128 IN AAAA 2001:db8::2:0080
+cccc-129 IN AAAA 2001:db8::2:0081
+cccc-130 IN AAAA 2001:db8::2:0082
+cccc-131 IN AAAA 2001:db8::2:0083
+cccc-132 IN AAAA 2001:db8::2:0084
+cccc-133 IN AAAA 2001:db8::2:0085
+cccc-134 IN AAAA 2001:db8::2:0086
+cccc-135 IN AAAA 2001:db8::2:0087
+cccc-136 IN AAAA 2001:db8::2:0088
+cccc-137 IN AAAA 2001:db8::2:0089
+cccc-138 IN AAAA 2001:db8::2:008a
+cccc-139 IN AAAA 2001:db8::2:008b
+cccc-140 IN AAAA 2001:db8::2:008c
+cccc-141 IN AAAA 2001:db8::2:008d
+cccc-142 IN AAAA 2001:db8::2:008e
+cccc-143 IN AAAA 2001:db8::2:008f
+cccc-144 IN AAAA 2001:db8::2:0090
+cccc-145 IN AAAA 2001:db8::2:0091
+cccc-146 IN AAAA 2001:db8::2:0092
+cccc-147 IN AAAA 2001:db8::2:0093
+cccc-148 IN AAAA 2001:db8::2:0094
+cccc-149 IN AAAA 2001:db8::2:0095
+cccc-150 IN AAAA 2001:db8::2:0096
+cccc-151 IN AAAA 2001:db8::2:0097
+cccc-152 IN AAAA 2001:db8::2:0098
+cccc-153 IN AAAA 2001:db8::2:0099
+cccc-154 IN AAAA 2001:db8::2:009a
+cccc-155 IN AAAA 2001:db8::2:009b
+cccc-156 IN AAAA 2001:db8::2:009c
+cccc-157 IN AAAA 2001:db8::2:009d
+cccc-158 IN AAAA 2001:db8::2:009e
+cccc-159 IN AAAA 2001:db8::2:009f
+cccc-160 IN AAAA 2001:db8::2:00a0
+cccc-161 IN AAAA 2001:db8::2:00a1
+cccc-162 IN AAAA 2001:db8::2:00a2
+cccc-163 IN AAAA 2001:db8::2:00a3
+cccc-164 IN AAAA 2001:db8::2:00a4
+cccc-165 IN AAAA 2001:db8::2:00a5
+cccc-166 IN AAAA 2001:db8::2:00a6
+cccc-167 IN AAAA 2001:db8::2:00a7
+cccc-168 IN AAAA 2001:db8::2:00a8
+cccc-169 IN AAAA 2001:db8::2:00a9
+cccc-170 IN AAAA 2001:db8::2:00aa
+cccc-171 IN AAAA 2001:db8::2:00ab
+cccc-172 IN AAAA 2001:db8::2:00ac
+cccc-173 IN AAAA 2001:db8::2:00ad
+cccc-174 IN AAAA 2001:db8::2:00ae
+cccc-175 IN AAAA 2001:db8::2:00af
+cccc-176 IN AAAA 2001:db8::2:00b0
+cccc-177 IN AAAA 2001:db8::2:00b1
+cccc-178 IN AAAA 2001:db8::2:00b2
+cccc-179 IN AAAA 2001:db8::2:00b3
+cccc-180 IN AAAA 2001:db8::2:00b4
+cccc-181 IN AAAA 2001:db8::2:00b5
+cccc-182 IN AAAA 2001:db8::2:00b6
+cccc-183 IN AAAA 2001:db8::2:00b7
+cccc-184 IN AAAA 2001:db8::2:00b8
+cccc-185 IN AAAA 2001:db8::2:00b9
+cccc-186 IN AAAA 2001:db8::2:00ba
+cccc-187 IN AAAA 2001:db8::2:00bb
+cccc-188 IN AAAA 2001:db8::2:00bc
+cccc-189 IN AAAA 2001:db8::2:00bd
+cccc-190 IN AAAA 2001:db8::2:00be
+cccc-191 IN AAAA 2001:db8::2:00bf
+cccc-192 IN AAAA 2001:db8::2:00c0
+cccc-193 IN AAAA 2001:db8::2:00c1
+cccc-194 IN AAAA 2001:db8::2:00c2
+cccc-195 IN AAAA 2001:db8::2:00c3
+cccc-196 IN AAAA 2001:db8::2:00c4
+cccc-197 IN AAAA 2001:db8::2:00c5
+cccc-198 IN AAAA 2001:db8::2:00c6
+cccc-199 IN AAAA 2001:db8::2:00c7
+cccc-200 IN AAAA 2001:db8::2:00c8
+cccc-201 IN AAAA 2001:db8::2:00c9
+cccc-202 IN AAAA 2001:db8::2:00ca
+cccc-203 IN AAAA 2001:db8::2:00cb
+cccc-204 IN AAAA 2001:db8::2:00cc
+cccc-205 IN AAAA 2001:db8::2:00cd
+cccc-206 IN AAAA 2001:db8::2:00ce
+cccc-207 IN AAAA 2001:db8::2:00cf
+cccc-208 IN AAAA 2001:db8::2:00d0
+cccc-209 IN AAAA 2001:db8::2:00d1
+cccc-210 IN AAAA 2001:db8::2:00d2
+cccc-211 IN AAAA 2001:db8::2:00d3
+cccc-212 IN AAAA 2001:db8::2:00d4
+cccc-213 IN AAAA 2001:db8::2:00d5
+cccc-214 IN AAAA 2001:db8::2:00d6
+cccc-215 IN AAAA 2001:db8::2:00d7
+cccc-216 IN AAAA 2001:db8::2:00d8
+cccc-217 IN AAAA 2001:db8::2:00d9
+cccc-218 IN AAAA 2001:db8::2:00da
+cccc-219 IN AAAA 2001:db8::2:00db
+cccc-220 IN AAAA 2001:db8::2:00dc
+cccc-221 IN AAAA 2001:db8::2:00dd
+cccc-222 IN AAAA 2001:db8::2:00de
+cccc-223 IN AAAA 2001:db8::2:00df
+cccc-224 IN AAAA 2001:db8::2:00e0
+cccc-225 IN AAAA 2001:db8::2:00e1
+cccc-226 IN AAAA 2001:db8::2:00e2
+cccc-227 IN AAAA 2001:db8::2:00e3
+cccc-228 IN AAAA 2001:db8::2:00e4
+cccc-229 IN AAAA 2001:db8::2:00e5
+cccc-230 IN AAAA 2001:db8::2:00e6
+cccc-231 IN AAAA 2001:db8::2:00e7
+cccc-232 IN AAAA 2001:db8::2:00e8
+cccc-233 IN AAAA 2001:db8::2:00e9
+cccc-234 IN AAAA 2001:db8::2:00ea
+cccc-235 IN AAAA 2001:db8::2:00eb
+cccc-236 IN AAAA 2001:db8::2:00ec
+cccc-237 IN AAAA 2001:db8::2:00ed
+cccc-238 IN AAAA 2001:db8::2:00ee
+cccc-239 IN AAAA 2001:db8::2:00ef
+cccc-240 IN AAAA 2001:db8::2:00f0
+cccc-241 IN AAAA 2001:db8::2:00f1
+cccc-242 IN AAAA 2001:db8::2:00f2
+cccc-243 IN AAAA 2001:db8::2:00f3
+cccc-244 IN AAAA 2001:db8::2:00f4
+cccc-245 IN AAAA 2001:db8::2:00f5
+cccc-246 IN AAAA 2001:db8::2:00f6
+cccc-247 IN AAAA 2001:db8::2:00f7
+cccc-248 IN AAAA 2001:db8::2:00f8
+cccc-249 IN AAAA 2001:db8::2:00f9
+cccc-250 IN AAAA 2001:db8::2:00fa
+cccc-251 IN AAAA 2001:db8::2:00fb
+cccc-252 IN AAAA 2001:db8::2:00fc
+cccc-253 IN AAAA 2001:db8::2:00fd
+cccc-254 IN AAAA 2001:db8::2:00fe
+cccc-255 IN AAAA 2001:db8::2:00ff
+cccc-256 IN AAAA 2001:db8::2:0100
+cccc-257 IN AAAA 2001:db8::2:0101
+cccc-258 IN AAAA 2001:db8::2:0102
+cccc-259 IN AAAA 2001:db8::2:0103
+cccc-260 IN AAAA 2001:db8::2:0104
+cccc-261 IN AAAA 2001:db8::2:0105
+cccc-262 IN AAAA 2001:db8::2:0106
+cccc-263 IN AAAA 2001:db8::2:0107
+cccc-264 IN AAAA 2001:db8::2:0108
+cccc-265 IN AAAA 2001:db8::2:0109
+cccc-266 IN AAAA 2001:db8::2:010a
+cccc-267 IN AAAA 2001:db8::2:010b
+cccc-268 IN AAAA 2001:db8::2:010c
+cccc-269 IN AAAA 2001:db8::2:010d
+cccc-270 IN AAAA 2001:db8::2:010e
+cccc-271 IN AAAA 2001:db8::2:010f
+cccc-272 IN AAAA 2001:db8::2:0110
+cccc-273 IN AAAA 2001:db8::2:0111
+cccc-274 IN AAAA 2001:db8::2:0112
+cccc-275 IN AAAA 2001:db8::2:0113
+cccc-276 IN AAAA 2001:db8::2:0114
+cccc-277 IN AAAA 2001:db8::2:0115
+cccc-278 IN AAAA 2001:db8::2:0116
+cccc-279 IN AAAA 2001:db8::2:0117
+cccc-280 IN AAAA 2001:db8::2:0118
+cccc-281 IN AAAA 2001:db8::2:0119
+cccc-282 IN AAAA 2001:db8::2:011a
+cccc-283 IN AAAA 2001:db8::2:011b
+cccc-284 IN AAAA 2001:db8::2:011c
+cccc-285 IN AAAA 2001:db8::2:011d
+cccc-286 IN AAAA 2001:db8::2:011e
+cccc-287 IN AAAA 2001:db8::2:011f
+cccc-288 IN AAAA 2001:db8::2:0120
+cccc-289 IN AAAA 2001:db8::2:0121
+cccc-290 IN AAAA 2001:db8::2:0122
+cccc-291 IN AAAA 2001:db8::2:0123
+cccc-292 IN AAAA 2001:db8::2:0124
+cccc-293 IN AAAA 2001:db8::2:0125
+cccc-294 IN AAAA 2001:db8::2:0126
+cccc-295 IN AAAA 2001:db8::2:0127
+cccc-296 IN AAAA 2001:db8::2:0128
+cccc-297 IN AAAA 2001:db8::2:0129
+cccc-298 IN AAAA 2001:db8::2:012a
+cccc-299 IN AAAA 2001:db8::2:012b
+cccc-300 IN AAAA 2001:db8::2:012c
+cccc-301 IN AAAA 2001:db8::2:012d
+cccc-302 IN AAAA 2001:db8::2:012e
+cccc-303 IN AAAA 2001:db8::2:012f
+cccc-304 IN AAAA 2001:db8::2:0130
+cccc-305 IN AAAA 2001:db8::2:0131
+cccc-306 IN AAAA 2001:db8::2:0132
+cccc-307 IN AAAA 2001:db8::2:0133
+cccc-308 IN AAAA 2001:db8::2:0134
+cccc-309 IN AAAA 2001:db8::2:0135
+cccc-310 IN AAAA 2001:db8::2:0136
+cccc-311 IN AAAA 2001:db8::2:0137
+cccc-312 IN AAAA 2001:db8::2:0138
+cccc-313 IN AAAA 2001:db8::2:0139
+cccc-314 IN AAAA 2001:db8::2:013a
+cccc-315 IN AAAA 2001:db8::2:013b
+cccc-316 IN AAAA 2001:db8::2:013c
+cccc-317 IN AAAA 2001:db8::2:013d
+cccc-318 IN AAAA 2001:db8::2:013e
+cccc-319 IN AAAA 2001:db8::2:013f
+cccc-320 IN AAAA 2001:db8::2:0140
+cccc-321 IN AAAA 2001:db8::2:0141
+cccc-322 IN AAAA 2001:db8::2:0142
+cccc-323 IN AAAA 2001:db8::2:0143
+cccc-324 IN AAAA 2001:db8::2:0144
+cccc-325 IN AAAA 2001:db8::2:0145
+cccc-326 IN AAAA 2001:db8::2:0146
+cccc-327 IN AAAA 2001:db8::2:0147
+cccc-328 IN AAAA 2001:db8::2:0148
+cccc-329 IN AAAA 2001:db8::2:0149
+cccc-330 IN AAAA 2001:db8::2:014a
+cccc-331 IN AAAA 2001:db8::2:014b
+cccc-332 IN AAAA 2001:db8::2:014c
+cccc-333 IN AAAA 2001:db8::2:014d
+cccc-334 IN AAAA 2001:db8::2:014e
+cccc-335 IN AAAA 2001:db8::2:014f
+cccc-336 IN AAAA 2001:db8::2:0150
+cccc-337 IN AAAA 2001:db8::2:0151
+cccc-338 IN AAAA 2001:db8::2:0152
+cccc-339 IN AAAA 2001:db8::2:0153
+cccc-340 IN AAAA 2001:db8::2:0154
+cccc-341 IN AAAA 2001:db8::2:0155
+cccc-342 IN AAAA 2001:db8::2:0156
+cccc-343 IN AAAA 2001:db8::2:0157
+cccc-344 IN AAAA 2001:db8::2:0158
+cccc-345 IN AAAA 2001:db8::2:0159
+cccc-346 IN AAAA 2001:db8::2:015a
+cccc-347 IN AAAA 2001:db8::2:015b
+cccc-348 IN AAAA 2001:db8::2:015c
+cccc-349 IN AAAA 2001:db8::2:015d
+cccc-350 IN AAAA 2001:db8::2:015e
+cccc-351 IN AAAA 2001:db8::2:015f
+cccc-352 IN AAAA 2001:db8::2:0160
+cccc-353 IN AAAA 2001:db8::2:0161
+cccc-354 IN AAAA 2001:db8::2:0162
+cccc-355 IN AAAA 2001:db8::2:0163
+cccc-356 IN AAAA 2001:db8::2:0164
+cccc-357 IN AAAA 2001:db8::2:0165
+cccc-358 IN AAAA 2001:db8::2:0166
+cccc-359 IN AAAA 2001:db8::2:0167
+cccc-360 IN AAAA 2001:db8::2:0168
+cccc-361 IN AAAA 2001:db8::2:0169
+cccc-362 IN AAAA 2001:db8::2:016a
+cccc-363 IN AAAA 2001:db8::2:016b
+cccc-364 IN AAAA 2001:db8::2:016c
+cccc-365 IN AAAA 2001:db8::2:016d
+cccc-366 IN AAAA 2001:db8::2:016e
+cccc-367 IN AAAA 2001:db8::2:016f
+cccc-368 IN AAAA 2001:db8::2:0170
+cccc-369 IN AAAA 2001:db8::2:0171
+cccc-370 IN AAAA 2001:db8::2:0172
+cccc-371 IN AAAA 2001:db8::2:0173
+cccc-372 IN AAAA 2001:db8::2:0174
+cccc-373 IN AAAA 2001:db8::2:0175
+cccc-374 IN AAAA 2001:db8::2:0176
+cccc-375 IN AAAA 2001:db8::2:0177
+cccc-376 IN AAAA 2001:db8::2:0178
+cccc-377 IN AAAA 2001:db8::2:0179
+cccc-378 IN AAAA 2001:db8::2:017a
+cccc-379 IN AAAA 2001:db8::2:017b
+cccc-380 IN AAAA 2001:db8::2:017c
+cccc-381 IN AAAA 2001:db8::2:017d
+cccc-382 IN AAAA 2001:db8::2:017e
+cccc-383 IN AAAA 2001:db8::2:017f
+cccc-384 IN AAAA 2001:db8::2:0180
+cccc-385 IN AAAA 2001:db8::2:0181
+cccc-386 IN AAAA 2001:db8::2:0182
+cccc-387 IN AAAA 2001:db8::2:0183
+cccc-388 IN AAAA 2001:db8::2:0184
+cccc-389 IN AAAA 2001:db8::2:0185
+cccc-390 IN AAAA 2001:db8::2:0186
+cccc-391 IN AAAA 2001:db8::2:0187
+cccc-392 IN AAAA 2001:db8::2:0188
+cccc-393 IN AAAA 2001:db8::2:0189
+cccc-394 IN AAAA 2001:db8::2:018a
+cccc-395 IN AAAA 2001:db8::2:018b
+cccc-396 IN AAAA 2001:db8::2:018c
+cccc-397 IN AAAA 2001:db8::2:018d
+cccc-398 IN AAAA 2001:db8::2:018e
+cccc-399 IN AAAA 2001:db8::2:018f
+cccc-400 IN AAAA 2001:db8::2:0190
+cccc-401 IN AAAA 2001:db8::2:0191
+cccc-402 IN AAAA 2001:db8::2:0192
+cccc-403 IN AAAA 2001:db8::2:0193
+cccc-404 IN AAAA 2001:db8::2:0194
+cccc-405 IN AAAA 2001:db8::2:0195
+cccc-406 IN AAAA 2001:db8::2:0196
+cccc-407 IN AAAA 2001:db8::2:0197
+cccc-408 IN AAAA 2001:db8::2:0198
+cccc-409 IN AAAA 2001:db8::2:0199
+cccc-410 IN AAAA 2001:db8::2:019a
+cccc-411 IN AAAA 2001:db8::2:019b
+cccc-412 IN AAAA 2001:db8::2:019c
+cccc-413 IN AAAA 2001:db8::2:019d
+cccc-414 IN AAAA 2001:db8::2:019e
+cccc-415 IN AAAA 2001:db8::2:019f
+cccc-416 IN AAAA 2001:db8::2:01a0
+cccc-417 IN AAAA 2001:db8::2:01a1
+cccc-418 IN AAAA 2001:db8::2:01a2
+cccc-419 IN AAAA 2001:db8::2:01a3
+cccc-420 IN AAAA 2001:db8::2:01a4
+cccc-421 IN AAAA 2001:db8::2:01a5
+cccc-422 IN AAAA 2001:db8::2:01a6
+cccc-423 IN AAAA 2001:db8::2:01a7
+cccc-424 IN AAAA 2001:db8::2:01a8
+cccc-425 IN AAAA 2001:db8::2:01a9
+cccc-426 IN AAAA 2001:db8::2:01aa
+cccc-427 IN AAAA 2001:db8::2:01ab
+cccc-428 IN AAAA 2001:db8::2:01ac
+cccc-429 IN AAAA 2001:db8::2:01ad
+cccc-430 IN AAAA 2001:db8::2:01ae
+cccc-431 IN AAAA 2001:db8::2:01af
+cccc-432 IN AAAA 2001:db8::2:01b0
+cccc-433 IN AAAA 2001:db8::2:01b1
+cccc-434 IN AAAA 2001:db8::2:01b2
+cccc-435 IN AAAA 2001:db8::2:01b3
+cccc-436 IN AAAA 2001:db8::2:01b4
+cccc-437 IN AAAA 2001:db8::2:01b5
+cccc-438 IN AAAA 2001:db8::2:01b6
+cccc-439 IN AAAA 2001:db8::2:01b7
+cccc-440 IN AAAA 2001:db8::2:01b8
+cccc-441 IN AAAA 2001:db8::2:01b9
+cccc-442 IN AAAA 2001:db8::2:01ba
+cccc-443 IN AAAA 2001:db8::2:01bb
+cccc-444 IN AAAA 2001:db8::2:01bc
+cccc-445 IN AAAA 2001:db8::2:01bd
+cccc-446 IN AAAA 2001:db8::2:01be
+cccc-447 IN AAAA 2001:db8::2:01bf
+cccc-448 IN AAAA 2001:db8::2:01c0
+cccc-449 IN AAAA 2001:db8::2:01c1
+cccc-450 IN AAAA 2001:db8::2:01c2
+cccc-451 IN AAAA 2001:db8::2:01c3
+cccc-452 IN AAAA 2001:db8::2:01c4
+cccc-453 IN AAAA 2001:db8::2:01c5
+cccc-454 IN AAAA 2001:db8::2:01c6
+cccc-455 IN AAAA 2001:db8::2:01c7
+cccc-456 IN AAAA 2001:db8::2:01c8
+cccc-457 IN AAAA 2001:db8::2:01c9
+cccc-458 IN AAAA 2001:db8::2:01ca
+cccc-459 IN AAAA 2001:db8::2:01cb
+cccc-460 IN AAAA 2001:db8::2:01cc
+cccc-461 IN AAAA 2001:db8::2:01cd
+cccc-462 IN AAAA 2001:db8::2:01ce
+cccc-463 IN AAAA 2001:db8::2:01cf
+cccc-464 IN AAAA 2001:db8::2:01d0
+cccc-465 IN AAAA 2001:db8::2:01d1
+cccc-466 IN AAAA 2001:db8::2:01d2
+cccc-467 IN AAAA 2001:db8::2:01d3
+cccc-468 IN AAAA 2001:db8::2:01d4
+cccc-469 IN AAAA 2001:db8::2:01d5
+cccc-470 IN AAAA 2001:db8::2:01d6
+cccc-471 IN AAAA 2001:db8::2:01d7
+cccc-472 IN AAAA 2001:db8::2:01d8
+cccc-473 IN AAAA 2001:db8::2:01d9
+cccc-474 IN AAAA 2001:db8::2:01da
+cccc-475 IN AAAA 2001:db8::2:01db
+cccc-476 IN AAAA 2001:db8::2:01dc
+cccc-477 IN AAAA 2001:db8::2:01dd
+cccc-478 IN AAAA 2001:db8::2:01de
+cccc-479 IN AAAA 2001:db8::2:01df
+cccc-480 IN AAAA 2001:db8::2:01e0
+cccc-481 IN AAAA 2001:db8::2:01e1
+cccc-482 IN AAAA 2001:db8::2:01e2
+cccc-483 IN AAAA 2001:db8::2:01e3
+cccc-484 IN AAAA 2001:db8::2:01e4
+cccc-485 IN AAAA 2001:db8::2:01e5
+cccc-486 IN AAAA 2001:db8::2:01e6
+cccc-487 IN AAAA 2001:db8::2:01e7
+cccc-488 IN AAAA 2001:db8::2:01e8
+cccc-489 IN AAAA 2001:db8::2:01e9
+cccc-490 IN AAAA 2001:db8::2:01ea
+cccc-491 IN AAAA 2001:db8::2:01eb
+cccc-492 IN AAAA 2001:db8::2:01ec
+cccc-493 IN AAAA 2001:db8::2:01ed
+cccc-494 IN AAAA 2001:db8::2:01ee
+cccc-495 IN AAAA 2001:db8::2:01ef
+cccc-496 IN AAAA 2001:db8::2:01f0
+cccc-497 IN AAAA 2001:db8::2:01f1
+cccc-498 IN AAAA 2001:db8::2:01f2
+cccc-499 IN AAAA 2001:db8::2:01f3
+cccc-500 IN AAAA 2001:db8::2:01f4
+cccc-501 IN AAAA 2001:db8::2:01f5
+cccc-502 IN AAAA 2001:db8::2:01f6
+cccc-503 IN AAAA 2001:db8::2:01f7
+cccc-504 IN AAAA 2001:db8::2:01f8
+cccc-505 IN AAAA 2001:db8::2:01f9
+cccc-506 IN AAAA 2001:db8::2:01fa
+cccc-507 IN AAAA 2001:db8::2:01fb
+cccc-508 IN AAAA 2001:db8::2:01fc
+cccc-509 IN AAAA 2001:db8::2:01fd
+cccc-510 IN AAAA 2001:db8::2:01fe
+cccc-511 IN AAAA 2001:db8::2:01ff
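
The aaaa-, bbbb- and cccc- padding records above follow a simple pattern: a three-digit decimal index maps to a four-digit hexadecimal suffix, with the bbbb and cccc blocks adding a 1: or 2: group before the suffix. A hypothetical generator (illustrative only, not part of the commit) for the first block might look like:

    i=0
    while [ $i -lt 512 ]; do
        # aaaa-000 .. aaaa-511, suffix is the index in hex (0000 .. 01ff).
        label=`printf "aaaa-%03d" $i`
        suffix=`printf "%04x" $i`
        echo "$label IN AAAA 2001:db8::$suffix"
        i=`expr $i + 1`
    done
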
diff --git a/tests/system/ixfr/db.example.n0.in b/tests/system/ixfr/db.example.n0.in
new file mode 100644
index 0000000..92fa0b0
--- /dev/null
+++ b/tests/system/ixfr/db.example.n0.in
@@ -0,0 +1,29 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@ IN SOA ns1.example. hostmaster.example. 100 3600 900 7200 300
+
+ IN NS ns1.example.
+ IN NS ns2.example.
+
+ns1 IN A 192.0.2.1
+ns2 IN A 192.0.2.2
+
+a-1 IN A 192.0.2.101
+b-1 IN A 192.0.2.201
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n2.in b/tests/system/ixfr/db.example.n2.in
new file mode 100644
index 0000000..6a999af
--- /dev/null
+++ b/tests/system/ixfr/db.example.n2.in
@@ -0,0 +1,28 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@ IN SOA ns1.example. hostmaster.example. 98 3600 900 7200 300
+
+ IN NS ns1.example.
+ IN NS ns2.example.
+
+ns1 IN A 192.0.2.1
+ns2 IN A 192.0.2.2
+
+a-1 IN A 192.0.2.101
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n2.refresh.in b/tests/system/ixfr/db.example.n2.refresh.in
new file mode 100644
index 0000000..2c59416
--- /dev/null
+++ b/tests/system/ixfr/db.example.n2.refresh.in
@@ -0,0 +1,28 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@ IN SOA ns1.example. hostmaster.example. 98 30 2 7200 300
+
+ IN NS ns1.example.
+ IN NS ns2.example.
+
+ns1 IN A 192.0.2.1
+ns2 IN A 192.0.2.2
+
+a-1 IN A 192.0.2.101
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
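The SOA RDATA above is serial, refresh, retry, expire and minimum, so "98 30 2 7200 300" gives serial 98 with a 30-second refresh and a 2-second retry; the in-4 test depends on these short timers. As a rough sketch (assuming the conf.sh variables used throughout these tests), the serial a running server reports can be read with:

    # Print just the SOA serial of example. as served by the BIND 10 client
    # (10.53.0.2 is CLIENT_IP in these tests; DIG and DNS_PORT come from conf.sh).
    $DIG +norecurse +short -p $DNS_PORT example. SOA @10.53.0.2 | awk '{print $3}'

This is the same query that the DIG_SOA short-hand in ixfr_init.sh wraps.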
diff --git a/tests/system/ixfr/db.example.n4.in b/tests/system/ixfr/db.example.n4.in
new file mode 100644
index 0000000..ae15a54
--- /dev/null
+++ b/tests/system/ixfr/db.example.n4.in
@@ -0,0 +1,31 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@ IN SOA ns1.example. hostmaster.example. 96 3600 900 7200 300
+
+ IN NS ns1.example.
+ IN NS ns2.example.
+
+ns1 IN A 192.0.2.1
+ns2 IN A 192.0.2.2
+
+a-1 IN A 192.0.2.101
+a-2 IN A 192.0.2.102
+b-1 IN A 192.0.2.201
+b-2 IN A 192.0.2.202
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n6.in b/tests/system/ixfr/db.example.n6.in
new file mode 100644
index 0000000..33a82a0
--- /dev/null
+++ b/tests/system/ixfr/db.example.n6.in
@@ -0,0 +1,29 @@
+; Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@ IN SOA ns1.example. hostmaster.example. 94 3600 900 7200 300
+
+ IN NS ns1.example.
+ IN NS ns2.example.
+
+ns1 IN A 192.0.2.1
+ns2 IN A 192.0.2.2
+
+a-1 IN A 192.0.2.101
+a-2 IN A 192.0.2.102
+b-1 IN A 192.0.2.201
+b-2 IN A 192.0.2.202
diff --git a/tests/system/ixfr/in-1/clean.sh b/tests/system/ixfr/in-1/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-1/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-1/ns1/README b/tests/system/ixfr/in-1/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-1/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-1/nsx2/README b/tests/system/ixfr/in-1/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-1/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-1/setup.sh.in b/tests/system/ixfr/in-1/setup.sh.in
new file mode 100644
index 0000000..d4c3978
--- /dev/null
+++ b/tests/system/ixfr/in-1/setup.sh.in
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2011 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2001, 2002 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the N-4 version of the
+# zone. The configuration file enables IXFR and disables notifies.
+cp -f $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n4 ns1/db.example
+
+# Set up the IXFR client - load the same version of the zone.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n4
diff --git a/tests/system/ixfr/in-1/tests.sh b/tests/system/ixfr/in-1/tests.sh
new file mode 100644
index 0000000..2f49ddf
--- /dev/null
+++ b/tests/system/ixfr/in-1/tests.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the first IXFR-IN test. A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and two previous
+# versions, N-2 and N-4. A BIND 10 nameserver (the "client") is loaded with
+# version N-4 of the zone. A NOTIFY is sent to it, and it is expected that
+# it will send an IXFR request to the server and update itself with the latest version
+# of the zone. (The changes are such that the update should be in the form of
+# a single UDP packet.)
+#
+# The pre-requisites for this test are the same as for the common tests, so
+# the common test script can be executed directly.
+
+. ../common_tests.sh
+status=$?
+
+# TODO: Check the BIND 10 log, looking for the IXFR messages that indicate that
+# it has initiated an IXFR and that it received the update within a single
+# packet.
+
+echo "I:exit status: $status"
+exit $status
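A minimal sketch of the log check the TODO above asks for, reusing the message identifiers that in-2/tests.sh greps for (this assumes the common_tests.sh environment provides CLIENT_NAME, and it does not cover the single-UDP-packet aspect):

    # Confirm the client logged an IXFR start and a successful transfer.
    grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR > /dev/null || {
        echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
        exit 1
    }
    grep XFRIN_XFR_TRANSFER_SUCCESS nsx2/bind10.run | grep IXFR > /dev/null || {
        echo "R:$CLIENT_NAME FAIL no 'IXFR successful' message in the BIND 10 log"
        exit 1
    }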
diff --git a/tests/system/ixfr/in-2/clean.sh b/tests/system/ixfr/in-2/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-2/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-2/ns1/README b/tests/system/ixfr/in-2/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-2/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-2/nsx2/README b/tests/system/ixfr/in-2/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-2/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-2/setup.sh.in b/tests/system/ixfr/in-2/setup.sh.in
new file mode 100644
index 0000000..a5f64e5
--- /dev/null
+++ b/tests/system/ixfr/in-2/setup.sh.in
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the N-6 version of the
+# zone. The configuration file enables IXFR and disables notifies.
+cp -f $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n6 ns1/db.example
+
+# Set up the IXFR client - load an earlier version of the zone
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n6
diff --git a/tests/system/ixfr/in-2/tests.sh b/tests/system/ixfr/in-2/tests.sh
new file mode 100644
index 0000000..7b1e2a8
--- /dev/null
+++ b/tests/system/ixfr/in-2/tests.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the second IXFR-IN test. A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and three previous
+# versions, N-2, N-4 and N-6. A BIND 10 nameserver (the "client") is loaded
+# with version N-6 of the zone. A NOTIFY is sent to it, and it is expected that
+# it will send an IXFR request to the server and update itself with the latest version
+# of the zone. (The changes are such that the update will have to take place
+# over TCP.)
+
+. ../ixfr_init.sh
+
+# On entry, the IXFR server is at version N-6. The common tests assume that
+# it is at N-4, so update it.
+echo "I:$SERVER_NAME updating IXFR-server to suitable start version"
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n4
+if [ $? -ne 0 ];
+then
+ exit 1
+fi
+
+# The pre-requisites for this test are the same as for the common tests, so
+# the common test script can be executed directly.
+. ../common_tests.sh
+if [ $? -ne 0 ];
+then
+ exit 1
+fi
+
+# TEMPORARY: at the time of writing (October 2011) BIND 10 does not attempt
+# a UDP transfer first. Therefore just check for TCP transfer.
+
+# Check that the client initiated and completed an IXFR. Use a simple grep as
+# the syntax and capabilities of egrep may vary between systems.
+grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR > /dev/null
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
+ exit 1
+fi
+
+grep XFRIN_XFR_TRANSFER_SUCCESS nsx2/bind10.run | grep IXFR > /dev/null
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no 'IXFR successful' message in the BIND 10 log"
+ exit 1
+fi
+
+# Look in the named log file to see if a TCP IXFR was requested. Again use a
+# simple grep.
+grep "transfer of" ns1/named.run | grep "sending TCP message" > /dev/null
+if [ $? -ne 0 ];
+then
+ echo "R:$SERVER_NAME FAIL no 'sending TCP' message in the BIND 9 log"
+ exit 1
+fi
+
+grep "IXFR ended" ns1/named.run > /dev/null
+if [ $? -ne 0 ];
+then
+ echo "R:$SERVER_NAME FAIL no 'IXFR ended' message in the BIND 9 log"
+ exit 1
+fi
+
+echo "I:exit status: 0"
+exit 0
diff --git a/tests/system/ixfr/in-3/clean.sh b/tests/system/ixfr/in-3/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-3/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-3/ns1/README b/tests/system/ixfr/in-3/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-3/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-3/nsx2/README b/tests/system/ixfr/in-3/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-3/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-3/setup.sh.in b/tests/system/ixfr/in-3/setup.sh.in
new file mode 100644
index 0000000..867e06e
--- /dev/null
+++ b/tests/system/ixfr/in-3/setup.sh.in
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the latest version of
+# the zone.
+cp -f $IXFR_TOP/named_noixfr.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n0 ns1/db.example
+
+# Set up the IXFR client - load a previous version of the zone.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2
diff --git a/tests/system/ixfr/in-3/tests.sh b/tests/system/ixfr/in-3/tests.sh
new file mode 100644
index 0000000..d47a221
--- /dev/null
+++ b/tests/system/ixfr/in-3/tests.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the third IXFR-IN test. A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and has IXFRs disabled.
+# A BIND 10 nameserver (the "client") is loaded with version N-2 of the zone.
+# A NOTIFY is sent to it, and it is expected that it will send an IXFR to the
+# server; the server should not respond to the request, so the client should
+# then send an AXFR request and receive the latest copy of the zone.
+
+# TODO: it seems BIND 9 still allows IXFR even with 'provide-ixfr no;' set
+
+. ../ixfr_init.sh
+status=$?
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:SOA serial of IXFR client $CLIENT_NAME is $old_client_serial"
+
+# Get the IXFR server to notify the IXFR client of the new version of the zone
+# by triggering a re-notification from the server.
+echo "I:notifying IXFR-client $CLIENT_NAME of presence of new version of zone"
+do_rndc $SERVER_NAME $SERVER_IP notify example
+status=`expr $status + $?`
+
+# Wait for the client to update itself.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+status=`expr $status + $?`
+
+# Once the client has updated, get the latest serials of the client and server -
+# they should be the same.
+compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+status=`expr $status + $?`
+
+# Check the BIND 10 log for the IXFR attempt and the fallback to AXFR.
+grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
+ exit 1
+fi
+
+grep XFRIN_XFR_TRANSFER_FALLBACK nsx2/bind10.run
+if [ $? -ne 0 ];
+then
+ echo "R:$CLIENT_NAME FAIL no fallback message in BIND10 log"
+ exit 1
+fi
+
+echo "I:exit status: $status"
+exit $status
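In addition to comparing SOA serials, the transferred data itself could be verified with the compare_zones helper defined in ixfr_init.sh; a possible extra check along the lines of the existing ones would be:

    # Optional stronger check: compare the full zone contents of server and client.
    compare_zones $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
    status=`expr $status + $?`

(This would need to run before the final "exit $status" of the script.)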
diff --git a/tests/system/ixfr/in-4/clean.sh b/tests/system/ixfr/in-4/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-4/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-4/ns1/README b/tests/system/ixfr/in-4/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-4/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-4/nsx2/README b/tests/system/ixfr/in-4/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-4/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence. It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-4/setup.sh.in b/tests/system/ixfr/in-4/setup.sh.in
new file mode 100644
index 0000000..7419e27
--- /dev/null
+++ b/tests/system/ixfr/in-4/setup.sh.in
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the last-but-one version
+# of the zone.
+cp $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n2.refresh ns1/db.example
+
+# Set up the IXFR client - load a previous version of the zone with a short
+# refresh time.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2.refresh
diff --git a/tests/system/ixfr/in-4/tests.sh b/tests/system/ixfr/in-4/tests.sh
new file mode 100644
index 0000000..3024253
--- /dev/null
+++ b/tests/system/ixfr/in-4/tests.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the fourth IXFR-IN test. A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and has IXFRs enabled.
+# A BIND 10 nameserver (the "client") is loaded with version N-2 of the zone
+# and a small refresh time. After this expires, the IXFR client should send
+# an IXFR request to the IXFR server.
+
+. ../ixfr_init.sh
+status=$?
+
+# Ensure the server has the latest copy of the zone. The implicit assumption
+# here is that starting the two systems and reloading the IXFR server takes
+# less time than the SOA refresh time set in the "db.example.n2.refresh" zone
+# file.
+cp $IXFR_TOP/db.example.n0 ns1/db.example
+do_rndc $SERVER_NAME $SERVER_IP reload
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:SOA serial of IXFR client $CLIENT_NAME is $old_client_serial"
+
+# Wait for the client to update itself. 30 seconds has been given as the
+# refresh interval and 2 seconds as the retry interval. The wait_for_update
+# function will check for up to a minute looking for the new serial.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+status=`expr $status + $?`
+
+# Once the client has updated, get the latest serials of the client and server -
+# they should be the same.
+compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+status=`expr $status + $?`
+
+# TODO: Check the BIND 10 log, looking for the IXFR messages that indicate that
+# the client has initiated the IXFR.
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/ixfr/ixfr_init.sh.in b/tests/system/ixfr/ixfr_init.sh.in
new file mode 100644
index 0000000..ba6049e
--- /dev/null
+++ b/tests/system/ixfr/ixfr_init.sh.in
@@ -0,0 +1,330 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This file should be run by all IXFR tests before doing anything else. It
+# includes the main configuration script to set the environment variables and
+# defines useful shell subroutines.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Useful symbols used in the IXFR tests.
+
+# Short-hand for getting SOA - just supply address of the server
+DIG_SOA="$DIG +norecurse +short -p $DNS_PORT example. SOA"
+
+# All IXFR tests use a BIND 9 server serving a BIND 10 client. These have the
+# same name and use the same address in all tests.
+SERVER_NAME=ns1
+SERVER_IP=10.53.0.1 # BIND 9
+
+CLIENT_NAME=nsx2
+CLIENT_IP=10.53.0.2 # BIND 10
+
+# \brief Check Arguments
+#
+# Most functions take the name of the nameserver as the first argument and its IP
+# address as the second. This function is passed "$*" and just checks that
+# both $1 and $2 are defined.
+#
+# \arg $* Arguments passed to caller
+#
+# \return status 0 => $1 and $2 are defined, 1 => they are not.
+check_name_ip() {
+
+ if [ "$1" = "" ];
+ then
+ echo "R:FAIL name of server not supplied"
+ return 1
+ fi
+
+ if [ "$2" = "" ];
+ then
+ echo "R:FAIL IP address of server not supplied"
+ return 1
+ fi
+
+ return 0
+}
+
+
+# \brief Perform RNDC Command
+#
+# Controls the BIND 9 IXFR server. Called do_rndc (instead of rndc) to avoid
+# confusion if rndc itself is in the search path.
+#
+# \arg $1 - Name of the server (ns1, nsx2 etc.)
+# \arg $2 - IP address of the server
+# \arg $* - Command to execute (which may be multiple tokens)
+#
+# \return 0 on success, 1 on failure (in which case an error message will
+# have been output).
+do_rndc () {
+
+ # If the following checks fail, the code is wrong.
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL do_rndc - name or ip address of server not supplied"
+ return 1
+ fi
+
+ name=$1
+ shift
+ ip=$1
+ shift
+
+ if [ "$1" = "" ];
+ then
+ echo "R:FAIL do_rndc - rndc command not supplied"
+ return 1
+ fi
+
+ $RNDC -c $SYSTEM_TOP/common/rndc.conf -s $ip -p $RNDC_PORT $* 2>&1 \
+ | sed "s/^/I:$name /"
+}
+
+# \brief Wait for update
+#
+# Given a serial number and a server, poll the nameserver until the SOA serial
+# number is different from that given. The poll takes place every five seconds
+# for a minute.
+#
+# \arg $1 - Name of the server
+# \arg $2 - IP address of the server
+# \arg $3 - Serial number to check against
+#
+# \return 0 if the serial number has changed (another query is needed to obtain
+# the new value), 1 if the serial number has not changed after one minute.
+wait_for_update() {
+
+ # If the following checks fail, the code is wrong.
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL wait_for_update - name or ip address of system not supplied"
+ return 1
+
+ fi
+
+ name=$1
+ shift
+ ip=$1
+ shift
+
+ serial=$1
+ if [ "$serial" = "" ];
+ then
+ echo "R:FAIL wait_for_update - serial number not supplied"
+ return 1
+ fi
+
+ # Now poll the server looking for the new serial number
+
+ echo "I:$name waiting for SOA serial to change from $serial"
+ for i in 1 2 3 4 5 6 7 8 9 10 11 12
+ do
+ if [ $i -gt 1 ];
+ then
+ sleep 5
+ fi
+
+ new_serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
+ if [ "$new_serial" != "$serial" ];
+ then
+ echo "I:$name SOA serial was at $serial, now at $new_serial"
+ return 0
+ fi
+ done
+
+ echo "R:$name FAIL serial number has not updated"
+ return 1
+}
+
+
+
+# \brief Update server zone
+#
+# Reloads the example. zone in the BIND 9 IXFR server and waits a maximum of
+# one minute for it to be served.
+#
+# \arg $1 - Name of the server (ns1, nsx2 etc.)
+# \arg $2 - IP address of the server
+# \arg $3 - Zone file to load
+#
+# \return 0 on success, 1 on failure (for which an error message will have
+# been output).
+update_server_zone() {
+
+ # If the following checks fail, the code is wrong.
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL update_server_zone - name or ip address of server not supplied"
+ return 1
+ fi
+
+ name=$1
+ shift
+ ip=$1
+ shift
+
+ file=$1
+ shift
+ if [ "$file" = "" ];
+ then
+ echo "R:FAIL update_server_zone - new zone file not supplied"
+ return 1
+ fi
+
+ if [ ! -e $file ];
+ then
+ echo "R:FAIL update_server_zone - zone file does not exist: $file"
+ return 1
+ fi
+
+ old_serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
+
+ echo "I:$name IXFR server loading $file"
+ cp $file $name/db.example
+ do_rndc $name $ip reload
+ if [ $? -ne 0 ];
+ then
+ return 1 # Message will have already been output
+ fi
+
+ wait_for_update $name $ip $old_serial
+ if [ $? -ne 0 ];
+ then
+ echo "R:$name FAIL IXFR server did not update zone after reload"
+ return 1
+ fi
+ new_serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
+
+ return 0
+}
+
+# \brief Compare client and server SOAs
+#
+# Checks the SOAs of two systems and reports if they are not equal.
+#
+# \arg $1 Name of the IXFR server
+# \arg $2 IP of the IXFR server
+# \arg $3 Name of the IXFR client
+# \arg $4 IP of the IXFR client
+#
+# \return 0 if the systems have the same SOA, 1 if not. In the latter case,
+# an error will be output.
+compare_soa() {
+
+ # If the following checks fail, the code is wrong.
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL compare_soa - name or ip address of server not supplied"
+ return 1
+ fi
+
+ server_name=$1
+ shift
+ server_ip=$1
+ shift
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL compare_soa - name or ip address of client not supplied"
+ return 1
+ fi
+
+ client_name=$1
+ shift
+ client_ip=$1
+ shift
+
+ client_serial=`$DIG_SOA @$client_ip | $AWK '{print $3}'`
+ server_serial=`$DIG_SOA @$server_ip | $AWK '{print $3}'`
+ if [ "$client_serial" != "$server_serial" ];
+ then
+ echo "R:FAIL client $client_name serial $client_serial not same as server $server_name serial $server_serial"
+ return 1
+ fi
+
+ return 0
+}
+
+# \brief Compare client and server zones
+#
+# Checks the zones of two systems and reports if they are not identical.
+#
+# The check is simplistic. Each zone is listed via "dig", after which comment
+# lines, blank lines and spaces/tabs are removed, and the result sorted. The
+# output from each system is then compared. They should be identical.
+#
+# \arg $1 Name of the IXFR server
+# \arg $2 IP of the IXFR server
+# \arg $3 Name of the IXFR client
+# \arg $4 IP of the IXFR client
+#
+# \return 0 if the zones are the same, 1 if not.
+compare_zones() {
+
+ # If the following checks fail, the code is wrong.
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL compare_zones - name or ip address of server not supplied"
+ return 1
+ fi
+
+ server_name=$1
+ shift
+ server_ip=$1
+ shift
+
+ check_name_ip $*
+ if [ $? -ne 0 ];
+ then
+ echo "R:FAIL compare_zones - name or ip address of client not supplied"
+ return 1
+ fi
+
+ client_name=$1
+ shift
+ client_ip=$1
+ shift
+
+ $DIG @$client_ip -p $DNS_PORT example. axfr | grep -v '^;' | grep -v '^$' \
+ | sed -e 's/ //g' -e 's/\t//g' | sort > client.dig
+ $DIG @$server_ip -p $DNS_PORT example. axfr | grep -v '^;' | grep -v '^$' \
+ | sed -e 's/ //g' -e 's/\t//g' | sort > server.dig
+ diff client.dig server.dig
+ if [ $? -eq 0 ];
+ then
+ echo "I:client and server zones identical"
+ else
+ echo "R:FAIL client $client_name zone not same as server $server_name zone"
+ return 1
+ fi
+
+ return 0
+}
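For reference, the per-test scripts combine these helpers in roughly the following order; this is a condensed sketch drawn from the in-2 and in-3 scripts above, not an additional test:

    . ../ixfr_init.sh                                     # environment and helpers
    old_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'` # remember the client serial
    update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n0
    do_rndc $SERVER_NAME $SERVER_IP notify example        # prod the client via NOTIFY
    wait_for_update $CLIENT_NAME $CLIENT_IP $old_serial   # poll until the serial changes
    compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP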
diff --git a/tests/system/ixfr/named_noixfr.conf b/tests/system/ixfr/named_noixfr.conf
new file mode 100644
index 0000000..d171876
--- /dev/null
+++ b/tests/system/ixfr/named_noixfr.conf
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007, 2011 Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+ inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+ query-source address 10.53.0.1;
+ notify-source 10.53.0.1;
+ transfer-source 10.53.0.1;
+ port 53210;
+ pid-file "named.pid";
+ listen-on { 10.53.0.1; };
+ listen-on-v6 { none; };
+ recursion no;
+ ixfr-from-differences no;
+ notify explicit;
+ also-notify { 10.53.0.2; };
+ provide-ixfr no;
+};
+
+zone "example" {
+ type master;
+ file "db.example";
+};
diff --git a/tests/system/ixfr/named_nonotify.conf b/tests/system/ixfr/named_nonotify.conf
new file mode 100644
index 0000000..c08c212
--- /dev/null
+++ b/tests/system/ixfr/named_nonotify.conf
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007, 2011 Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+ inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+ query-source address 10.53.0.1;
+ notify-source 10.53.0.1;
+ transfer-source 10.53.0.1;
+ port 53210;
+ pid-file "named.pid";
+ listen-on { 10.53.0.1; };
+ listen-on-v6 { none; };
+ recursion no;
+ ixfr-from-differences yes;
+ notify no;
+};
+
+zone "example" {
+ type master;
+ file "db.example";
+};
diff --git a/tests/system/ixfr/named_notify.conf b/tests/system/ixfr/named_notify.conf
new file mode 100644
index 0000000..df45e6f
--- /dev/null
+++ b/tests/system/ixfr/named_notify.conf
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007, 2011 Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+ inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+ query-source address 10.53.0.1;
+ notify-source 10.53.0.1;
+ transfer-source 10.53.0.1;
+ port 53210;
+ pid-file "named.pid";
+ listen-on { 10.53.0.1; };
+ listen-on-v6 { none; };
+ recursion no;
+ ixfr-from-differences yes;
+ notify explicit;
+ also-notify { 10.53.0.2; };
+};
+
+zone "example" {
+ type master;
+ file "db.example";
+};
diff --git a/tests/system/run.sh b/tests/system/run.sh
deleted file mode 100755
index 4f852f4..0000000
--- a/tests/system/run.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2004, 2007, 2010 Internet Systems Consortium, Inc. ("ISC")
-# Copyright (C) 2000, 2001 Internet Software Consortium.
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-#
-# Run a system test.
-#
-
-SYSTEMTESTTOP=.
-. $SYSTEMTESTTOP/conf.sh
-
-stopservers=true
-
-case $1 in
- --keep) stopservers=false; shift ;;
-esac
-
-test $# -gt 0 || { echo "usage: $0 [--keep] test-directory" >&2; exit 1; }
-
-test=$1
-shift
-
-test -d $test || { echo "$0: $test: no such test" >&2; exit 1; }
-
-echo "S:$test:`date`" >&2
-echo "T:$test:1:A" >&2
-echo "A:System test $test" >&2
-
-if [ x$PERL = x ]
-then
- echo "I:Perl not available. Skipping test." >&2
- echo "R:UNTESTED" >&2
- echo "E:$test:`date`" >&2
- exit 0;
-fi
-
-$PERL $TESTSOCK || {
- echo "I:Network interface aliases not set up. Skipping test." >&2;
- echo "R:UNTESTED" >&2;
- echo "E:$test:`date`" >&2;
- exit 0;
-}
-
-
-# Check for test-specific prerequisites.
-test ! -f $test/prereq.sh || ( cd $test && sh prereq.sh "$@" )
-result=$?
-
-if [ $result -eq 0 ]; then
- : prereqs ok
-else
- echo "I:Prerequisites for $test missing, skipping test." >&2
- [ $result -eq 255 ] && echo "R:SKIPPED" || echo "R:UNTESTED"
- echo "E:$test:`date`" >&2
- exit 0
-fi
-
-# Check for PKCS#11 support
-if
- test ! -f $test/usepkcs11 || sh cleanpkcs11.sh
-then
- : pkcs11 ok
-else
- echo "I:Need PKCS#11 for $test, skipping test." >&2
- echo "R:PKCS11ONLY" >&2
- echo "E:$test:`date`" >&2
- exit 0
-fi
-
-# Set up any dynamically generated test data
-if test -f $test/setup.sh
-then
- ( cd $test && sh setup.sh "$@" )
-fi
-
-# Start name servers running
-$PERL start.pl $test || exit 1
-
-# Run the tests
-( cd $test ; sh tests.sh )
-
-status=$?
-
-if $stopservers
-then
- :
-else
- exit $status
-fi
-
-# Shutdown
-$PERL stop.pl $test
-
-status=`expr $status + $?`
-
-if [ $status != 0 ]; then
- echo "R:FAIL"
- # Don't clean up - we need the evidence.
- find . -name core -exec chmod 0644 '{}' \;
-else
- echo "R:PASS"
-
- # Clean up.
- if test -f $test/clean.sh
- then
- ( cd $test && sh clean.sh "$@" )
- fi
-fi
-
-echo "E:$test:`date`"
-
-exit $status
diff --git a/tests/system/run.sh.in b/tests/system/run.sh.in
new file mode 100755
index 0000000..619b865
--- /dev/null
+++ b/tests/system/run.sh.in
@@ -0,0 +1,125 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Run a system test.
+#
+
+SYSTEMTOP=@abs_top_builddir@/tests/system
+. $SYSTEMTOP/conf.sh
+
+stopservers=true
+
+case $1 in
+ --keep) stopservers=false; shift ;;
+esac
+
+test $# -gt 0 || { echo "usage: $0 [--keep] test-directory" >&2; exit 1; }
+
+test=$1
+shift
+
+test -d $test || { echo "$0: $test: no such test" >&2; exit 1; }
+
+echo "S:$test:`date`" >&2
+echo "T:$test:1:A" >&2
+echo "A:System test $test" >&2
+
+if [ x$PERL = x ]
+then
+ echo "I:Perl not available. Skipping test." >&2
+ echo "R:UNTESTED" >&2
+ echo "E:$test:`date`" >&2
+ exit 0;
+fi
+
+$PERL $TESTSOCK || {
+ echo "I:Network interface aliases not set up. Skipping test." >&2;
+ echo "R:UNTESTED" >&2;
+ echo "E:$test:`date`" >&2;
+ exit 0;
+}
+
+
+# Check for test-specific prerequisites.
+test ! -f $test/prereq.sh || ( cd $test && sh prereq.sh "$@" )
+result=$?
+
+if [ $result -eq 0 ]; then
+ : prereqs ok
+else
+ echo "I:Prerequisites for $test missing, skipping test." >&2
+ [ $result -eq 255 ] && echo "R:SKIPPED" || echo "R:UNTESTED"
+ echo "E:$test:`date`" >&2
+ exit 0
+fi
+
+# Check for PKCS#11 support
+if
+ test ! -f $test/usepkcs11 || sh cleanpkcs11.sh
+then
+ : pkcs11 ok
+else
+ echo "I:Need PKCS#11 for $test, skipping test." >&2
+ echo "R:PKCS11ONLY" >&2
+ echo "E:$test:`date`" >&2
+ exit 0
+fi
+
+# Set up any dynamically generated test data
+if test -f $test/setup.sh
+then
+ ( cd $test && sh setup.sh "$@" )
+fi
+
+# Start name servers running
+$PERL $SYSTEMTOP/start.pl $test || exit 1
+
+# Run the tests
+( cd $test ; sh tests.sh )
+
+status=$?
+
+if $stopservers
+then
+ :
+else
+ exit $status
+fi
+
+# Shutdown
+$PERL $SYSTEMTOP/stop.pl $test
+
+status=`expr $status + $?`
+
+if [ $status != 0 ]; then
+ echo "R:FAIL"
+ # Don't clean up - we need the evidence.
+ find . -name core -exec chmod 0644 '{}' \;
+else
+ echo "R:PASS"
+
+ # Clean up.
+ if test -f $test/clean.sh
+ then
+ ( cd $test && sh clean.sh "$@" )
+ fi
+fi
+
+echo "E:$test:`date`"
+
+exit $status
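Invocation is unchanged from the old run.sh; for example, assuming the IXFR tests are addressed by their directory relative to tests/system:

    sh run.sh ixfr/in-1          # run one test, stopping the servers afterwards
    sh run.sh --keep ixfr/in-2   # run a test and leave the servers running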
diff --git a/tests/system/start.pl b/tests/system/start.pl
index 56f00c4..daa4577 100755
--- a/tests/system/start.pl
+++ b/tests/system/start.pl
@@ -53,7 +53,7 @@ if ($server && !-d "$test/$server") {
my $topdir = abs_path("$test/..");
my $testdir = abs_path("$test");
my $RUN_BIND10 = $ENV{'RUN_BIND10'};
-my $NAMED = $ENV{'NAMED'};
+my $NAMED = $ENV{'BIND9_NAMED'};
my $LWRESD = $ENV{'LWRESD'};
my $DIG = $ENV{'DIG'};
my $PERL = $ENV{'PERL'};
@@ -182,7 +182,7 @@ sub start_server {
exit 1;
}
- # print "I:starting server $server\n";
+ print "I:starting server $server\n";
chdir "$testdir/$server";
diff --git a/tests/tools/badpacket/Makefile.am b/tests/tools/badpacket/Makefile.am
index 7df7077..fcba404 100644
--- a/tests/tools/badpacket/Makefile.am
+++ b/tests/tools/badpacket/Makefile.am
@@ -29,5 +29,5 @@ badpacket_LDADD = $(top_builddir)/src/lib/asiodns/libasiodns.la
badpacket_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
badpacket_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
badpacket_LDADD += $(top_builddir)/src/lib/log/liblog.la
+badpacket_LDADD += $(top_builddir)/src/lib/util/libutil.la
badpacket_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-
diff --git a/tests/tools/badpacket/tests/Makefile.am b/tests/tools/badpacket/tests/Makefile.am
index e83c3b6..2daa664 100644
--- a/tests/tools/badpacket/tests/Makefile.am
+++ b/tests/tools/badpacket/tests/Makefile.am
@@ -21,12 +21,12 @@ run_unittests_SOURCES += $(top_builddir)/tests/tools/badpacket/command_options.c
run_unittests_SOURCES += $(top_builddir)/tests/tools/badpacket/option_info.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDFLAGS += $(top_builddir)/src/lib/log/liblog.la
-run_unittests_LDFLAGS += $(top_builddir)/src/lib/exceptions/libexceptions.la
-run_unittests_LDFLAGS += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/tests/tools/badpacket/tests/run_unittests.cc b/tests/tools/badpacket/tests/run_unittests.cc
index 624cf6f..6eeca75 100644
--- a/tests/tools/badpacket/tests/run_unittests.cc
+++ b/tests/tools/badpacket/tests/run_unittests.cc
@@ -15,10 +15,11 @@
#include <config.h>
#include <gtest/gtest.h>
+#include <util/unittests/run_all.h>
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
- return (RUN_ALL_TESTS());
+ return (isc::util::unittests::run_all());
}
diff --git a/tools/system_messages.py b/tools/system_messages.py
new file mode 100644
index 0000000..7b0d60c
--- /dev/null
+++ b/tools/system_messages.py
@@ -0,0 +1,419 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Produce System Messages Manual
+#
+# This tool reads all the .mes files in the directory tree whose root is given
+# on the command line and interprets them as BIND 10 message files. It pulls
+# all the messages and descriptions out, sorts them by message ID, and writes
+# them out as a single (formatted) file.
+#
+# Invocation:
+# The code is invoked using the command line:
+#
+# python system_messages.py [-o <output-file>] <top-source-directory>
+#
+# If no output file is specified, output is written to stdout.
+
+import re
+import os
+import sys
+from optparse import OptionParser
+
+# Main dictionary holding all the messages. The messages are accumulated here
+# before being printed in alphabetical order.
+dictionary = {}
+
+# The structure of the output page is:
+#
+# header
+# message
+# separator
+# message
+# separator
+# :
+# separator
+# message
+# trailer
+#
+# (Indentation is not relevant - it has only been added to the above
+# illustration to make the structure clearer.) The text of these sections is:
+
+# Header - this is output before anything else.
+SEC_HEADER="""<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
+<!ENTITY mdash "&#8212;" >
+<!ENTITY % version SYSTEM "version.ent">
+%version;
+]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
+<book>
+ <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
+
+ <bookinfo>
+ <title>BIND 10 Messages Manual</title>
+
+ <copyright>
+ <year>2011</year><holder>Internet Systems Consortium, Inc.</holder>
+ </copyright>
+
+ <abstract>
+ <para>BIND 10 is a Domain Name System (DNS) suite managed by
+ Internet Systems Consortium (ISC). It includes DNS libraries
+ and modular components for controlling authoritative and
+ recursive DNS servers.
+ </para>
+ <para>
+ This is the messages manual for BIND 10 version &__VERSION__;.
+ The most up-to-date version of this document, along with
+ other documents for BIND 10, can be found at
+ <ulink url="http://bind10.isc.org/docs"/>.
+ </para>
+ </abstract>
+
+ <releaseinfo>This is the messages manual for BIND 10 version
+ &__VERSION__;.</releaseinfo>
+ </bookinfo>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ This document lists each message that can be logged by the
+ programs in the BIND 10 package. Each entry in this manual
+ is of the form:
+ <screen>IDENTIFICATION message-text</screen>
+ ... where "IDENTIFICATION" is the message identification included
+ in each message logged and "message-text" is the accompanying
+ message text. The "message-text" may include placeholders of the
+ form "%1", "%2" etc.; these parameters are replaced by relevant
+ values when the message is logged.
+ </para>
+ <para>
+ Each entry is also accompanied by a description giving more
+ information about the circumstances that result in the message
+ being logged.
+ </para>
+ <para>
+ For information on configuring and using BIND 10 logging,
+ refer to the <ulink url="bind10-guide.html">BIND 10 Guide</ulink>.
+ </para>
+ </chapter>
+
+ <chapter id="messages">
+ <title>BIND 10 Messages</title>
+ <para>
+ <variablelist>
+"""
+
+# This is output once for each message. The string contains substitution
+# tokens: $I is replaced by the message identification, $T by the message text,
+# and $D by the message description.
+SEC_MESSAGE = """<varlistentry id="$I">
+<term>$I $T</term>
+<listitem><para>
+$D
+</para></listitem>
+</varlistentry>"""
+
+# A description may contain blank lines intended to separate paragraphs. If so,
+# each blank line is replaced by the following.
+SEC_BLANK = "</para><para>"
+
+# The separator is copied to the output verbatim after each message except
+# the last.
+SEC_SEPARATOR = ""
+
+# The trailer is copied to the output verbatim after the last message.
+SEC_TRAILER = """ </variablelist>
+ </para>
+ </chapter>
+</book>"""
+
+
+def reportError(filename, what):
+ """Report an error and exit"""
+ print("*** ERROR in ", filename, file=sys.stderr)
+ print("*** REASON: ", what, file=sys.stderr)
+ print("*** System message generator terminating", file=sys.stderr)
+ sys.exit(1)
+
+
+
+def replaceTag(string):
+    """Replaces the '<' and '>' in text about to be inserted into the template
+    sections above with &lt; and &gt; to avoid problems with message text
+    being interpreted as XML text.
+    """
+    string1 = string.replace("<", "&lt;")
+    string2 = string1.replace(">", "&gt;")
+    return string2
+
+
+
+def replaceBlankLines(lines):
+ """Replaces blank lines in an array with the contents of the 'blank'
+ section.
+ """
+ result = []
+ for l in lines:
+ if len(l) == 0:
+ result.append(SEC_BLANK)
+ else:
+ result.append(l)
+
+ return result
+
+
+
+# Printing functions
+def printHeader():
+ print(SEC_HEADER)
+
+def printSeparator():
+ print(SEC_SEPARATOR)
+
+def printMessage(msgid):
+ # In the message ID, replace "<" and ">" with XML-safe versions and
+ # substitute into the data.
+ m1 = SEC_MESSAGE.replace("$I", replaceTag(msgid))
+
+ # Do the same for the message text.
+ m2 = m1.replace("$T", replaceTag(dictionary[msgid]['text']))
+
+ # Do the same for the description then replace blank lines with the
+ # specified separator. (We do this in that order to avoid replacing
+ # the "<" and ">" in the XML tags in the separator.)
+ desc1 = [replaceTag(l) for l in dictionary[msgid]['description']]
+ desc2 = replaceBlankLines(desc1)
+
+ # Join the lines together to form a single string and insert into
+ # current text.
+ m3 = m2.replace("$D", "\n".join(desc2))
+
+ print(m3)
+
+def printTrailer():
+ print(SEC_TRAILER)
+
+
+
+def removeEmptyLeadingTrailing(lines):
+ """Removes leading and trailing empty lines.
+
+ A list of strings is passed as argument, some of which may be empty.
+    This function removes from the start and end of the list a contiguous
+    sequence of empty lines and returns the result. Embedded sequences of
+    empty lines are not touched.
+
+ Parameters:
+ lines List of strings to be modified.
+
+ Return:
+ Input list of strings with leading/trailing blank line sequences
+ removed.
+ """
+
+ retlines = []
+
+ # Dispose of degenerate case of empty array
+ if len(lines) == 0:
+ return retlines
+
+ # Search for first non-blank line
+ start = 0
+ while start < len(lines):
+ if len(lines[start]) > 0:
+ break
+ start = start + 1
+
+ # Handle case when entire list is empty
+ if start >= len(lines):
+ return retlines
+
+ # Search for last non-blank line
+ finish = len(lines) - 1
+ while finish >= 0:
+ if len(lines[finish]) > 0:
+ break
+ finish = finish - 1
+
+ retlines = lines[start:finish + 1]
+ return retlines
+
+
+
+def addToDictionary(msgid, msgtext, desc, filename):
+ """Add the current message ID and associated information to the global
+ dictionary. If a message with that ID already exists, loop appending
+ suffixes of the form "(n)" to it until one is found that doesn't.
+
+ Parameters:
+ msgid Message ID
+ msgtext Message text
+ desc Message description
+ filename File from which the message came. Currently this is
+ not used, but a future enhancement may wish to include the
+ name of the message file in the messages manual.
+ """
+
+    # If the ID is in the dictionary, append a "(n)" to the name - this will
+ # flag that there are multiple instances. (However, this is an error -
+ # each ID should be unique in BIND-10.)
+ if msgid in dictionary:
+ i = 1
+ while msgid + " (" + str(i) + ")" in dictionary:
+ i = i + 1
+ msgid = msgid + " (" + str(i) + ")"
+
+ # Remove leading and trailing blank lines in the description, then
+ # add everything into a subdictionary which is then added to the main
+ # one.
+ details = {}
+ details['text'] = msgtext
+ details['description'] = removeEmptyLeadingTrailing(desc)
+ details['filename'] = filename
+ dictionary[msgid] = details
+
+
+
+def processFileContent(filename, lines):
+ """Processes file content. Messages and descriptions are identified and
+ added to a dictionary (keyed by message ID). If the key already exists,
+ a numeric suffix is added to it.
+
+ Parameters:
+ filename Name of the message file being processed
+ lines Lines read from the file
+ """
+
+ prefix = "" # Last prefix encountered
+ msgid = "" # Last message ID encountered
+ msgtext = "" # Text of the message
+ description = [] # Description
+
+ for l in lines:
+ if l.startswith("$"):
+ # Starts with "$". Ignore anything other than $PREFIX
+ words = re.split("\s+", l)
+ if words[0].upper() == "$PREFIX":
+ if len(words) == 1:
+ prefix = ""
+ else:
+ prefix = words[1]
+
+ elif l.startswith("%"):
+ # Start of a message. Add the message we were processing to the
+ # dictionary and clear everything apart from the file name.
+ if msgid != "":
+ addToDictionary(msgid, msgtext, description, filename)
+
+ msgid = ""
+ msgtext = ""
+ description = []
+
+ # Start of a message
+ l = l[1:].strip() # Remove "%" and trim leading spaces
+ if len(l) == 0:
+                reportError(filename, "Line with single % found")
+                continue
+
+ # Split into words. The first word is the message ID
+ words = re.split("\s+", l)
+ msgid = (prefix + words[0]).upper()
+ msgtext = l[len(words[0]):].strip()
+
+ else:
+ # Part of a description, so add to the current description array
+ description.append(l)
+
+    # All done, add the last message to the global dictionary.
+ if msgid != "":
+ addToDictionary(msgid, msgtext, description, filename)
+
+
+
+def processFile(filename):
+ """Processes a file by reading it in and stripping out all comments and
+    directives. Leading and trailing blank lines in the file are removed
+ and the remainder passed for message processing.
+
+ Parameters:
+ filename Name of the message file to process
+ """
+ lines = open(filename).readlines();
+
+ # Trim leading and trailing spaces from each line, and remove comments.
+ lines = [l.strip() for l in lines]
+ lines = [l for l in lines if not l.startswith("#")]
+
+ # Remove leading/trailing empty line sequences from the result
+ lines = removeEmptyLeadingTrailing(lines)
+
+ # Interpret content
+ processFileContent(filename, lines)
+
+
+
+def processAllFiles(root):
+ """Iterates through all files in the tree starting at the given root and
+ calls processFile for all .mes files found.
+
+ Parameters:
+ root Directory that is the root of the BIND-10 source tree
+ """
+ for (path, dirs, files) in os.walk(root):
+
+ # Identify message files
+ mes_files = [f for f in files if f.endswith(".mes")]
+
+ # ... and process each file in the list
+ for m in mes_files:
+ processFile(path + os.sep + m)
+
+
+# Main program
+if __name__ == "__main__":
+ parser = OptionParser(usage="Usage: %prog [--help | options] root")
+ parser.add_option("-o", "--output", dest="output", default=None,
+ metavar="FILE",
+ help="output file name (default to stdout)")
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.error("Must supply directory at which to begin search")
+ elif len(args) > 1:
+ parser.error("Only a single root directory can be given")
+
+ # Redirect output if specified (errors are written to stderr)
+ if options.output is not None:
+ sys.stdout = open(options.output, 'w')
+
+ # Read the files and load the data
+ processAllFiles(args[0])
+
+ # Now just print out everything we've read (in alphabetical order).
+ count = 1
+ printHeader()
+ for msgid in sorted(dictionary):
+ if count > 1:
+ printSeparator()
+ count = count + 1
+ printMessage(msgid)
+ printTrailer()
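As the header comment of tools/system_messages.py says, the tool walks a source tree for .mes files and writes a DocBook manual. A possible invocation (the output file name here is only an example):

    # Generate the messages manual from the top of a BIND 10 source tree.
    python tools/system_messages.py -o bind10-messages.xml .

A message file, as processFileContent() parses it, consists of an optional $PREFIX directive, "%" lines carrying a message ID and its text, and free-form description lines; a hypothetical sample for illustration only:

    $PREFIX DEMO_
    % STARTUP server starting, version %1
    This message is logged when the server starts; %1 is the version string.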
More information about the bind10-changes
mailing list