BIND 10 exp/res-research, updated. 8b563bac04ff5d49e0cf181df643b2e5e6688ef6 [res-research] correctly handle delegation chain fixup on replay.
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Jul 17 16:58:23 UTC 2012
The branch, exp/res-research has been updated
via 8b563bac04ff5d49e0cf181df643b2e5e6688ef6 (commit)
via 8fb1f716cc914aad1a7a46dfe020bcf1a7a3e11a (commit)
from a29ce7e1d04d4ff9c6cd9694b52cb16ed03d27e3 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 8b563bac04ff5d49e0cf181df643b2e5e6688ef6
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Tue Jul 17 09:57:59 2012 -0700
[res-research] correctly handle delegation chain fixup on replay.
commit 8fb1f716cc914aad1a7a46dfe020bcf1a7a3e11a
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Mon Jul 16 18:19:56 2012 -0700
[res-research] more statistics
-----------------------------------------------------------------------
Summary of changes:
exp/res-research/analysis/dns_cache.py | 69 ++++++++++++++++++++++++----
exp/res-research/analysis/mini_resolver.py | 13 ++++--
exp/res-research/analysis/query_replay.py | 51 +++++++++++++++++---
3 files changed, 115 insertions(+), 18 deletions(-)
-----------------------------------------------------------------------
diff --git a/exp/res-research/analysis/dns_cache.py b/exp/res-research/analysis/dns_cache.py
index 02cefe8..a5e9b7d 100755
--- a/exp/res-research/analysis/dns_cache.py
+++ b/exp/res-research/analysis/dns_cache.py
@@ -367,19 +367,72 @@ class SimpleDNSCache:
def dump_ttl_stat(self, f, used_only=True):
total_stat = {} # TTL => counter
+ answer_stat = {} # TTL => counter
+ authority_stat = {} # TTL => counter
+ glue_stat = {} # TTL => counter
+ nonglue_stat = {} # TTL => counter
for rdata_map in self.__table.values():
for entries in rdata_map.values():
for entry in entries:
if used_only and entry.time_updated is None:
continue
-
- if not entry.ttl in total_stat:
- total_stat[entry.ttl] = 0
- total_stat[entry.ttl] += 1
- ttl_list = list(total_stat.keys())
- ttl_list.sort()
- for ttl in ttl_list:
- f.write('%d,%d\n' % (ttl, total_stat[ttl]))
+ self.__update_ttl_stat(entry, total_stat)
+ self.__update_ttl_stat(entry, answer_stat,
+ self.TRUST_ANSWER)
+ self.__update_ttl_stat(entry, authority_stat,
+ self.TRUST_AUTHAUTHORITY)
+ self.__update_ttl_stat(entry, glue_stat, self.TRUST_GLUE)
+ if entry.trust == self.TRUST_GLUE:
+ for other_entry in entries:
+ if used_only and other_entry.time_updated is None:
+ continue
+ if (other_entry.trust == self.TRUST_ANSWER or
+ other_entry.trust == self.TRUST_AUTHAUTHORITY):
+ self.__update_ttl_stat(other_entry,
+ nonglue_stat)
+ for stat, desc in [(total_stat, "All"), (answer_stat, "Answer"),
+ (authority_stat, "Auth Authority"),
+ (glue_stat, "Glue"), (nonglue_stat, "Non Glue")]:
+ ttl_list = list(stat.keys())
+ ttl_list.sort()
+ f.write('%s TTL histogram\n' % (desc))
+ for ttl in ttl_list:
+ f.write('%d,%d\n' % (ttl, stat[ttl]))
+
+ def __update_ttl_stat(self, entry, stat, trust=None):
+ if trust is not None and entry.trust != trust:
+ return
+ if not entry.ttl in stat:
+ stat[entry.ttl] = 0
+ stat[entry.ttl] += 1
+
+ def dump_stat(self, f, used_only=True):
+ f.write('Cache content statistics\n')
+ TRUST_GLUE_PURGED = -1 # none of defined TRUST_xxx, local value here
+ stat = {} # TRUST_xxx => #-of-entries
+ stat[TRUST_GLUE_PURGED] = 0
+ for rdata_map in self.__table.values():
+ for entries in rdata_map.values():
+ for entry in entries:
+ if used_only and entry.time_updated is None:
+ continue
+ if not entry.trust in stat:
+ stat[entry.trust] = 0
+ stat[entry.trust] += 1
+ if entry.trust == self.TRUST_GLUE:
+ for other_entry in entries:
+ if used_only and other_entry.time_updated is None:
+ continue
+ if (other_entry.trust == self.TRUST_ANSWER or
+ other_entry.trust == self.TRUST_AUTHAUTHORITY):
+ stat[TRUST_GLUE_PURGED] += 1
+ for trust, desc in [(self.TRUST_LOCAL, "Local"),
+ (self.TRUST_ANSWER, "Answer"),
+ (self.TRUST_AUTHAUTHORITY, "Authority Records"),
+ (self.TRUST_GLUE, "Glue or Delegation"),
+ (TRUST_GLUE_PURGED, "Glues overridden")]:
+ if trust in stat:
+ f.write(' %s: %d\n' % (desc, stat[trust]))
def __serialize(self, f):
'''Dump cache database content to a file in serialized binary format.
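The dump_ttl_stat() rework above builds separate TTL histograms for all entries, answer records, authoritative authority records, glue, and non-glue records that share a name with glue, funneling every usable cache entry through the shared __update_ttl_stat() helper; the new dump_stat() counts entries per trust level in the same single pass over the table. A minimal sketch of that counting pattern, using a stand-in Entry tuple and made-up TRUST_* values rather than the real SimpleDNSCache internals:

    # Illustration only: Entry and the TRUST_* values below are stand-ins,
    # not the types or constants from dns_cache.py.
    import collections

    TRUST_ANSWER, TRUST_AUTHAUTHORITY, TRUST_GLUE = 1, 2, 3
    Entry = collections.namedtuple('Entry', ['ttl', 'trust'])

    def update_ttl_stat(entry, stat, trust=None):
        # Count this entry's TTL, optionally restricted to one trust level.
        if trust is not None and entry.trust != trust:
            return
        stat[entry.ttl] = stat.get(entry.ttl, 0) + 1

    entries = [Entry(300, TRUST_ANSWER), Entry(300, TRUST_GLUE),
               Entry(86400, TRUST_AUTHAUTHORITY)]
    total_stat, glue_stat = {}, {}
    for e in entries:
        update_ttl_stat(e, total_stat)
        update_ttl_stat(e, glue_stat, TRUST_GLUE)
    for ttl in sorted(total_stat):
        print('%d,%d' % (ttl, total_stat[ttl]))  # "300,2" then "86400,1"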
diff --git a/exp/res-research/analysis/mini_resolver.py b/exp/res-research/analysis/mini_resolver.py
index 685ac37..7595042 100755
--- a/exp/res-research/analysis/mini_resolver.py
+++ b/exp/res-research/analysis/mini_resolver.py
@@ -299,10 +299,10 @@ class ResolverContext:
# typical NXDOMAIN answer
if resp_msg.get_rr_count(Message.SECTION_AUTHORITY) > 0:
auth = resp_msg.get_section(Message.SECTION_AUTHORITY)[0]
- if auth.get_type() == RRType.SOA():
+ if self.__valid_soa(auth):
return SimpleDNSCache.RESP_NXDOMAIN_SOA
elif resp_msg.get_rr_count(Message.SECTION_AUTHORITY) == 0:
- return SimpleDNSCache.RESP_NXDOMAIN_NOAUTH
+ return SimpleDNSCache.RESP_NXDOMAIN_NOAUTH
return SimpleDNSCache.RESP_NXDOMAIN_UNEXPECTED
elif (resp_msg.get_header_flag(Message.HEADERFLAG_AA) and
resp_msg.get_rcode() == Rcode.NOERROR() and
@@ -310,7 +310,7 @@ class ResolverContext:
# typical NXRRSET answer
if resp_msg.get_rr_count(Message.SECTION_AUTHORITY) > 0:
auth = resp_msg.get_section(Message.SECTION_AUTHORITY)[0]
- if auth.get_type() == RRType.SOA():
+ if self.__valid_soa(auth):
return SimpleDNSCache.RESP_NXRRSET_SOA
elif resp_msg.get_rr_count(Message.SECTION_AUTHORITY) == 0:
return SimpleDNSCache.RESP_NXRRSET_NOAUTH
@@ -351,6 +351,13 @@ class ResolverContext:
return True
return False
+ def __valid_soa(self, auth_rrset):
+ if auth_rrset.get_type() != RRType.SOA():
+ return False
+ cmp_reln = self.__qname.compare(auth_rrset.get_name()).get_relation()
+ return (cmp_reln == NameComparisonResult.EQUAL or
+ cmp_reln == NameComparisonResult.SUBDOMAIN)
+
def __is_cname_response(self, resp_msg):
# From BIND 9: A BIND8 server could return a non-authoritative
# answer when a CNAME is followed. We should treat it as a valid
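The __valid_soa() addition above tightens negative-answer classification in mini_resolver.py: an NXDOMAIN or NXRRSET response only counts as RESP_NXDOMAIN_SOA / RESP_NXRRSET_SOA when the SOA in the authority section actually covers the query name, i.e. the query name is equal to or a subdomain of the SOA owner name. The committed code relies on the Name.compare() / NameComparisonResult API shown in the diff; a rough string-based stand-in for the same relation test looks like this:

    # Illustration only; the real check uses Name.compare() and
    # NameComparisonResult, not this label-by-label string comparison.
    def qname_covered_by_soa(qname, soa_owner):
        # True when qname equals soa_owner or is a subdomain of it.
        q = qname.rstrip('.').lower().split('.')
        s = soa_owner.rstrip('.').lower().split('.')
        return len(q) >= len(s) and q[len(q) - len(s):] == s

    print(qname_covered_by_soa('www.example.org', 'example.org'))  # True
    print(qname_covered_by_soa('www.example.org', 'example.com'))  # False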
diff --git a/exp/res-research/analysis/query_replay.py b/exp/res-research/analysis/query_replay.py
index 31c623f..6960cf0 100755
--- a/exp/res-research/analysis/query_replay.py
+++ b/exp/res-research/analysis/query_replay.py
@@ -64,6 +64,13 @@ class QueryTrace:
hits += log.hits
return hits
+ def get_cache_samettl_hits(self):
+        '''Return the total count of same-TTL cache hits for the query'''
+ hits = 0
+ for log in self.__cache_log:
+ hits += log.samettl_hits
+ return hits
+
def get_cache_misses(self):
'''Return the total count of cache misses for the query'''
misses = 0
@@ -114,6 +121,7 @@ class CacheLog:
def __init__(self, now, resp_list, on_miss=True):
self.time_last_used = now
self.hits = 0
+ self.samettl_hits = 0
self.misses = 1 if on_miss else 0
self.resp_list = resp_list
@@ -364,12 +372,26 @@ class ResolverContext:
for i in range(1, len(chain)):
entry = chain[i][2]
zname = chain[i][1].get_name()
- if (not entry.is_expired(self.__now) and
- self.__is_glue_active(zname, parent_zones[i], entry)):
+ if entry.is_expired(self.__now):
+ continue
+ if self.__is_glue_active(zname, parent_zones[i], entry):
self.dprint(LOGLVL_DEBUG10,
'located the deepest active delegation to %s at %s',
[zname, parent_zones[i]])
return chain[:i + 1], resp_list[:i]
+ if entry.trust != SimpleDNSCache.TRUST_GLUE:
+ # If it fails, we need to make sure this delegation comes from
+ # glue because it won't be the top level.
+ self.dprint(LOGLVL_DEBUG10,
+ 'replace delegation info to %s at %s',
+ [zname, parent_zones[i]])
+ rcode, rrset, id = \
+ self.__cache.find(zname, self.__qclass,
+ RRType.NS(),
+ SimpleDNSCache.FIND_ALLOW_NOANSWER,
+ SimpleDNSCache.TRUST_GLUE)
+ new_entry = self.__cache.get(id) # id shouldn't be None
+ chain[i] = (id, rrset, new_entry)
# In our setup root server should be always available.
raise QueryReplaceError('no name server found for ' +
@@ -593,6 +615,7 @@ class QueryReplay:
else:
if int(cache_log.time_last_used) == int(qry_time):
self.cache_samettl_hits += 1
+ cache_log.samettl_hits += 1
cache_log.time_last_used = qry_time
cache_log.hits += 1
self.cache_total_hits += 1
@@ -739,10 +762,11 @@ class QueryReplay:
def dump_popularity_stat(self, dump_file):
cumulative_n_qry = 0
cumulative_cache_hits = 0
+ cumulative_cache_samettl_hits = 0
position = 1
with open(dump_file, 'w') as f:
- f.write(('position,% in total,hit rate,#CNAME,av ext qry,' +
- 'resp-size\n'))
+ f.write('position,% in total,hit rate,same TTL rate,' +
+ '#CNAME,av ext qry,' + 'resp-size\n')
for qry_param in self.__get_query_params():
qinfo = self.__queries[qry_param]
n_queries = qinfo.get_query_count()
@@ -754,14 +778,19 @@ class QueryReplay:
cumulative_hit_rate = \
(float(cumulative_cache_hits) / cumulative_n_qry) * 100
+ cumulative_cache_samettl_hits += qinfo.get_cache_samettl_hits()
+ cumulative_samettl_hit_rate = \
+ (float(cumulative_cache_samettl_hits) /
+ cumulative_cache_hits) * 100
+
n_ext_queries_list = qinfo.get_external_query_count()
n_ext_queries = 0
for n in n_ext_queries_list:
n_ext_queries += n
- f.write('%d,%.2f,%.2f,%d,%.2f,%d\n' %
+ f.write('%d,%.2f,%.2f,%.2f,%d,%.2f,%d\n' %
(position, cumulative_percentage, cumulative_hit_rate,
- len(qinfo.cname_trace),
+ cumulative_samettl_hit_rate, len(qinfo.cname_trace),
float(n_ext_queries) / len(n_ext_queries_list),
qinfo.resp_size))
position += 1
@@ -820,10 +849,12 @@ class QueryReplay:
self.__extresp_stat[type]))
def dump_ttl_stat(self, dump_file):
- print('TTL statistics:\n')
with open(dump_file, 'w') as f:
self.__cache.dump_ttl_stat(f)
+ def dump_cache_stat(self):
+ self.__cache.dump_stat(sys.stdout)
+
def main(log_file, options):
cache = SimpleDNSCache()
cache.load(options.cache_dbfile)
@@ -848,12 +879,18 @@ def main(log_file, options):
replay.dump_extresp_stat()
if options.ttl_stat_file is not None:
replay.dump_ttl_stat(options.ttl_stat_file)
+ if options.dump_cache_stat:
+ replay.dump_cache_stat()
def get_option_parser():
parser = OptionParser(usage='usage: %prog [options] log_file')
parser.add_option("-c", "--cache-dbfile",
dest="cache_dbfile", action="store", default=None,
help="Serialized DNS cache DB")
+ parser.add_option("-C", "--dump-cache-stat",
+ dest="dump_cache_stat", action="store_true",
+ default=False,
+ help="dump cached entry counts per trust level")
parser.add_option("-d", "--dbg-level", dest="dbg_level", action="store",
default=0,
help="specify the verbosity level of debug output")