refactor(webconnectivity@v0.5): improve logging clarity (#964)

We're bumping the experiment's version number because we changed the name of the field used to contain late/duplicate DNS responses. We have also changed the algorithm used to determine `#dnsDiff`; however, this change should only affect how we log this information. Overall, the idea here is to give users a reasonably clear explanation of how the probe maps observations to `blocking` and `accessible`, using expected/unexpected as the conceptual framework (see the sketch below).

Part of https://github.com/ooni/probe/issues/2237
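To make the expected/unexpected framing concrete, here is a minimal, self-contained Go sketch. It is not the experiment's actual code: the flag constants, the testKeys struct, and the use of the standard log package are simplifications introduced only for illustration of how unexpected observations map to blocking/accessible and to ANOMALY/ACCESSIBLE log lines.

package main

import "log"

// Hypothetical, simplified flags standing in for the experiment's
// analysisFlag* constants (the real definitions live in the
// webconnectivity package and are not reproduced here).
const (
    flagDNSBlocking = 1 << iota
    flagTCPIPBlocking
    flagTLSBlocking
    flagHTTPBlocking
    flagHTTPDiff
    flagSuccess
)

// testKeys is a toy stand-in for the experiment's TestKeys.
type testKeys struct {
    BlockingFlags int64
    Blocking      any // false, or a string such as "dns"
    Accessible    bool
}

// analysisToplevel sketches how the flags set by the DNS/TCP/TLS/HTTP
// analyses (the "unexpected" observations) map onto blocking/accessible
// and onto ANOMALY/ACCESSIBLE log lines.
func (tk *testKeys) analysisToplevel() {
    switch {
    case (tk.BlockingFlags & flagDNSBlocking) != 0:
        tk.Blocking, tk.Accessible = "dns", false
    case (tk.BlockingFlags & flagTCPIPBlocking) != 0:
        tk.Blocking, tk.Accessible = "tcp_ip", false
    case (tk.BlockingFlags & (flagTLSBlocking | flagHTTPBlocking)) != 0:
        tk.Blocking, tk.Accessible = "http-failure", false
    case (tk.BlockingFlags & flagHTTPDiff) != 0:
        tk.Blocking, tk.Accessible = "http-diff", false
    case (tk.BlockingFlags & flagSuccess) != 0:
        tk.Blocking, tk.Accessible = false, true
    default:
        return // simplification: other cases are out of scope for this sketch
    }
    if tk.Accessible {
        log.Printf("ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
            tk.BlockingFlags, tk.Accessible, tk.Blocking)
        return
    }
    log.Printf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
        tk.BlockingFlags, tk.Accessible, tk.Blocking)
}

func main() {
    tk := &testKeys{BlockingFlags: flagTLSBlocking}
    tk.analysisToplevel() // logs an ANOMALY line with blocking="http-failure"
}

In the real experiment the logging happens inside each case, as the diff below shows; the sketch only illustrates the overall mapping.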
Simone Basso 2022-09-15 07:03:53 +02:00 committed by GitHub
parent d6a362d96f
commit 5e76c6ec92
9 changed files with 88 additions and 33 deletions


@@ -109,7 +109,7 @@ func (tk *TestKeys) analysisToplevel(logger model.Logger) {
         tk.Blocking = "dns"
         tk.Accessible = false
         logger.Warnf(
-            "ANOMALY: flags=%d accessible=%+v, blocking=%+v",
+            "ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
             tk.BlockingFlags, tk.Accessible, tk.Blocking,
         )
@@ -117,14 +117,14 @@ func (tk *TestKeys) analysisToplevel(logger model.Logger) {
         tk.Blocking = "tcp_ip"
         tk.Accessible = false
         logger.Warnf(
-            "ANOMALY: flags=%d accessible=%+v, blocking=%+v",
+            "ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
             tk.BlockingFlags, tk.Accessible, tk.Blocking,
         )
     case (tk.BlockingFlags & (analysisFlagTLSBlocking | analysisFlagHTTPBlocking)) != 0:
         tk.Blocking = "http-failure"
         tk.Accessible = false
-        logger.Warnf("ANOMALY: flags=%d accessible=%+v, blocking=%+v",
+        logger.Warnf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
             tk.BlockingFlags, tk.Accessible, tk.Blocking,
         )
@@ -132,7 +132,7 @@ func (tk *TestKeys) analysisToplevel(logger model.Logger) {
         tk.Blocking = "http-diff"
         tk.Accessible = false
         logger.Warnf(
-            "ANOMALY: flags=%d accessible=%+v, blocking=%+v",
+            "ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
             tk.BlockingFlags, tk.Accessible, tk.Blocking,
         )
@@ -140,7 +140,7 @@ func (tk *TestKeys) analysisToplevel(logger model.Logger) {
         tk.Blocking = false
         tk.Accessible = true
         logger.Infof(
-            "ACCESSIBLE: flags=%d accessible=%+v, blocking=%+v",
+            "ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
             tk.BlockingFlags, tk.Accessible, tk.Blocking,
         )


@@ -58,6 +58,7 @@ const (
 func (tk *TestKeys) analysisDNSToplevel(logger model.Logger) {
     tk.analysisDNSExperimentFailure()
     tk.analysisDNSBogon(logger)
+    tk.analysisDNSDuplicateResponses(logger)
     tk.analysisDNSUnexpectedFailure(logger)
     tk.analysisDNSUnexpectedAddrs(logger)
     if tk.DNSFlags != 0 {
@@ -70,6 +71,15 @@ func (tk *TestKeys) analysisDNSToplevel(logger model.Logger) {
     }
 }

+// analysisDNSDuplicateResponses checks whether we received duplicate
+// replies for DNS-over-UDP queries, which is very unexpected.
+func (tk *TestKeys) analysisDNSDuplicateResponses(logger model.Logger) {
+    if length := len(tk.DNSDuplicateResponses); length > 0 {
+        // TODO(bassosimone): write algorithm to analyze this
+        logger.Warnf("DNS: got %d unexpected late/duplicate DNS responses", length)
+    }
+}
+
 // analysisDNSExperimentFailure sets the legacy DNSExperimentFailure field.
 func (tk *TestKeys) analysisDNSExperimentFailure() {
     for _, query := range tk.Queries {
@@ -106,13 +116,23 @@ func (tk *TestKeys) analysisDNSBogon(logger model.Logger) {
         switch answer.AnswerType {
         case "A":
             if net.ParseIP(answer.IPv4) != nil && netxlite.IsBogon(answer.IPv4) {
-                logger.Warnf("DNS: BOGON %s in #%d", answer.IPv4, query.TransactionID)
+                logger.Warnf(
+                    "DNS: got BOGON answer %s for domain %s (see #%d)",
+                    answer.IPv4,
+                    query.Hostname,
+                    query.TransactionID,
+                )
                 tk.DNSFlags |= AnalysisDNSBogon
                 // continue processing so we print all the bogons we have
             }
         case "AAAA":
             if net.ParseIP(answer.IPv6) != nil && netxlite.IsBogon(answer.IPv6) {
-                logger.Warnf("DNS: BOGON %s in #%d", answer.IPv6, query.TransactionID)
+                logger.Warnf(
+                    "DNS: got BOGON answer %s for domain %s (see #%d)",
+                    answer.IPv6,
+                    query.Hostname,
+                    query.TransactionID,
+                )
                 tk.DNSFlags |= AnalysisDNSBogon
                 // continue processing so we print all the bogons we have
             }
@@ -255,7 +275,7 @@ func (tk *TestKeys) analysisDNSUnexpectedAddrs(logger model.Logger) {
     // if the probe has not collected any addr for the same domain, it's
     // definitely suspicious and counts as a difference
     if len(probeAddrs) <= 0 {
-        logger.Warnf("DNS: no IP address resolved by the probe")
+        logger.Warnf("DNS: the probe did not resolve any IP address")
         tk.DNSFlags |= AnalysisDNSUnexpectedAddrs
         return
     }
@@ -266,6 +286,9 @@ func (tk *TestKeys) analysisDNSUnexpectedAddrs(logger model.Logger) {
     if len(differentAddrs) <= 0 {
         return
     }
+    for _, addr := range differentAddrs {
+        logger.Infof("DNS: address %s: not resolved by TH", addr)
+    }

     // now, let's exclude the differentAddrs for which we successfully
     // completed a TLS handshake: those should be good addrs
@@ -273,19 +296,24 @@ func (tk *TestKeys) analysisDNSUnexpectedAddrs(logger model.Logger) {
     if len(withoutHandshake) <= 0 {
         return
     }
+    for _, addr := range withoutHandshake {
+        logger.Infof("DNS: address %s: cannot confirm using TLS handshake", addr)
+    }

     // as a last resort, accept the addresses without an handshake whose
     // ASN overlaps with ASNs resolved by the TH
-    differentASNs := tk.analysisDNSDiffASN(withoutHandshake, thAddrs)
+    differentASNs := tk.analysisDNSDiffASN(logger, withoutHandshake, thAddrs)
     if len(differentASNs) <= 0 {
         return
     }

     // otherwise, conclude we have unexpected probe addrs
+    for addr, asn := range differentASNs {
     logger.Warnf(
-        "DNSDiff: differentAddrs: %+v; withoutHandshake: %+v; differentASNs: %+v",
-        differentAddrs, withoutHandshake, differentASNs,
+        "DNS: address %s has unexpected AS%d and we cannot use TLS to confirm it",
+        addr, asn,
     )
+    }
     tk.DNSFlags |= AnalysisDNSUnexpectedAddrs
 }
@@ -298,6 +326,10 @@ func (tk *TestKeys) analysisDNSDiffAddrs(probeAddrs, thAddrs []string) (diff []s
     )
     mapping := make(map[string]int)
     for _, addr := range probeAddrs {
+        if net.ParseIP(addr) != nil && netxlite.IsBogon(addr) {
+            // we can exclude bogons from the analysis because we already analyzed them
+            continue
+        }
         mapping[addr] |= inProbe
     }
     for _, addr := range thAddrs {
@@ -313,23 +345,44 @@ func (tk *TestKeys) analysisDNSDiffAddrs(probeAddrs, thAddrs []string) (diff []s
 // analysisDNSDiffASN returns whether there are IP addresses in the probe's
 // list with different ASNs from the ones in the TH's list.
-func (tk *TestKeys) analysisDNSDiffASN(probeAddrs, thAddrs []string) (asns []uint) {
+func (tk *TestKeys) analysisDNSDiffASN(
+    logger model.Logger,
+    probeAddrs,
+    thAddrs []string,
+) (result map[string]uint) {
     const (
         inProbe = 1 << iota
         inTH
     )
-    mapping := make(map[uint]int)
+    logger.Debugf("DNS: probeAddrs %+v, thAddrs %+v", probeAddrs, thAddrs)
+    uniqueAddrs := make(map[string]uint)
+    asnToFlags := make(map[uint]int)
     for _, addr := range probeAddrs {
         asn, _, _ := geoipx.LookupASN(addr)
-        mapping[asn] |= inProbe // including the zero ASN that means unknown
+        asnToFlags[asn] |= inProbe // including the zero ASN that means unknown
+        uniqueAddrs[addr] = asn
     }
     for _, addr := range thAddrs {
         asn, _, _ := geoipx.LookupASN(addr)
-        mapping[asn] |= inTH // including the zero ASN that means unknown
+        asnToFlags[asn] |= inTH // including the zero ASN that means unknown
+        uniqueAddrs[addr] = asn
     }
-    for asn, where := range mapping {
+    for addr, asn := range uniqueAddrs {
+        logger.Infof("DNS: addr %s has AS%d", addr, asn)
+    }
+    probeOnlyASNs := make(map[uint]bool)
+    for asn, where := range asnToFlags {
         if (where & inTH) == 0 {
-            asns = append(asns, asn)
+            probeOnlyASNs[asn] = true
         }
     }
+    for asn := range probeOnlyASNs {
+        logger.Infof("DNS: AS%d: only seen by probe", asn)
+    }
+    result = make(map[string]uint)
+    for addr, asn := range uniqueAddrs {
+        if probeOnlyASNs[asn] {
+            result[addr] = asn
+        }
+    }
     return
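For readers unfamiliar with the filtering funnel that the new log messages narrate (and that the `#dnsDiff` determination follows), here is a hypothetical, self-contained Go sketch of the three steps: keep probe-only addresses, drop those confirmed via TLS, and then keep only those whose ASN the TH never saw. The lookupASN map and the sample addresses are invented for illustration; the real analysis uses geoipx.LookupASN, excludes bogons, and operates on measurement data.

package main

import "fmt"

// lookupASN is a made-up ASN database used only for this illustration;
// the real experiment calls geoipx.LookupASN.
var lookupASN = map[string]uint{
    "203.0.113.5":  64501, // resolved by the probe only
    "198.51.100.7": 64502, // resolved by both the probe and the TH
    "198.51.100.9": 64502, // resolved by the TH only (same ASN as above)
}

func main() {
    probeAddrs := []string{"203.0.113.5", "198.51.100.7"}
    thAddrs := []string{"198.51.100.7", "198.51.100.9"}
    tlsConfirmed := map[string]bool{} // no handshake succeeded in this example

    // step 1: addresses seen by the probe but not by the TH
    thSet := map[string]bool{}
    for _, addr := range thAddrs {
        thSet[addr] = true
    }
    var differentAddrs []string
    for _, addr := range probeAddrs {
        if !thSet[addr] {
            differentAddrs = append(differentAddrs, addr)
        }
    }

    // step 2: drop addresses we could confirm with a TLS handshake
    var withoutHandshake []string
    for _, addr := range differentAddrs {
        if !tlsConfirmed[addr] {
            withoutHandshake = append(withoutHandshake, addr)
        }
    }

    // step 3: keep only addresses whose ASN the TH never saw
    thASNs := map[uint]bool{}
    for _, addr := range thAddrs {
        thASNs[lookupASN[addr]] = true
    }
    differentASNs := map[string]uint{}
    for _, addr := range withoutHandshake {
        if asn := lookupASN[addr]; !thASNs[asn] {
            differentASNs[addr] = asn
        }
    }

    // only 203.0.113.5 (AS64501) survives the funnel and would be
    // flagged as an unexpected address in the real analysis
    for addr, asn := range differentASNs {
        fmt.Printf("DNS: address %s has unexpected AS%d and we cannot use TLS to confirm it\n", addr, asn)
    }
}

The new Infof/Warnf calls added in the hunks above log each of these intermediate steps, so the user can follow the funnel in the experiment's output.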


@@ -53,10 +53,10 @@ func (tk *TestKeys) analysisHTTPToplevel(logger model.Logger) {
         netxlite.FailureEOFError:
         tk.BlockingFlags |= analysisFlagHTTPBlocking
         logger.Warnf(
-            "HTTP: endpoint %s is blocked (see #%d): %s",
+            "HTTP: unexpected failure %s for %s (see #%d)",
+            *failure,
             finalRequest.Address,
             finalRequest.TransactionID,
-            *failure,
         )
     default:
         // leave this case for ooni/pipeline


@@ -53,6 +53,7 @@ func (tk *TestKeys) analysisHTTPDiff(logger model.Logger,
             tk.BlockingFlags |= analysisFlagSuccess
             return
         }
+        logger.Infof("HTTP: body length: MISMATCH (see #%d)", probe.TransactionID)
         if tk.HeadersMatch != nil && *tk.HeadersMatch {
             logger.Infof(
                 "HTTP: statusCodeMatch && headersMatch => #%d is successful",
@@ -61,6 +62,7 @@ func (tk *TestKeys) analysisHTTPDiff(logger model.Logger,
             tk.BlockingFlags |= analysisFlagSuccess
             return
         }
+        logger.Infof("HTTP: uncommon headers: MISMATCH (see #%d)", probe.TransactionID)
         if tk.TitleMatch != nil && *tk.TitleMatch {
             logger.Infof(
                 "HTTP: statusCodeMatch && titleMatch => #%d is successful",
@@ -69,6 +71,9 @@ func (tk *TestKeys) analysisHTTPDiff(logger model.Logger,
             tk.BlockingFlags |= analysisFlagSuccess
             return
         }
+        logger.Infof("HTTP: title: MISMATCH (see #%d)", probe.TransactionID)
+    } else {
+        logger.Infof("HTTP: status code: MISMATCH (see #%d)", probe.TransactionID)
     }
     tk.BlockingFlags |= analysisFlagHTTPDiff


@@ -71,10 +71,10 @@ func (tk *TestKeys) analysisTCPIPToplevel(logger model.Logger) {
             continue
         }
         logger.Warnf(
-            "TCP/IP: endpoint %s is blocked (see #%d): %s",
+            "TCP/IP: unexpected failure %s for %s (see #%d)",
+            *failure,
             epnt,
             entry.TransactionID,
-            *failure,
         )
         entry.Status.Blocked = &istrue
         tk.BlockingFlags |= analysisFlagTCPIPBlocking


@@ -40,10 +40,10 @@ func (tk *TestKeys) analysisTLSToplevel(logger model.Logger) {
             continue
         }
         logger.Warnf(
-            "TLS: endpoint %s is blocked (see #%d): %s",
+            "TLS: unexpected failure %s for %s (see #%d)",
+            *failure,
             epnt,
             entry.TransactionID,
-            *failure,
         )
         tk.BlockingFlags |= analysisFlagTLSBlocking
     }


@@ -268,9 +268,6 @@ func (t *DNSResolvers) waitForLateReplies(parentCtx context.Context, trace *meas
         defer t.WaitGroup.Done()
         const lateTimeout = 500 * time.Millisecond
         events := trace.DelayedDNSResponseWithTimeout(parentCtx, lateTimeout)
-        if length := len(events); length > 0 {
-            t.Logger.Warnf("got %d late DNS replies", length)
-        }
         t.TestKeys.AppendDNSLateReplies(events...)
     }


@@ -36,7 +36,7 @@ func (m *Measurer) ExperimentName() string {
 // ExperimentVersion implements model.ExperimentMeasurer.
 func (m *Measurer) ExperimentVersion() string {
-    return "0.5.16"
+    return "0.5.17"
 }

 // Run implements model.ExperimentMeasurer.


@@ -43,9 +43,9 @@ type TestKeys struct {
     // Do53 contains ancillary observations collected by Do53 resolvers.
     Do53 *TestKeysDo53 `json:"x_do53"`

-    // DNSLateReplies contains late replies we didn't expect to receive from
+    // DNSDuplicateResponses contains late/duplicate responses we didn't expect to receive from
     // a resolver (which may raise eyebrows if they're different).
-    DNSLateReplies []*model.ArchivalDNSLookupResult `json:"x_dns_late_replies"`
+    DNSDuplicateResponses []*model.ArchivalDNSLookupResult `json:"x_dns_duplicate_responses"`

     // Queries contains DNS queries.
     Queries []*model.ArchivalDNSLookupResult `json:"queries"`
@@ -202,7 +202,7 @@ func (tk *TestKeys) AppendNetworkEvents(v ...*model.ArchivalNetworkEvent) {
 // AppendDNSLateReplies appends to DNSLateReplies.
 func (tk *TestKeys) AppendDNSLateReplies(v ...*model.ArchivalDNSLookupResult) {
     tk.mu.Lock()
-    tk.DNSLateReplies = append(tk.DNSLateReplies, v...)
+    tk.DNSDuplicateResponses = append(tk.DNSDuplicateResponses, v...)
     tk.mu.Unlock()
 }
@@ -325,7 +325,7 @@ func NewTestKeys() *TestKeys {
             NetworkEvents: []*model.ArchivalNetworkEvent{},
             Queries:       []*model.ArchivalDNSLookupResult{},
         },
-        DNSLateReplies:        []*model.ArchivalDNSLookupResult{},
+        DNSDuplicateResponses: []*model.ArchivalDNSLookupResult{},
         Queries:               []*model.ArchivalDNSLookupResult{},
         Requests:              []*model.ArchivalHTTPRequestResult{},
         TCPConnect:            []*model.ArchivalTCPConnectResult{},