ooni-probe-cli/internal/netxlite/integration_test.go

package netxlite_test
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/apex/log"
"github.com/lucas-clemente/quic-go"
"github.com/ooni/probe-cli/v3/internal/model"
"github.com/ooni/probe-cli/v3/internal/netxlite"
"github.com/ooni/probe-cli/v3/internal/netxlite/filtering"
"github.com/ooni/probe-cli/v3/internal/netxlite/quictesting"
"github.com/ooni/probe-cli/v3/internal/randx"
"github.com/ooni/probe-cli/v3/internal/runtimex"
utls "gitlab.com/yawning/utls.git"
)
// This set of integration tests ensures that we continue to
// be able to measure the conditions we care about.
func TestMeasureWithSystemResolver(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
//
// Measurement conditions we care about:
//
// - success
//
// - nxdomain
//
// - timeout
//
t.Run("on success", func(t *testing.T) {
r := netxlite.NewStdlibResolver(log.Log)
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "dns.google.com")
if err != nil {
t.Fatal(err)
}
if addrs == nil {
t.Fatal("expected non-nil result here")
}
})
t.Run("for nxdomain", func(t *testing.T) {
r := netxlite.NewStdlibResolver(log.Log)
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "antani.ooni.org")
if err == nil || err.Error() != netxlite.FailureDNSNXDOMAINError {
t.Fatal("not the error we expected", err)
}
if addrs != nil {
t.Fatal("expected nil result here")
}
})
t.Run("for timeout", func(t *testing.T) {
r := netxlite.NewStdlibResolver(log.Log)
defer r.CloseIdleConnections()
const timeout = time.Nanosecond
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
		// Implementation note: the Windows resolver caches responses, so back-to-back
		// test runs would fail unless we query for something that bypasses the cache,
		// e.g., a domain containing a few random letters.
addrs, err := r.LookupHost(ctx, randx.Letters(7)+".ooni.org")
if err == nil || err.Error() != netxlite.FailureGenericTimeoutError {
t.Fatal("not the error we expected", err)
}
if addrs != nil {
t.Fatal("expected nil result here")
}
})
}
func TestMeasureWithUDPResolver(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
//
// Measurement conditions we care about:
//
// - success
//
// - nxdomain
//
// - refused
//
// - timeout
//
t.Run("on success", func(t *testing.T) {
dlr := netxlite.NewDialerWithoutResolver(log.Log)
r := netxlite.NewParallelUDPResolver(log.Log, dlr, "8.8.4.4:53")
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "dns.google.com")
if err != nil {
t.Fatal(err)
}
if addrs == nil {
t.Fatal("expected non-nil result here")
}
})
t.Run("for nxdomain", func(t *testing.T) {
proxy := &filtering.DNSServer{
OnQuery: func(domain string) filtering.DNSAction {
return filtering.DNSActionNXDOMAIN
},
}
listener, err := proxy.Start("127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer listener.Close()
dlr := netxlite.NewDialerWithoutResolver(log.Log)
r := netxlite.NewParallelUDPResolver(log.Log, dlr, listener.LocalAddr().String())
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "ooni.org")
if err == nil || err.Error() != netxlite.FailureDNSNXDOMAINError {
t.Fatal("not the error we expected", err)
}
if addrs != nil {
t.Fatal("expected nil result here")
}
})
t.Run("for refused", func(t *testing.T) {
proxy := &filtering.DNSServer{
OnQuery: func(domain string) filtering.DNSAction {
return filtering.DNSActionRefused
},
}
listener, err := proxy.Start("127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer listener.Close()
dlr := netxlite.NewDialerWithoutResolver(log.Log)
r := netxlite.NewParallelUDPResolver(log.Log, dlr, listener.LocalAddr().String())
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "ooni.org")
if err == nil || err.Error() != netxlite.FailureDNSRefusedError {
t.Fatal("not the error we expected", err)
}
if addrs != nil {
t.Fatal("expected nil result here")
}
})
t.Run("for timeout", func(t *testing.T) {
proxy := &filtering.DNSServer{
OnQuery: func(domain string) filtering.DNSAction {
return filtering.DNSActionTimeout
},
}
listener, err := proxy.Start("127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer listener.Close()
dlr := netxlite.NewDialerWithoutResolver(log.Log)
r := netxlite.NewParallelUDPResolver(log.Log, dlr, listener.LocalAddr().String())
defer r.CloseIdleConnections()
ctx := context.Background()
addrs, err := r.LookupHost(ctx, "ooni.org")
if err == nil || err.Error() != netxlite.FailureGenericTimeoutError {
t.Fatal("not the error we expected", err)
}
if addrs != nil {
t.Fatal("expected nil result here")
}
})
}
func TestMeasureWithDialer(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
//
// Measurement conditions we care about:
//
// - success
//
// - connection refused
//
// - timeout
//
t.Run("on success", func(t *testing.T) {
d := netxlite.NewDialerWithoutResolver(log.Log)
defer d.CloseIdleConnections()
ctx := context.Background()
conn, err := d.DialContext(ctx, "tcp", "8.8.4.4:443")
if err != nil {
t.Fatal(err)
}
if conn == nil {
t.Fatal("expected non-nil conn here")
}
conn.Close()
})
t.Run("on connection refused", func(t *testing.T) {
d := netxlite.NewDialerWithoutResolver(log.Log)
defer d.CloseIdleConnections()
ctx := context.Background()
// Here we assume that no-one is listening on 127.0.0.1:1
conn, err := d.DialContext(ctx, "tcp", "127.0.0.1:1")
if err == nil || err.Error() != netxlite.FailureConnectionRefused {
t.Fatal("not the error we expected", err)
}
if conn != nil {
t.Fatal("expected nil conn here")
}
})
t.Run("on timeout", func(t *testing.T) {
		// Note: this test has sometimes been flaky on macOS. In particular,
		// I've seen this failure on 2021-09-29:
//
// ```
// --- FAIL: TestMeasureWithDialer (8.25s)
// --- FAIL: TestMeasureWithDialer/on_timeout (8.22s)
// integration_test.go:233: not the error we expected timed_out
// ```
//
// My explanation of this failure is that the ETIMEDOUT from
// the kernel races with the timeout we've configured. For this
// reason, I have set a smaller context timeout (see below).
//
d := netxlite.NewDialerWithoutResolver(log.Log)
defer d.CloseIdleConnections()
const timeout = 5 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// Here we assume 8.8.4.4:1 is filtered
conn, err := d.DialContext(ctx, "tcp", "8.8.4.4:1")
if err == nil || err.Error() != netxlite.FailureGenericTimeoutError {
t.Fatal("not the error we expected", err)
}
if conn != nil {
t.Fatal("expected nil conn here")
}
})
}
func TestMeasureWithTLSHandshaker(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
//
// Measurement conditions we care about:
//
// - success
//
// - connection reset
//
// - timeout
//
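	// dial establishes a TCP connection with the given address using
	// a dialer that does not perform any DNS resolution.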
dial := func(ctx context.Context, address string) (net.Conn, error) {
d := netxlite.NewDialerWithoutResolver(log.Log)
return d.DialContext(ctx, "tcp", address)
}
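	// successFlow dials 8.8.4.4:443, performs a TLS handshake for
	// dns.google using the given handshaker, and expects it to succeed.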
successFlow := func(th model.TLSHandshaker) error {
ctx := context.Background()
conn, err := dial(ctx, "8.8.4.4:443")
if err != nil {
return fmt.Errorf("dial failed: %w", err)
}
defer conn.Close()
config := &tls.Config{
ServerName: "dns.google",
NextProtos: []string{"h2", "http/1.1"},
RootCAs: netxlite.NewDefaultCertPool(),
}
tconn, _, err := th.Handshake(ctx, conn, config)
if err != nil {
return fmt.Errorf("tls handshake failed: %w", err)
}
tconn.Close()
return nil
}
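	// connectionResetFlow performs a TLS handshake with a local server that
	// resets the connection (filtering.TLSActionReset) and expects the
	// handshake to fail with netxlite.FailureConnectionReset.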
connectionResetFlow := func(th model.TLSHandshaker) error {
server := filtering.NewTLSServer(filtering.TLSActionReset)
defer server.Close()
ctx := context.Background()
conn, err := dial(ctx, server.Endpoint())
if err != nil {
return fmt.Errorf("dial failed: %w", err)
}
defer conn.Close()
config := &tls.Config{
ServerName: "dns.google",
NextProtos: []string{"h2", "http/1.1"},
RootCAs: netxlite.NewDefaultCertPool(),
}
tconn, _, err := th.Handshake(ctx, conn, config)
if err == nil {
return fmt.Errorf("tls handshake succeded unexpectedly")
}
if err.Error() != netxlite.FailureConnectionReset {
return fmt.Errorf("not the error we expected: %w", err)
}
if tconn != nil {
return fmt.Errorf("expected nil tconn here")
}
return nil
}
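	// timeoutFlow performs a TLS handshake with a local server that
	// simulates a timeout (filtering.TLSActionTimeout) and expects the
	// handshake to fail with netxlite.FailureGenericTimeoutError.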
timeoutFlow := func(th model.TLSHandshaker) error {
server := filtering.NewTLSServer(filtering.TLSActionTimeout)
defer server.Close()
ctx := context.Background()
conn, err := dial(ctx, server.Endpoint())
if err != nil {
return fmt.Errorf("dial failed: %w", err)
}
defer conn.Close()
config := &tls.Config{
ServerName: "dns.google",
NextProtos: []string{"h2", "http/1.1"},
RootCAs: netxlite.NewDefaultCertPool(),
}
tconn, _, err := th.Handshake(ctx, conn, config)
if err == nil {
return fmt.Errorf("tls handshake succeded unexpectedly")
}
if err.Error() != netxlite.FailureGenericTimeoutError {
return fmt.Errorf("not the error we expected: %w", err)
}
if tconn != nil {
return fmt.Errorf("expected nil tconn here")
}
return nil
}
t.Run("for stdlib handshaker", func(t *testing.T) {
t.Run("on success", func(t *testing.T) {
th := netxlite.NewTLSHandshakerStdlib(log.Log)
err := successFlow(th)
if err != nil {
t.Fatal(err)
}
})
t.Run("on connection reset", func(t *testing.T) {
th := netxlite.NewTLSHandshakerStdlib(log.Log)
err := connectionResetFlow(th)
if err != nil {
t.Fatal(err)
}
})
t.Run("on timeout", func(t *testing.T) {
th := netxlite.NewTLSHandshakerStdlib(log.Log)
err := timeoutFlow(th)
if err != nil {
t.Fatal(err)
}
})
})
t.Run("for utls handshaker", func(t *testing.T) {
t.Run("on success", func(t *testing.T) {
th := netxlite.NewTLSHandshakerUTLS(log.Log, &utls.HelloFirefox_55)
err := successFlow(th)
if err != nil {
t.Fatal(err)
}
})
t.Run("on connection reset", func(t *testing.T) {
th := netxlite.NewTLSHandshakerUTLS(log.Log, &utls.HelloFirefox_55)
err := connectionResetFlow(th)
if err != nil {
t.Fatal(err)
}
})
t.Run("on timeout", func(t *testing.T) {
th := netxlite.NewTLSHandshakerUTLS(log.Log, &utls.HelloFirefox_55)
err := timeoutFlow(th)
if err != nil {
t.Fatal(err)
}
})
})
}
func TestMeasureWithQUICDialer(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
// TODO(bassosimone): here we're not testing the case in which
// the certificate is invalid for the required SNI.
//
// Measurement conditions we care about:
//
// - success
//
// - timeout
//
t.Run("on success", func(t *testing.T) {
ql := netxlite.NewQUICListener()
d := netxlite.NewQUICDialerWithoutResolver(ql, log.Log)
defer d.CloseIdleConnections()
ctx := context.Background()
config := &tls.Config{
ServerName: quictesting.Domain,
NextProtos: []string{"h3"},
RootCAs: netxlite.NewDefaultCertPool(),
}
sess, err := d.DialContext(ctx, quictesting.Endpoint("443"), config, &quic.Config{})
if err != nil {
t.Fatal(err)
}
if sess == nil {
t.Fatal("expected non-nil sess here")
}
sess.CloseWithError(0, "")
})
t.Run("on timeout", func(t *testing.T) {
ql := netxlite.NewQUICListener()
d := netxlite.NewQUICDialerWithoutResolver(ql, log.Log)
defer d.CloseIdleConnections()
ctx := context.Background()
config := &tls.Config{
ServerName: quictesting.Domain,
NextProtos: []string{"h3"},
RootCAs: netxlite.NewDefaultCertPool(),
}
// Here we assume <target-address>:1 is filtered
sess, err := d.DialContext(ctx, quictesting.Endpoint("1"), config, &quic.Config{})
if err == nil || err.Error() != netxlite.FailureGenericTimeoutError {
t.Fatal("not the error we expected", err)
}
if sess != nil {
t.Fatal("expected nil sess here")
}
})
}
func TestHTTPTransport(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
t.Run("works as intended", func(t *testing.T) {
d := netxlite.NewDialerWithResolver(log.Log, netxlite.NewStdlibResolver(log.Log))
td := netxlite.NewTLSDialer(d, netxlite.NewTLSHandshakerStdlib(log.Log))
txp := netxlite.NewHTTPTransport(log.Log, d, td)
client := &http.Client{Transport: txp}
resp, err := client.Get("https://www.google.com/robots.txt")
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
client.CloseIdleConnections()
})
t.Run("we can read the body when the connection is closed", func(t *testing.T) {
// See https://github.com/ooni/probe/issues/1965
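		// The hijacked handler below sends a response without a Content-Length
		// header and then closes the connection, so reading the body terminates
		// with a (possibly wrapped) io.EOF that ReadAllContext must map to a
		// nil error, like the standard library does.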
srvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
hj := w.(http.Hijacker) // panic if not possible
conn, bufrw, err := hj.Hijack()
runtimex.PanicOnError(err, "hj.Hijack failed")
bufrw.WriteString("HTTP/1.0 302 Found\r\n")
bufrw.WriteString("Location: /text\r\n\r\n")
bufrw.Flush()
conn.Close()
}))
defer srvr.Close()
txp := netxlite.NewHTTPTransportStdlib(model.DiscardLogger)
req, err := http.NewRequest("GET", srvr.URL, nil)
if err != nil {
t.Fatal(err)
}
resp, err := txp.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
data, err := netxlite.ReadAllContext(req.Context(), resp.Body)
if err != nil {
t.Fatal(err)
}
t.Log(string(data))
})
}
func TestHTTP3Transport(t *testing.T) {
if testing.Short() {
t.Skip("skip test in short mode")
}
t.Run("works as intended", func(t *testing.T) {
d := netxlite.NewQUICDialerWithResolver(
netxlite.NewQUICListener(),
log.Log,
netxlite.NewStdlibResolver(log.Log),
)
txp := netxlite.NewHTTP3Transport(log.Log, d, &tls.Config{})
client := &http.Client{Transport: txp}
URL := (&url.URL{Scheme: "https", Host: quictesting.Domain, Path: "/"}).String()
resp, err := client.Get(URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
txp.CloseIdleConnections()
})
}