ooni-probe-cli/internal/engine/experiment/torsf/torsf_test.go
Simone Basso b7cc309901
feat: re-implement the vanilla_tor experiment (#718)
This diff re-implements the vanilla_tor experiment. This experiment was
part of the ooni/probe-legacy implementation.

The reference issue is https://github.com/ooni/probe/issues/803. We didn't
consider the possible improvements mentioned in the
https://github.com/ooni/probe/issues/803#issuecomment-598715694 comment,
which means we'll need to create a follow-up issue for them. We will
then decide whether, when, and how to implement those follow-up measurements
either into `vanilla_tor` or into the existing `tor` experiment.

This new `vanilla_tor` implementation emits test_keys that are mostly
compatible with the original implementation, with the following differences:

1. the `timeout` is a `float64` rather than an integer (but the default
timeout is an integer, so there are no JSON-visible changes; see the sketch
after this list);

2. the `tor_log` string is gone and replaced by the `tor_logs` list
of strings, which contains the same information;

3. the definition of `error` has been augmented to include the
case in which there is an unknown error;

4. the implementation of `vanilla_tor` closely mirrors that of `torsf`,
and we have taken steps to make the two implementations as comparable
as possible in terms of the generated JSON measurement.
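
To make points 1-3 concrete, here is a hedged sketch (not the actual
declaration) of the shared test_keys shape in Go. The field names follow the
tests at the bottom of this page; the JSON names other than `timeout`,
`tor_logs`, and `error` are assumptions and may differ from the real data
format:

```go
// testKeysSketch illustrates the shared test_keys shape. This is a partial,
// illustrative sketch, not the struct the experiments actually declare.
type testKeysSketch struct {
	BootstrapTime float64  `json:"bootstrap_time"` // assumed JSON name
	Error         *string  `json:"error"`          // coarse legacy-style error (point 3)
	Failure       *string  `json:"failure"`        // precise failure string (assumed JSON name)
	Success       bool     `json:"success"`        // assumed JSON name
	Timeout       float64  `json:"timeout"`        // float64 rather than integer (point 1)
	TorLogs       []string `json:"tor_logs"`       // list of strings replacing tor_log (point 2)
}
```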

The main reasons why we replaced `tor_log` with `tor_logs` are:

1. that `torsf` already used that;

2. that reading the JSON is easier with a list of strings than with an
implementation where all the logs are crammed into a single string.

If one is processing the new data format using Python, it will not be
difficult to convert `tor_log` to `tor_logs`. In any case, because we
extract the most interesting fields (e.g., the percentage of the
bootstrap at which tor fails), the full logs are probably only something
you want to read in edge cases.
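
For instance, a legacy `tor_log` blob can be turned into a `tor_logs`-style
list with a few lines of code when comparing old and new measurements. The
sketch below is in Go for consistency with the rest of this page and assumes
(which may not hold for every legacy measurement) that `tor_log` is simply the
newline-joined log; the same idea works in Python with `str.splitlines`:

```go
import "strings"

// torLogsFromLegacy converts a legacy tor_log blob into a tor_logs-style
// list of strings. Hedged sketch: it assumes the blob is newline-separated
// and silently drops empty lines.
func torLogsFromLegacy(torLog string) (torLogs []string) {
	for _, line := range strings.Split(torLog, "\n") {
		if line != "" {
			torLogs = append(torLogs, line)
		}
	}
	return
}
```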

Also, because we want `torsf` and `vanilla_tor` to have similar JSONs,
we renamed `torsf`'s `default_timeout` to `timeout`. This change has little
to no real-world impact, because no stable version of OONI Probe has
ever shipped a `torsf` producing the `default_timeout` field.

Regarding the structure of this diff, we have:

1. factored the code that parses tor logs into a separate package (see the
parsing sketch below);

2. implemented `vanilla_tor` as a stripped-down `torsf`, with further changes
to ensure compatibility with the previous `vanilla_tor` data format;

3. improved `torsf` to merge back the changes made for `vanilla_tor`, so that
the data formats of the two experiments are as similar as possible.

We believe that producing data formats that are as similar as possible helps
anyone who reads measurements generated by both experiments.
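
Regarding point 1 above, the kind of parsing the factored-out package needs to
perform can be sketched as follows. The regular expression and the function
name are illustrative assumptions, not the actual package API, but they
recover the progress/tag/summary triple that the tests below check (e.g. 15,
"handshake_done", "Handshake with a relay done"):

```go
import "regexp"

// bootstrapRE matches tor's bootstrap notices, e.g.
// "[notice] Bootstrapped 15% (handshake_done): Handshake with a relay done".
// Hedged sketch: the real parsing package may use a different expression.
var bootstrapRE = regexp.MustCompile(`Bootstrapped (\d+)% \(([^)]+)\): (.+)`)

// parseBootstrapLine extracts (progress, tag, summary) from a tor log line,
// returning ok == false when the line is not a bootstrap notice.
func parseBootstrapLine(line string) (progress, tag, summary string, ok bool) {
	m := bootstrapRE.FindStringSubmatch(line)
	if m == nil {
		return "", "", "", false
	}
	return m[1], m[2], m[3], true
}
```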

We have retained `vanilla_tor`'s `error` field (and introduced it into
`torsf`). It is not very useful when a more precise `failure` is available,
but it is what `vanilla_tor` used to emit, so it makes sense to keep it.
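
A hedged sketch of how the coarse `error` value could be derived from the
precise failure string, covering only the two mappings exercised by the tests
below (`generic_timeout_error` maps to `timeout-reached`, anything else to
`unknown-error`); the actual implementation may distinguish more cases:

```go
// classifyError maps a precise OONI failure string onto the coarse legacy
// vanilla_tor error value. Illustrative sketch based on the values the
// tests below expect; the real code may recognize additional cases.
func classifyError(failure *string) *string {
	if failure == nil {
		return nil // success: no legacy error either
	}
	var out string
	switch *failure {
	case "generic_timeout_error":
		out = "timeout-reached"
	default:
		out = "unknown-error"
	}
	return &out
}
```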

In addition to changing the implementation, we also updated the specs.

As part of our future work, we may want to consider factoring the common code
of these two experiments into the same underlying support library.
2022-05-10 15:43:28 +02:00


package torsf

import (
	"context"
	"errors"
	"path/filepath"
	"testing"
	"time"

	"github.com/ooni/probe-cli/v3/internal/atomicx"
	"github.com/ooni/probe-cli/v3/internal/engine/mockable"
	"github.com/ooni/probe-cli/v3/internal/model"
	"github.com/ooni/probe-cli/v3/internal/ptx"
	"github.com/ooni/probe-cli/v3/internal/tunnel"
	"github.com/ooni/probe-cli/v3/internal/tunnel/mocks"
)

// Implementation note: this file is written with easy diffing with respect
// to internal/engine/experiment/vanillator/vanillator_test.go in mind.
//
// We may want to have a single implementation for both nettests in the future.

func TestExperimentNameAndVersion(t *testing.T) {
	m := NewExperimentMeasurer(Config{})
	if m.ExperimentName() != "torsf" {
		t.Fatal("invalid experiment name")
	}
	if m.ExperimentVersion() != "0.3.0" {
		t.Fatal("invalid experiment version")
	}
}

func TestFailureWithInvalidRendezvousMethod(t *testing.T) {
	m := &Measurer{
		config: Config{
			DisablePersistentDatadir: false,
			DisableProgress:          false,
			RendezvousMethod:         "antani",
		},
		mockStartTunnel: nil,
	}
	ctx := context.Background()
	measurement := &model.Measurement{}
	sess := &mockable.Session{
		MockableLogger: model.DiscardLogger,
	}
	callbacks := &model.PrinterCallbacks{
		Logger: model.DiscardLogger,
	}
	err := m.Run(ctx, sess, measurement, callbacks)
	if !errors.Is(err, ptx.ErrSnowflakeNoSuchRendezvousMethod) {
		t.Fatal("unexpected error", err)
	}
	if measurement.TestKeys != nil {
		t.Fatal("expected nil test keys")
	}
}

func TestFailureToStartPTXListener(t *testing.T) {
	expected := errors.New("mocked error")
	m := &Measurer{
		config: Config{},
		mockStartListener: func() error {
			return expected
		},
	}
	ctx := context.Background()
	measurement := &model.Measurement{}
	sess := &mockable.Session{}
	callbacks := &model.PrinterCallbacks{
		Logger: model.DiscardLogger,
	}
	if err := m.Run(ctx, sess, measurement, callbacks); !errors.Is(err, expected) {
		t.Fatal("not the error we expected", err)
	}
	if tk := measurement.TestKeys; tk != nil {
		t.Fatal("expected nil test keys here")
	}
}

func TestSuccessWithMockedTunnelStart(t *testing.T) {
	bootstrapTime := 3 * time.Second
	called := &atomicx.Int64{}
	m := &Measurer{
		config: Config{},
		mockStartTunnel: func(
			ctx context.Context, config *tunnel.Config) (tunnel.Tunnel, tunnel.DebugInfo, error) {
			// run for some time so we also exercise printing progress.
			time.Sleep(bootstrapTime)
			return &mocks.Tunnel{
					MockBootstrapTime: func() time.Duration {
						return bootstrapTime
					},
					MockStop: func() {
						called.Add(1)
					},
				}, tunnel.DebugInfo{
					Name:        "tor",
					LogFilePath: filepath.Join("testdata", "tor.log"),
				}, nil
		},
	}
	ctx := context.Background()
	measurement := &model.Measurement{}
	sess := &mockable.Session{
		MockableLogger: model.DiscardLogger,
	}
	callbacks := &model.PrinterCallbacks{
		Logger: model.DiscardLogger,
	}
	if err := m.Run(ctx, sess, measurement, callbacks); err != nil {
		t.Fatal(err)
	}
	if called.Load() != 1 {
		t.Fatal("stop was not called")
	}
	tk := measurement.TestKeys.(*TestKeys)
	if tk.BootstrapTime != bootstrapTime.Seconds() {
		t.Fatal("unexpected bootstrap time")
	}
	if tk.Error != nil {
		t.Fatal("unexpected error")
	}
	if tk.Failure != nil {
		t.Fatal("unexpected failure")
	}
	if !tk.PersistentDatadir {
		t.Fatal("unexpected persistent data dir")
	}
	if tk.RendezvousMethod != "domain_fronting" {
		t.Fatal("unexpected rendezvous method")
	}
	if !tk.Success {
		t.Fatal("unexpected success value")
	}
	if tk.Timeout != maxRuntime.Seconds() {
		t.Fatal("unexpected timeout")
	}
	if count := len(tk.TorLogs); count != 9 {
		t.Fatal("unexpected length of tor logs", count)
	}
	if tk.TorProgress != 100 {
		t.Fatal("unexpected tor progress")
	}
	if tk.TorProgressTag != "done" {
		t.Fatal("unexpected tor progress tag")
	}
	if tk.TorProgressSummary != "Done" {
		t.Fatal("unexpected tor progress summary")
	}
	if tk.TransportName != "snowflake" {
		t.Fatal("invalid transport name")
	}
}

func TestWithCancelledContext(t *testing.T) {
	// This test calls the real tunnel.Start function so we cover it,
	// but it fails immediately because of the already-cancelled ctx.
	m := &Measurer{
		config: Config{},
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // fail immediately
	measurement := &model.Measurement{}
	sess := &mockable.Session{
		MockableLogger: model.DiscardLogger,
	}
	callbacks := &model.PrinterCallbacks{
		Logger: model.DiscardLogger,
	}
	if err := m.Run(ctx, sess, measurement, callbacks); err != nil {
		t.Fatal(err)
	}
	tk := measurement.TestKeys.(*TestKeys)
	if tk.BootstrapTime != 0 {
		t.Fatal("unexpected bootstrap time")
	}
	if tk.Error == nil || *tk.Error != "unknown-error" {
		t.Fatal("unexpected error")
	}
	if *tk.Failure != "interrupted" {
		t.Fatal("unexpected failure")
	}
	if !tk.PersistentDatadir {
		t.Fatal("unexpected persistent data dir")
	}
	if tk.RendezvousMethod != "domain_fronting" {
		t.Fatal("unexpected rendezvous method")
	}
	if tk.Success {
		t.Fatal("unexpected success value")
	}
	if tk.Timeout != maxRuntime.Seconds() {
		t.Fatal("unexpected timeout")
	}
	if len(tk.TorLogs) != 0 {
		t.Fatal("unexpected length of tor logs")
	}
	if tk.TorProgress != 0 {
		t.Fatal("unexpected tor progress")
	}
	if tk.TorProgressTag != "" {
		t.Fatal("unexpected tor progress tag")
	}
	if tk.TorProgressSummary != "" {
		t.Fatal("unexpected tor progress summary")
	}
	if tk.TransportName != "snowflake" {
		t.Fatal("invalid transport name")
	}
}

func TestFailureToStartTunnel(t *testing.T) {
	expected := context.DeadlineExceeded // error occurring on bootstrap timeout
	m := &Measurer{
		config: Config{},
		mockStartTunnel: func(
			ctx context.Context, config *tunnel.Config) (tunnel.Tunnel, tunnel.DebugInfo, error) {
			return nil,
				tunnel.DebugInfo{
					Name:        "tor",
					LogFilePath: filepath.Join("testdata", "partial.log"),
				}, expected
		},
	}
	ctx := context.Background()
	measurement := &model.Measurement{}
	sess := &mockable.Session{
		MockableLogger: model.DiscardLogger,
	}
	callbacks := &model.PrinterCallbacks{
		Logger: model.DiscardLogger,
	}
	if err := m.Run(ctx, sess, measurement, callbacks); err != nil {
		t.Fatal(err)
	}
	tk := measurement.TestKeys.(*TestKeys)
	if tk.BootstrapTime != 0 {
		t.Fatal("unexpected bootstrap time")
	}
	if tk.Error == nil || *tk.Error != "timeout-reached" {
		t.Fatal("unexpected error")
	}
	if tk.Failure == nil {
		t.Fatal("unexpectedly nil failure string")
	}
	if *tk.Failure != "generic_timeout_error" {
		t.Fatal("unexpected failure string", *tk.Failure)
	}
	if !tk.PersistentDatadir {
		t.Fatal("unexpected persistent datadir")
	}
	if tk.RendezvousMethod != "domain_fronting" {
		t.Fatal("unexpected rendezvous method")
	}
	if tk.Success {
		t.Fatal("unexpected success value")
	}
	if tk.Timeout != maxRuntime.Seconds() {
		t.Fatal("unexpected timeout")
	}
	if count := len(tk.TorLogs); count != 6 {
		t.Fatal("unexpected length of tor logs", count)
	}
	if tk.TorProgress != 15 {
		t.Fatal("unexpected tor progress")
	}
	if tk.TorProgressTag != "handshake_done" {
		t.Fatal("unexpected tor progress tag")
	}
	if tk.TorProgressSummary != "Handshake with a relay done" {
		t.Fatal("unexpected tor progress summary")
	}
	if tk.TransportName != "snowflake" {
		t.Fatal("invalid transport name")
	}
}

func TestBaseTunnelDir(t *testing.T) {
	t.Run("without persistent data dir", func(t *testing.T) {
		m := &Measurer{
			config: Config{
				DisablePersistentDatadir: true,
			},
		}
		sess := &mockable.Session{
			MockableTunnelDir: "a",
			MockableTempDir:   "b",
		}
		dir := m.baseTunnelDir(sess)
		if dir != "b" {
			t.Fatal("unexpected base tunnel dir", dir)
		}
	})
	t.Run("with persistent data dir", func(t *testing.T) {
		m := &Measurer{
			config: Config{
				DisablePersistentDatadir: false,
			},
		}
		sess := &mockable.Session{
			MockableTunnelDir: "a",
			MockableTempDir:   "b",
		}
		dir := m.baseTunnelDir(sess)
		if dir != "a" {
			t.Fatal("unexpected base tunnel dir", dir)
		}
	})
}

func TestGetSummaryKeys(t *testing.T) {
	t.Run("in case of untyped nil TestKeys", func(t *testing.T) {
		measurement := &model.Measurement{
			TestKeys: nil,
		}
		m := &Measurer{}
		_, err := m.GetSummaryKeys(measurement)
		if !errors.Is(err, errInvalidTestKeysType) {
			t.Fatal("unexpected error", err)
		}
	})
	t.Run("in case of typed nil TestKeys", func(t *testing.T) {
		var tk *TestKeys
		measurement := &model.Measurement{
			TestKeys: tk,
		}
		m := &Measurer{}
		_, err := m.GetSummaryKeys(measurement)
		if !errors.Is(err, errNilTestKeys) {
			t.Fatal("unexpected error", err)
		}
	})
	t.Run("in case of invalid TestKeys type", func(t *testing.T) {
		measurement := &model.Measurement{
			TestKeys: make(chan int),
		}
		m := &Measurer{}
		_, err := m.GetSummaryKeys(measurement)
		if !errors.Is(err, errInvalidTestKeysType) {
			t.Fatal("unexpected error", err)
		}
	})
	t.Run("in case of success", func(t *testing.T) {
		measurement := &model.Measurement{
			TestKeys: &TestKeys{
				Failure: nil,
			},
		}
		m := &Measurer{}
		sk, err := m.GetSummaryKeys(measurement)
		if err != nil {
			t.Fatal(err)
		}
		rsk := sk.(SummaryKeys)
		if rsk.IsAnomaly {
			t.Fatal("expected no anomaly here")
		}
	})
	t.Run("in case of failure", func(t *testing.T) {
		failure := "generic_timeout_error"
		measurement := &model.Measurement{
			TestKeys: &TestKeys{
				Failure: &failure,
			},
		}
		m := &Measurer{}
		sk, err := m.GetSummaryKeys(measurement)
		if err != nil {
			t.Fatal(err)
		}
		rsk := sk.(SummaryKeys)
		if !rsk.IsAnomaly {
			t.Fatal("expected anomaly here")
		}
	})
}