fix(oonimkall): only set annotations on success (#821)

This bug is one of those bugs that definitely help one to stay
humble and focused on improving the codebase.

Of course I `<facepalmed>` when I understood the root cause.

When we refactored the codebase to support returning multiple
measurements per run (https://github.com/ooni/probe-cli/pull/527),
we did not move the code that sets annotations below the `if`
checking whether the measurement was successful.
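
To make the root cause concrete, below is a minimal, self-contained
sketch showing why the old ordering crashes. These are simplified
stand-in types, not the real oonimkall code: only `AddAnnotations`
and the "architecture: arm64" annotation come from the actual change,
everything else is illustrative. When a measurement fails, the
returned pointer is nil, and `AddAnnotations` dereferences it.

    package main

    import (
    	"errors"
    	"fmt"
    )

    // measurement is a simplified stand-in for model.Measurement.
    type measurement struct {
    	Annotations map[string]string
    }

    // AddAnnotations dereferences its receiver, so calling it on a
    // nil *measurement panics with a nil pointer dereference.
    func (m *measurement) AddAnnotations(a map[string]string) {
    	if m.Annotations == nil {
    		m.Annotations = make(map[string]string)
    	}
    	for k, v := range a {
    		m.Annotations[k] = v
    	}
    }

    // measure simulates a failed measurement: like the mocked
    // MeasureWithContext in the regression test below, it returns
    // a nil measurement alongside the error.
    func measure() (*measurement, error) {
    	return nil, errors.New("preconditions error")
    }

    func main() {
    	annotations := map[string]string{"architecture": "arm64"}
    	m, err := measure()

    	// Buggy order (what the code did before this fix):
    	//
    	//	m.AddAnnotations(annotations) // panics: m is nil on failure
    	//	if err != nil { /* ... */ }

    	// Fixed order: only annotate after we know we succeeded.
    	if err != nil {
    		fmt.Println("measurement failed:", err)
    		return
    	}
    	m.AddAnnotations(annotations)
    	fmt.Println("annotated:", m.Annotations)
    }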

While I am not going to whip myself too much over this, it's
clearly a bummer that we didn't notice this bug back then, and
quite sad that it took us so long to notice it was in the tree.

The lesson (hopefully) learned is that we need to be more careful
when we refactor: we should ask not only whether we have tests, but
also whether those tests could be improved to give us even more
confidence about correctness.

The reference issue is https://github.com/ooni/probe/issues/2173.
Simone Basso 2022-07-01 09:54:35 +02:00 committed by GitHub
parent 74aebedac3
commit 9b08dcac3f
2 changed files with 34 additions and 1 deletion


@@ -291,7 +291,6 @@ func (r *runnerForTask) Run(rootCtx context.Context) {
 			// submit measurement and stop at beginning of next iteration
 			break
 		}
-		m.AddAnnotations(r.settings.Annotations)
 		if err != nil {
 			r.emitter.Emit(eventTypeFailureMeasurement, eventMeasurementGeneric{
 				Failure: err.Error(),
@@ -304,6 +303,7 @@ func (r *runnerForTask) Run(rootCtx context.Context) {
 			// now the only valid strategy here is to continue.
 			continue
 		}
+		m.AddAnnotations(r.settings.Annotations)
 		data, err := json.Marshal(m)
 		runtimex.PanicOnError(err, "measurement.MarshalJSON failed")
 		r.emitter.Emit(eventTypeMeasurement, eventMeasurementGeneric{


@@ -469,6 +469,39 @@ func TestTaskRunnerRun(t *testing.T) {
 		assertReducedEventsLike(t, expect, reduced)
 	})
+	t.Run("with measurement failure and annotations", func(t *testing.T) {
+		// See https://github.com/ooni/probe/issues/2173. We want to be sure that
+		// we are not crashing when the measurement fails and there are annotations,
+		// which is what was happening in the above referenced issue.
+		runner, emitter := newRunnerForTesting()
+		fake := fakeSuccessfulRun()
+		fake.MockableInputPolicy = func() engine.InputPolicy {
+			return engine.InputNone
+		}
+		fake.MockableMeasureWithContext = func(ctx context.Context, input string) (measurement *model.Measurement, err error) {
+			return nil, errors.New("preconditions error")
+		}
+		runner.sessionBuilder = fake
+		runner.settings.Annotations = map[string]string{
+			"architecture": "arm64",
+		}
+		events := runAndCollect(runner, emitter)
+		reduced := reduceEventsKeysIgnoreLog(events)
+		expect := []eventKeyCount{
+			{Key: eventTypeStatusQueued, Count: 1},
+			{Key: eventTypeStatusStarted, Count: 1},
+			{Key: eventTypeStatusProgress, Count: 3},
+			{Key: eventTypeStatusGeoIPLookup, Count: 1},
+			{Key: eventTypeStatusResolverLookup, Count: 1},
+			{Key: eventTypeStatusProgress, Count: 1},
+			{Key: eventTypeStatusReportCreate, Count: 1},
+			{Key: eventTypeStatusMeasurementStart, Count: 1},
+			{Key: eventTypeFailureMeasurement, Count: 1},
+			{Key: eventTypeStatusEnd, Count: 1},
+		}
+		assertReducedEventsLike(t, expect, reduced)
+	})
 	t.Run("with success and InputStrictlyRequired", func(t *testing.T) {
 		runner, emitter := newRunnerForTesting()
 		runner.settings.Inputs = []string{"a", "b", "c", "d"}
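
As a usage note: assuming the standard Go toolchain, and assuming
these tests live in the pkg/oonimkall package (an inference from the
commit scope, not something the diff states), the new regression test
alone can be exercised with:

    go test -run 'TestTaskRunnerRun/with_measurement_failure_and_annotations' ./pkg/oonimkall/

The pattern uses underscores because go test rewrites spaces in
subtest names to underscores.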