copy Dependencies before sorting in state

Instances of the same AbsResource may share the same Dependencies slice, which
can point to the same backing array of values. Since the address values
are not pointers and are not meant to be shared, we must copy the slice
before sorting it in place. Because individual instances of the
same resource may be encoded to state concurrently, failing to copy the
slice first can result in a data race.
Author: James Bardin, 2022-04-28 11:16:27 -04:00
Parent: 4cfb6bc893
Commit: 34a01c92d6
2 changed files with 58 additions and 20 deletions
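
To make the failure mode concrete, here is a minimal standalone sketch (not part of the commit) of the aliasing problem the message describes: two objects handed the same Dependencies slice share one backing array, so sorting through either alias mutates both, and doing so from concurrent goroutines is a data race. Plain strings stand in for the addrs.ConfigResource values.

package main

import (
    "fmt"
    "sort"
)

func main() {
    // A single slice handed to two "instances": both alias the same
    // backing array, just as shared Dependencies do.
    shared := []string{"test.honk", "test.boop"}
    depsA := shared
    depsB := shared

    // An in-place sort through one alias silently reorders the other as
    // well; performed from two goroutines at once it is also a data race
    // that `go run -race` will report.
    sort.Strings(depsA)
    fmt.Println(depsB) // [test.boop test.honk]

    // The pattern adopted by this commit: copy the slice first, then sort
    // the copy, leaving the shared backing array untouched.
    sorted := make([]string, len(shared))
    copy(sorted, shared)
    sort.Strings(sorted)
    fmt.Println(sorted) // [test.boop test.honk]
}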

Changed file 1 of 2

@@ -115,7 +115,12 @@ func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*Res
     // stored in state as an array. To avoid pointless thrashing of state in
     // refresh-only runs, we can either override comparison of dependency lists
     // (more desirable, but tricky for Reasons) or just sort when encoding.
-    sort.Slice(o.Dependencies, func(i, j int) bool { return o.Dependencies[i].String() < o.Dependencies[j].String() })
+    // Encoding of instances can happen concurrently, so we must copy the
+    // dependencies to avoid mutating what may be a shared array of values.
+    dependencies := make([]addrs.ConfigResource, len(o.Dependencies))
+    copy(dependencies, o.Dependencies)
+    sort.Slice(dependencies, func(i, j int) bool { return dependencies[i].String() < dependencies[j].String() })

     return &ResourceInstanceObjectSrc{
         SchemaVersion: schemaVersion,
@@ -123,7 +128,7 @@ func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*Res
         AttrSensitivePaths: pvm,
         Private: o.Private,
         Status: o.Status,
-        Dependencies: o.Dependencies,
+        Dependencies: dependencies,
         CreateBeforeDestroy: o.CreateBeforeDestroy,
     }, nil
 }

Changed file 2 of 2

@@ -1,6 +1,7 @@
 package states

 import (
+    "sync"
     "testing"

     "github.com/google/go-cmp/cmp"
@@ -24,27 +25,59 @@ func TestResourceInstanceObject_encode(t *testing.T) {
         addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "boop"),
         addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "honk"),
     }
-    rioOne := &ResourceInstanceObject{
-        Value: value,
-        Status: ObjectPlanned,
-        Dependencies: depsOne,
+    // multiple instances may have been assigned the same deps slice
+    objs := []*ResourceInstanceObject{
+        &ResourceInstanceObject{
+            Value: value,
+            Status: ObjectPlanned,
+            Dependencies: depsOne,
+        },
+        &ResourceInstanceObject{
+            Value: value,
+            Status: ObjectPlanned,
+            Dependencies: depsTwo,
+        },
+        &ResourceInstanceObject{
+            Value: value,
+            Status: ObjectPlanned,
+            Dependencies: depsOne,
+        },
+        &ResourceInstanceObject{
+            Value: value,
+            Status: ObjectPlanned,
+            Dependencies: depsOne,
+        },
     }
-    rioTwo := &ResourceInstanceObject{
-        Value: value,
-        Status: ObjectPlanned,
-        Dependencies: depsTwo,
-    }
-    riosOne, err := rioOne.Encode(value.Type(), 0)
-    if err != nil {
-        t.Fatalf("unexpected error: %s", err)
-    }
-    riosTwo, err := rioTwo.Encode(value.Type(), 0)
-    if err != nil {
-        t.Fatalf("unexpected error: %s", err)
+    var encoded []*ResourceInstanceObjectSrc
+
+    // Encoding can happen concurrently, so we need to make sure the shared
+    // Dependencies are safely handled
+    var wg sync.WaitGroup
+    var mu sync.Mutex
+
+    for _, obj := range objs {
+        obj := obj
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            rios, err := obj.Encode(value.Type(), 0)
+            if err != nil {
+                t.Errorf("unexpected error: %s", err)
+            }
+            mu.Lock()
+            encoded = append(encoded, rios)
+            mu.Unlock()
+        }()
     }
+    wg.Wait()

     // However, identical sets of dependencies should always be written to state
     // in an identical order, so we don't do meaningless state updates on refresh.
-    if diff := cmp.Diff(riosOne.Dependencies, riosTwo.Dependencies); diff != "" {
-        t.Errorf("identical dependencies got encoded in different orders:\n%s", diff)
+    for i := 0; i < len(encoded)-1; i++ {
+        if diff := cmp.Diff(encoded[i].Dependencies, encoded[i+1].Dependencies); diff != "" {
+            t.Errorf("identical dependencies got encoded in different orders:\n%s", diff)
+        }
     }
 }
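
With the copy in place, this test passes under Go's race detector; without the fix, the concurrent Encode calls on objects sharing a Dependencies slice should be reported as a data race. Something like the following should reproduce it (the package path is an assumption; adjust it to wherever the states package lives in your checkout):

go test -race -run TestResourceInstanceObject_encode ./internal/states/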