tofu: context.Context plumbed into the graph walk driver

Earlier commits arranged for each of the exported tofu.Context methods
that perform graph-based operations to take a context.Context from
their callers, and for the main callers in package command and package
backend to connect those contexts to the top-level context from
"package main", which can potentially have an OpenTelemetry span
attached to it.
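
As a minimal sketch of that calling pattern (the tracer and span names
here are illustrative, not the ones we actually use), the top-level
context might be created like this:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
    )

    func main() {
        // The top-level context can carry an OpenTelemetry span...
        ctx, span := otel.Tracer("opentofu").Start(context.Background(), "apply")
        defer span.End()

        // ...and callers pass that context into the exported
        // graph-operation methods, e.g.:
        //   plan, diags := tofuCtx.Plan(ctx, config, prevRunState, opts)
        _ = ctx
    }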

This propagates those contexts a little deeper into the guts of the
language runtime, carrying them as far as the shared logic that drives
a graph walk.

The next step from here would be to change the interfaces
GraphNodeExecutable and GraphNodeDynamicExpandable so that their
methods each take a context.Context, but that would involve a big,
sprawling update to every implementation of those interfaces, so we'll
save it for a later commit to keep this one relatively clean.
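
For illustration, that later change would make the two interfaces look
something like the following; the "before" signatures are today's, and
the "after" signatures are an assumption about the future commit, not
something this commit implements:

    type GraphNodeExecutable interface {
        // before: Execute(EvalContext, walkOperation) tfdiags.Diagnostics
        Execute(ctx context.Context, evalCtx EvalContext, op walkOperation) tfdiags.Diagnostics
    }

    type GraphNodeDynamicExpandable interface {
        // before: DynamicExpand(EvalContext) (*Graph, error)
        DynamicExpand(ctx context.Context, evalCtx EvalContext) (*Graph, error)
    }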

This commit also reaches the first point of ambiguity, where our older
conventions call for "ctx" to be the variable name for a
tofu.EvalContext rather than a context.Context. Since "ctx
context.Context" is a core idiom in the Go community, we'll switch to
"evalCtx" as the variable name for tofu.EvalContext, both here and in
the future commits that will modify the two main graph-walk interfaces
that make extensive use of tofu.EvalContext.
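
In practice the convention looks like this, abridged from the graph.go
change below with the body elided down to the two names in question:

    func (g *Graph) walk(ctx context.Context, walker GraphWalker) tfdiags.Diagnostics {
        evalCtx := walker.EvalContext() // the tofu.EvalContext is "evalCtx"...
        _, _ = ctx, evalCtx             // ...leaving "ctx" for the context.Context
        return nil                      // remainder of the walk elided in this sketch
    }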

Signed-off-by: Martin Atkins <mart@degeneration.co.uk>
Martin Atkins 2024-11-12 17:01:39 -08:00
parent a8b6342556
commit 2448204201
7 changed files with 28 additions and 28 deletions

@@ -63,7 +63,7 @@ func (c *Context) Apply(ctx context.Context, plan *plans.Plan, config *configs.C
 	}

 	workingState := plan.PriorState.DeepCopy()
-	walker, walkDiags := c.walk(graph, operation, &graphWalkOpts{
+	walker, walkDiags := c.walk(ctx, graph, operation, &graphWalkOpts{
 		Config:     config,
 		InputState: workingState,
 		Changes:    plan.Changes,

@@ -85,7 +85,7 @@ func (c *Context) Eval(ctx context.Context, config *configs.Config, state *state
 		ProviderFunctionTracker: providerFunctionTracker,
 	}

-	walker, moreDiags = c.walk(graph, walkEval, walkOpts)
+	walker, moreDiags = c.walk(ctx, graph, walkEval, walkOpts)
 	diags = diags.Append(moreDiags)
 	if walker != nil {
 		diags = diags.Append(walker.NonFatalDiagnostics)

@@ -273,7 +273,7 @@ func (c *Context) Import(ctx context.Context, config *configs.Config, prevRunSta
 	}

 	// Walk it
-	walker, walkDiags := c.walk(graph, walkImport, &graphWalkOpts{
+	walker, walkDiags := c.walk(ctx, graph, walkImport, &graphWalkOpts{
 		Config:                  config,
 		InputState:              state,
 		ProviderFunctionTracker: providerFunctionTracker,

@@ -213,11 +213,11 @@ The -target and -exclude options are not for routine use, and are provided only
 	var planDiags tfdiags.Diagnostics
 	switch opts.Mode {
 	case plans.NormalMode:
-		plan, planDiags = c.plan(config, prevRunState, opts)
+		plan, planDiags = c.plan(ctx, config, prevRunState, opts)
 	case plans.DestroyMode:
-		plan, planDiags = c.destroyPlan(config, prevRunState, opts)
+		plan, planDiags = c.destroyPlan(ctx, config, prevRunState, opts)
 	case plans.RefreshOnlyMode:
-		plan, planDiags = c.refreshOnlyPlan(config, prevRunState, opts)
+		plan, planDiags = c.refreshOnlyPlan(ctx, config, prevRunState, opts)
 	default:
 		panic(fmt.Sprintf("unsupported plan mode %s", opts.Mode))
 	}
@@ -318,7 +318,7 @@ func SimplePlanOpts(mode plans.Mode, setVariables InputValues) *PlanOpts {
 	}
 }

-func (c *Context) plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
+func (c *Context) plan(ctx context.Context, config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	if opts.Mode != plans.NormalMode {
@@ -340,20 +340,20 @@ func (c *Context) plan(config *configs.Config, prevRunState *states.State, opts
 		return nil, diags
 	}

-	plan, walkDiags := c.planWalk(config, prevRunState, opts)
+	plan, walkDiags := c.planWalk(ctx, config, prevRunState, opts)
 	diags = diags.Append(walkDiags)

 	return plan, diags
 }

-func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
+func (c *Context) refreshOnlyPlan(ctx context.Context, config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	if opts.Mode != plans.RefreshOnlyMode {
 		panic(fmt.Sprintf("called Context.refreshOnlyPlan with %s", opts.Mode))
 	}

-	plan, walkDiags := c.planWalk(config, prevRunState, opts)
+	plan, walkDiags := c.planWalk(ctx, config, prevRunState, opts)
 	diags = diags.Append(walkDiags)
 	if diags.HasErrors() {
 		// Non-nil plan along with errors indicates a non-applyable partial
@@ -390,7 +390,7 @@ func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.S
 	return plan, diags
 }

-func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
+func (c *Context) destroyPlan(ctx context.Context, config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	if opts.Mode != plans.DestroyMode {
@@ -422,7 +422,7 @@ func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State
 		// the destroy plan should take care of refreshing instances itself,
 		// where the special cases of evaluation and skipping condition checks
 		// can be done.
-		refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
+		refreshPlan, refreshDiags := c.plan(ctx, config, prevRunState, &refreshOpts)
 		if refreshDiags.HasErrors() {
 			// NOTE: Normally we'd append diagnostics regardless of whether
 			// there are errors, just in case there are warnings we'd want to
@@ -451,7 +451,7 @@ func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State
 		log.Printf("[TRACE] Context.destroyPlan: now _really_ creating a destroy plan")
 	}

-	destroyPlan, walkDiags := c.planWalk(config, priorState, opts)
+	destroyPlan, walkDiags := c.planWalk(ctx, config, priorState, opts)
 	diags = diags.Append(walkDiags)
 	if walkDiags.HasErrors() {
 		// Non-nil plan along with errors indicates a non-applyable partial
@@ -741,7 +741,7 @@ func importResourceWithoutConfigDiags(addressStr string, config *configs.Import)
 	return &diag
 }

-func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
+func (c *Context) planWalk(ctx context.Context, config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	log.Printf("[DEBUG] Building and walking plan graph for %s", opts.Mode)
@@ -770,7 +770,7 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, o
 	// If we get here then we should definitely have a non-nil "graph", which
 	// we can now walk.
 	changes := plans.NewChanges()
-	walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{
+	walker, walkDiags := c.walk(ctx, graph, walkOp, &graphWalkOpts{
 		Config:     config,
 		InputState: prevRunState,
 		Changes:    changes,

@@ -76,7 +76,7 @@ func (c *Context) Validate(ctx context.Context, config *configs.Config) tfdiags.
 		return diags
 	}

-	walker, walkDiags := c.walk(graph, walkValidate, &graphWalkOpts{
+	walker, walkDiags := c.walk(ctx, graph, walkValidate, &graphWalkOpts{
 		Config:                  config,
 		ProviderFunctionTracker: providerFunctionTracker,
 	})

@@ -6,6 +6,7 @@
 package tofu

 import (
+	"context"
 	"log"
 	"time"
@@ -48,7 +49,7 @@ type graphWalkOpts struct {
 	ProviderFunctionTracker ProviderFunctionMapping
 }

-func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) {
+func (c *Context) walk(ctx context.Context, graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) {
 	log.Printf("[DEBUG] Starting graph walk: %s", operation.String())

 	walker := c.graphWalker(operation, opts)
@@ -57,7 +58,7 @@ func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpt
 	watchStop, watchWait := c.watchStop(walker)

 	// Walk the real graph, this will block until it completes
-	diags := graph.Walk(walker)
+	diags := graph.Walk(ctx, walker)

 	// Close the channel so the watcher stops, and wait for it to return.
 	close(watchStop)

@@ -6,16 +6,15 @@
 package tofu

 import (
+	"context"
 	"fmt"
 	"log"
 	"strings"

+	"github.com/opentofu/opentofu/internal/addrs"
+	"github.com/opentofu/opentofu/internal/dag"
 	"github.com/opentofu/opentofu/internal/logging"
 	"github.com/opentofu/opentofu/internal/tfdiags"
-
-	"github.com/opentofu/opentofu/internal/addrs"
-	"github.com/opentofu/opentofu/internal/dag"
 )

 // Graph represents the graph that OpenTofu uses to represent resources
@@ -36,13 +35,13 @@ func (g *Graph) DirectedGraph() dag.Grapher {
 // Walk walks the graph with the given walker for callbacks. The graph
 // will be walked with full parallelism, so the walker should expect
 // to be called in concurrently.
-func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics {
-	return g.walk(walker)
+func (g *Graph) Walk(ctx context.Context, walker GraphWalker) tfdiags.Diagnostics {
+	return g.walk(ctx, walker)
 }

-func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
+func (g *Graph) walk(ctx context.Context, walker GraphWalker) tfdiags.Diagnostics {
 	// The callbacks for enter/exiting a graph
-	ctx := walker.EvalContext()
+	evalCtx := walker.EvalContext()

 	// We explicitly create the panicHandler before
 	// spawning many go routines for vertex evaluation
@@ -75,7 +74,7 @@ func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
 		// vertexCtx is the context that we use when evaluating. This
 		// is normally the context of our graph but can be overridden
 		// with a GraphNodeModuleInstance impl.
-		vertexCtx := ctx
+		vertexCtx := evalCtx
 		if pn, ok := v.(GraphNodeModuleInstance); ok {
 			vertexCtx = walker.EnterPath(pn.Path())
 			defer walker.ExitPath(pn.Path())
@@ -124,7 +123,7 @@ func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {

 			// Walk the subgraph
 			log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v))
-			subDiags := g.walk(walker)
+			subDiags := g.walk(ctx, walker)
 			diags = diags.Append(subDiags)
 			if subDiags.HasErrors() {
 				var errs []string