diff --git a/internal/lsp/cache/pkg.go b/internal/lsp/cache/pkg.go
index eb90209c..725cc78d 100644
--- a/internal/lsp/cache/pkg.go
+++ b/internal/lsp/cache/pkg.go
@@ -32,7 +32,8 @@ type Package struct {
 }
 
 type analysisEntry struct {
-	ready chan struct{}
+	done      chan struct{}
+	succeeded bool
 	*source.Action
 }
 
@@ -49,14 +50,20 @@ func (pkg *Package) GetActionGraph(ctx context.Context, a *analysis.Analyzer) (*
 
 		// wait for entry to become ready or the context to be cancelled
 		select {
-		case <-e.ready:
+		case <-e.done:
+			// If the goroutine we are waiting on was cancelled, we should retry.
+			// If errors other than cancelation/timeout become possible, it may
+			// no longer be appropriate to always retry here.
+			if !e.succeeded {
+				return pkg.GetActionGraph(ctx, a)
+			}
 		case <-ctx.Done():
 			return nil, ctx.Err()
 		}
 	} else {
 		// cache miss
 		e = &analysisEntry{
-			ready: make(chan struct{}),
+			done: make(chan struct{}),
 			Action: &source.Action{
 				Analyzer: a,
 				Pkg:      pkg,
@@ -65,6 +72,21 @@ func (pkg *Package) GetActionGraph(ctx context.Context, a *analysis.Analyzer) (*
 		pkg.analyses[a] = e
 		pkg.mu.Unlock()
 
+		defer func() {
+			// If we got an error, clear out our defunct cache entry. We don't cache
+			// errors since they could depend on our dependencies, which can change.
+			// Currently the only possible error is context.Canceled, though, which
+			// should also not be cached.
+			if !e.succeeded {
+				pkg.mu.Lock()
+				delete(pkg.analyses, a)
+				pkg.mu.Unlock()
+			}
+
+			// Always close done so waiters don't get stuck.
+			close(e.done)
+		}()
+
 		// This goroutine becomes responsible for populating
 		// the entry and broadcasting its readiness.
@@ -94,7 +116,8 @@ func (pkg *Package) GetActionGraph(ctx context.Context, a *analysis.Analyzer) (*
 				e.Deps = append(e.Deps, act)
 			}
 		}
-		close(e.ready)
+
+		e.succeeded = true
 	}
 	return e.Action, nil
 }
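
For reference, below is a minimal, self-contained sketch of the pattern this change adopts: each cache entry carries a done channel plus a succeeded flag, a deferred cleanup evicts failed (e.g. cancelled) entries before closing done, and waiters that observe an unsuccessful entry retry. This is not the gopls code itself; the cache and entry types, the get method, and the compute callback are illustrative names introduced only for this sketch.

package main

import (
	"context"
	"fmt"
	"sync"
)

type entry struct {
	done      chan struct{} // closed when the owning call finishes, success or not
	succeeded bool          // written before done is closed, read only after <-done
	value     string
}

type cache struct {
	mu      sync.Mutex
	entries map[string]*entry
}

// get returns the cached value for key, computing it at most once per
// successful attempt. Failed attempts are evicted so later callers recompute.
func (c *cache) get(ctx context.Context, key string, compute func(context.Context) (string, error)) (string, error) {
	if ctx.Err() != nil {
		return "", ctx.Err()
	}

	c.mu.Lock()
	if e, ok := c.entries[key]; ok {
		// Cache hit: wait for the owning call or give up on cancellation.
		c.mu.Unlock()
		select {
		case <-e.done:
			if !e.succeeded {
				// The owner failed (e.g. was cancelled) and its entry has
				// been evicted; retry and possibly become the new owner.
				return c.get(ctx, key, compute)
			}
			return e.value, nil
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}

	// Cache miss: this caller becomes responsible for populating the entry.
	e := &entry{done: make(chan struct{})}
	c.entries[key] = e
	c.mu.Unlock()

	defer func() {
		// Evict the entry on failure so later callers recompute,
		// then always close done so current waiters are never stuck.
		if !e.succeeded {
			c.mu.Lock()
			delete(c.entries, key)
			c.mu.Unlock()
		}
		close(e.done)
	}()

	v, err := compute(ctx)
	if err != nil {
		return "", err
	}
	e.value = v
	e.succeeded = true
	return v, nil
}

func main() {
	c := &cache{entries: make(map[string]*entry)}
	v, err := c.get(context.Background(), "k", func(ctx context.Context) (string, error) {
		return "computed once", nil
	})
	fmt.Println(v, err)
}

A note on the design, as reflected in the diff above: succeeded is written only by the owning call before done is closed, and waiters read it only after <-done, so the close provides the necessary happens-before edge and no extra locking is needed for that field. Errors are deliberately not cached, since the only error currently possible is cancellation, which says nothing about whether a later attempt would succeed.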