Compare commits


No commits in common. "master" and "gopls/v0.1.3" have entirely different histories.

113 changed files with 1746 additions and 4576 deletions

View File

@ -1,83 +1,18 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The digraph command performs queries over unlabelled directed graphs
represented in text form. It is intended to integrate nicely with
typical UNIX command pipelines.
Usage:
your-application | digraph [command]
The supported commands are:
nodes
the set of all nodes
degree
the in-degree and out-degree of each node
preds <node> ...
the set of immediate predecessors of the specified nodes
succs <node> ...
the set of immediate successors of the specified nodes
forward <node> ...
the set of nodes transitively reachable from the specified nodes
reverse <node> ...
the set of nodes that transitively reach the specified nodes
somepath <node> <node>
the list of nodes on some arbitrary path from the first node to the second
allpaths <node> <node>
the set of nodes on all paths from the first node to the second
sccs
all strongly connected components (one per line)
scc <node>
	the set of nodes strongly connected to the specified one
Input format:
Each line contains zero or more words. Words are separated by unquoted
whitespace; words may contain Go-style double-quoted portions, allowing spaces
and other characters to be expressed.
Each word declares a node, and if there are more than one, an edge from the
first to each subsequent one. The graph is provided on the standard input.
For instance, the following (acyclic) graph specifies a partial order among the
subtasks of getting dressed:
$ cat clothes.txt
socks shoes
"boxer shorts" pants
pants belt shoes
shirt tie sweater
sweater jacket
hat
The line "shirt tie sweater" indicates the two edges shirt -> tie and
shirt -> sweater, not shirt -> tie -> sweater.
Example usage:
Using digraph with existing Go tools:
$ go mod graph | digraph nodes # Operate on the Go module graph.
$ go list -m all | digraph nodes # Operate on the Go package graph.
Show the transitive closure of imports of the digraph tool itself:
$ go list -f '{{.ImportPath}} {{join .Imports " "}}' ... | digraph forward golang.org/x/tools/cmd/digraph
Show which clothes (see above) must be donned before a jacket:
$ digraph reverse jacket
*/
package main // import "golang.org/x/tools/cmd/digraph"
// The digraph command performs queries over unlabelled directed graphs
// represented in text form. It is intended to integrate nicely with
// typical UNIX command pipelines.
//
// Since directed graphs (import graphs, reference graphs, call graphs,
// etc) often arise during software tool development and debugging, this
// command is included in the go.tools repository.
//
// TODO(adonovan):
// - support input files other than stdin
// - support alternative formats (AT&T GraphViz, CSV, etc),
// a comment syntax, etc.
// - allow queries to nest, like Blaze query language.
//
package main // import "golang.org/x/tools/cmd/digraph"
import (
"bufio"
@ -93,41 +28,74 @@ import (
"unicode/utf8"
)
func usage() {
fmt.Fprintf(os.Stderr, `Usage: your-application | digraph [command]
const Usage = `digraph: queries over directed graphs in text form.
The supported commands are:
nodes
the set of all nodes
degree
the in-degree and out-degree of each node
preds <node> ...
the set of immediate predecessors of the specified nodes
succs <node> ...
the set of immediate successors of the specified nodes
forward <node> ...
the set of nodes transitively reachable from the specified nodes
reverse <node> ...
the set of nodes that transitively reach the specified nodes
somepath <node> <node>
the list of nodes on some arbitrary path from the first node to the second
allpaths <node> <node>
the set of nodes on all paths from the first node to the second
sccs
all strongly connected components (one per line)
scc <node>
	the set of nodes strongly connected to the specified one
`)
os.Exit(2)
}
Graph format:
Each line contains zero or more words. Words are separated by
unquoted whitespace; words may contain Go-style double-quoted portions,
allowing spaces and other characters to be expressed.
Each field declares a node, and if there are more than one,
an edge from the first to each subsequent one.
The graph is provided on the standard input.
For instance, the following (acyclic) graph specifies a partial order
among the subtasks of getting dressed:
% cat clothes.txt
socks shoes
"boxer shorts" pants
pants belt shoes
shirt tie sweater
sweater jacket
hat
The line "shirt tie sweater" indicates the two edges shirt -> tie and
shirt -> sweater, not shirt -> tie -> sweater.
Supported queries:
nodes
the set of all nodes
degree
the in-degree and out-degree of each node.
preds <label> ...
the set of immediate predecessors of the specified nodes
succs <label> ...
the set of immediate successors of the specified nodes
forward <label> ...
the set of nodes transitively reachable from the specified nodes
reverse <label> ...
the set of nodes that transitively reach the specified nodes
somepath <label> <label>
the list of nodes on some arbitrary path from the first node to the second
allpaths <label> <label>
the set of nodes on all paths from the first node to the second
sccs
all strongly connected components (one per line)
scc <label>
	the set of nodes strongly connected to the specified one
Example usage:
Show the transitive closure of imports of the digraph tool itself:
% go list -f '{{.ImportPath}}{{.Imports}}' ... | tr '[]' ' ' |
digraph forward golang.org/x/tools/cmd/digraph
Show which clothes (see above) must be donned before a jacket:
% digraph reverse jacket <clothes.txt
`
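The edge-list format spelled out in the usage text above is simple: the first field on a line names a node, and every later field adds an edge from it. The following standalone Go sketch applies that rule using strings.Fields, so unlike the real parser it ignores the Go-style quoted fields the format allows; the adjacency-map shape and the printed output are for illustration only.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	g := make(map[string]map[string]bool) // node -> set of successors
	scan := bufio.NewScanner(os.Stdin)
	for scan.Scan() {
		words := strings.Fields(scan.Text())
		if len(words) == 0 {
			continue
		}
		from := words[0]
		if g[from] == nil {
			g[from] = make(map[string]bool) // declare the node even if it has no edges
		}
		for _, to := range words[1:] {
			g[from][to] = true // "shirt tie sweater" adds shirt->tie and shirt->sweater
		}
	}
	for from, succs := range g {
		for to := range succs {
			fmt.Println(from, "->", to)
		}
	}
}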
func main() {
flag.Usage = usage
flag.Usage = func() { fmt.Fprintln(os.Stderr, Usage) }
flag.Parse()
args := flag.Args()
if len(args) == 0 {
usage()
fmt.Fprintln(os.Stderr, Usage)
return
}
if err := digraph(args[0], args[1:]); err != nil {
@ -262,47 +230,6 @@ func (g graph) sccs() []nodeset {
return sccs
}
func (g graph) allpaths(from, to string) error {
// Mark all nodes to "to".
seen := make(nodeset) // value of seen[x] indicates whether x is on some path to "to"
var visit func(node string) bool
visit = func(node string) bool {
reachesTo, ok := seen[node]
if !ok {
reachesTo = node == to
seen[node] = reachesTo
for e := range g[node] {
if visit(e) {
reachesTo = true
}
}
if reachesTo && node != to {
seen[node] = true
}
}
return reachesTo
}
visit(from)
// For each marked node, collect its marked successors.
var edges []string
for n := range seen {
for succ := range g[n] {
if seen[succ] {
edges = append(edges, n+" "+succ)
}
}
}
// Sort (so that this method is deterministic) and print edges.
sort.Strings(edges)
for _, e := range edges {
fmt.Fprintln(stdout, e)
}
return nil
}
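The method above works in two passes: a recursive visit marks each node with whether it can reach "to", then a sweep keeps only the marked nodes and edges. A minimal sketch of the marking pass on a plain adjacency map; the map shape and the nodesOnPaths name are illustrative, not part of this file.

// nodesOnPaths returns the set of nodes lying on some path from "from" to "to".
func nodesOnPaths(g map[string]map[string]bool, from, to string) map[string]bool {
	seen := make(map[string]bool) // seen[x] records whether x reaches "to"
	var visit func(n string) bool
	visit = func(n string) bool {
		reaches, ok := seen[n]
		if !ok {
			reaches = n == to
			seen[n] = reaches // provisional mark so cycles terminate
			for succ := range g[n] {
				if visit(succ) {
					reaches = true
				}
			}
			seen[n] = reaches
		}
		return reaches
	}
	visit(from)
	for n, reaches := range seen {
		if !reaches {
			delete(seen, n) // drop nodes that never reach "to"
		}
	}
	return seen
}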
func parse(rd io.Reader) (graph, error) {
g := make(graph)
@ -325,7 +252,6 @@ func parse(rd io.Reader) (graph, error) {
return g, nil
}
// Overridable for testing purposes.
var stdin io.Reader = os.Stdin
var stdout io.Writer = os.Stdout
@ -440,7 +366,33 @@ func digraph(cmd string, args []string) error {
if g[to] == nil {
return fmt.Errorf("no such 'to' node %q", to)
}
g.allpaths(from, to)
seen := make(nodeset) // value of seen[x] indicates whether x is on some path to 'to'
var visit func(label string) bool
visit = func(label string) bool {
reachesTo, ok := seen[label]
if !ok {
reachesTo = label == to
seen[label] = reachesTo
for e := range g[label] {
if visit(e) {
reachesTo = true
}
}
seen[label] = reachesTo
}
return reachesTo
}
if !visit(from) {
return fmt.Errorf("no path from %q to %q", from, to)
}
for label, reachesTo := range seen {
if !reachesTo {
delete(seen, label)
}
}
seen.sort().println("\n")
case "sccs":
if len(args) != 0 {

View File

@ -1,6 +1,3 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
@ -29,34 +26,35 @@ d c
`
for _, test := range []struct {
name string
input string
cmd string
args []string
want string
}{
{"nodes", g1, "nodes", nil, "belt\nhat\njacket\npants\nshirt\nshoes\nshorts\nsocks\nsweater\ntie\n"},
{"reverse", g1, "reverse", []string{"jacket"}, "jacket\nshirt\nsweater\n"},
{"forward", g1, "forward", []string{"socks"}, "shoes\nsocks\n"},
{"forward multiple args", g1, "forward", []string{"socks", "sweater"}, "jacket\nshoes\nsocks\nsweater\n"},
{"scss", g2, "sccs", nil, "a\nb\nc d\n"},
{"scc", g2, "scc", []string{"d"}, "c\nd\n"},
{"succs", g2, "succs", []string{"a"}, "b\nc\n"},
{"preds", g2, "preds", []string{"c"}, "a\nd\n"},
{"preds multiple args", g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"},
} {
t.Run(test.name, func(t *testing.T) {
stdin = strings.NewReader(test.input)
stdout = new(bytes.Buffer)
if err := digraph(test.cmd, test.args); err != nil {
t.Fatal(err)
}
{g1, "nodes", nil, "belt\nhat\njacket\npants\nshirt\nshoes\nshorts\nsocks\nsweater\ntie\n"},
{g1, "reverse", []string{"jacket"}, "jacket\nshirt\nsweater\n"},
{g1, "forward", []string{"socks"}, "shoes\nsocks\n"},
{g1, "forward", []string{"socks", "sweater"}, "jacket\nshoes\nsocks\nsweater\n"},
got := stdout.(fmt.Stringer).String()
if got != test.want {
t.Errorf("digraph(%s, %s) = got %q, want %q", test.cmd, test.args, got, test.want)
}
})
{g2, "allpaths", []string{"a", "d"}, "a\nb\nc\nd\n"},
{g2, "sccs", nil, "a\nb\nc d\n"},
{g2, "scc", []string{"d"}, "c\nd\n"},
{g2, "succs", []string{"a"}, "b\nc\n"},
{g2, "preds", []string{"c"}, "a\nd\n"},
{g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"},
} {
stdin = strings.NewReader(test.input)
stdout = new(bytes.Buffer)
if err := digraph(test.cmd, test.args); err != nil {
t.Error(err)
continue
}
got := stdout.(fmt.Stringer).String()
if got != test.want {
t.Errorf("digraph(%s, %s) = %q, want %q", test.cmd, test.args, got, test.want)
}
}
// TODO(adonovan):
@ -64,110 +62,6 @@ d c
// - test errors
}
func TestAllpaths(t *testing.T) {
for _, test := range []struct {
name string
in string
to string // from is always "A"
want string
}{
{
name: "Basic",
in: "A B\nB C",
to: "B",
want: "A B\n",
},
{
name: "Long",
in: "A B\nB C\n",
to: "C",
want: "A B\nB C\n",
},
{
name: "Cycle Basic",
in: "A B\nB A",
to: "B",
want: "A B\nB A\n",
},
{
name: "Cycle Path Out",
// A <-> B -> C -> D
in: "A B\nB A\nB C\nC D",
to: "C",
want: "A B\nB A\nB C\n",
},
{
name: "Cycle Path Out Further Out",
// A -> B <-> C -> D -> E
in: "A B\nB C\nC D\nC B\nD E",
to: "D",
want: "A B\nB C\nC B\nC D\n",
},
{
name: "Two Paths Basic",
// /-> C --\
// A -> B -- -> E -> F
// \-> D --/
in: "A B\nB C\nC E\nB D\nD E\nE F",
to: "E",
want: "A B\nB C\nB D\nC E\nD E\n",
},
{
name: "Two Paths With One Immediately From Start",
// /-> B -+ -> D
// A -- |
// \-> C <+
in: "A B\nA C\nB C\nB D",
to: "C",
want: "A B\nA C\nB C\n",
},
{
name: "Two Paths Further Up",
// /-> B --\
// A -- -> D -> E -> F
// \-> C --/
in: "A B\nA C\nB D\nC D\nD E\nE F",
to: "E",
want: "A B\nA C\nB D\nC D\nD E\n",
},
{
// We should include A - C - D even though it's further up the
// second path than D (which would already be in the graph by
// the time we get around to integrating the second path).
name: "Two Splits",
// /-> B --\ /-> E --\
// A -- -> D -- -> G -> H
// \-> C --/ \-> F --/
in: "A B\nA C\nB D\nC D\nD E\nD F\nE G\nF G\nG H",
to: "G",
want: "A B\nA C\nB D\nC D\nD E\nD F\nE G\nF G\n",
},
{
// D - E should not be duplicated.
name: "Two Paths - Two Splits With Gap",
// /-> B --\ /-> F --\
// A -- -> D -> E -- -> H -> I
// \-> C --/ \-> G --/
in: "A B\nA C\nB D\nC D\nD E\nE F\nE G\nF H\nG H\nH I",
to: "H",
want: "A B\nA C\nB D\nC D\nD E\nE F\nE G\nF H\nG H\n",
},
} {
t.Run(test.name, func(t *testing.T) {
stdin = strings.NewReader(test.in)
stdout = new(bytes.Buffer)
if err := digraph("allpaths", []string{"A", test.to}); err != nil {
t.Fatal(err)
}
got := stdout.(fmt.Stringer).String()
if got != test.want {
t.Errorf("digraph(allpaths, A, %s) = got %q, want %q", test.to, got, test.want)
}
})
}
}
func TestSplit(t *testing.T) {
for _, test := range []struct {
line string

View File

@ -19,5 +19,5 @@ import (
func main() {
debug.Version += "-cmd.gopls"
tool.Main(context.Background(), cmd.New("gopls-legacy", "", nil), os.Args[1:])
tool.Main(context.Background(), cmd.New("", nil), os.Args[1:])
}

View File

@ -99,16 +99,6 @@ func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
s.mu.Unlock()
}
func (s *Set) AllObjectFacts() []analysis.ObjectFact {
var facts []analysis.ObjectFact
for k, v := range s.m {
if k.obj != nil {
facts = append(facts, analysis.ObjectFact{k.obj, v})
}
}
return facts
}
// ImportPackageFact implements analysis.Pass.ImportPackageFact.
func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
if pkg == nil {
@ -132,16 +122,6 @@ func (s *Set) ExportPackageFact(fact analysis.Fact) {
s.mu.Unlock()
}
func (s *Set) AllPackageFacts() []analysis.PackageFact {
var facts []analysis.PackageFact
for k, v := range s.m {
if k.obj == nil {
facts = append(facts, analysis.PackageFact{k.pkg, v})
}
}
return facts
}
// gobFact is the Gob declaration of a serialized fact.
type gobFact struct {
PkgPath string // path of package

View File

@ -334,10 +334,8 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: facts.ImportObjectFact,
ExportObjectFact: facts.ExportObjectFact,
AllObjectFacts: facts.AllObjectFacts,
ImportPackageFact: facts.ImportPackageFact,
ExportPackageFact: facts.ExportPackageFact,
AllPackageFacts: facts.AllPackageFacts,
}
t0 := time.Now()

View File

@ -149,7 +149,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) {
}
case token.FALLTHROUGH:
for t := b.targets; t != nil && block == nil; t = t.tail {
for t := b.targets; t != nil; t = t.tail {
block = t._fallthrough
}

View File

@ -811,15 +811,7 @@ func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, err
// Import of incomplete package: this indicates a cycle.
fromPath := from.Pkg.Path()
if cycle := imp.findPath(path, fromPath); cycle != nil {
// Normalize cycle: start from alphabetically largest node.
pos, start := -1, ""
for i, s := range cycle {
if pos < 0 || s > start {
pos, start = i, s
}
}
cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest
cycle = append(cycle, cycle[0]) // add start node to end to show cycliness
cycle = append([]string{fromPath}, cycle...)
return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
}
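The rotation idiom in the removed block is terse; a small self-contained illustration with hypothetical values shows what it produces.

package main

import (
	"fmt"
	"strings"
)

func main() {
	cycle := []string{"b", "c", "a"} // hypothetical cycle, discovered in arbitrary order
	pos := 1                         // index of the alphabetically largest node, "c"
	cycle = append(cycle, cycle[:pos]...)[pos:] // rotate: ["c" "a" "b"]
	cycle = append(cycle, cycle[0])             // close the loop: ["c" "a" "b" "c"]
	fmt.Println(strings.Join(cycle, " -> "))    // prints: c -> a -> b -> c
}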

View File

@ -316,7 +316,9 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer
startWalk := time.Now()
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
cfg.Logf("%v for walk", time.Since(startWalk))
if debug {
log.Printf("%v for walk", time.Since(startWalk))
}
// Weird special case: the top-level package in a module will be in
// whatever directory the user checked the repository out into. It's
@ -757,9 +759,11 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
cmd.Dir = cfg.Dir
cmd.Stdout = stdout
cmd.Stderr = stderr
defer func(start time.Time) {
cfg.Logf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
}(time.Now())
if debug {
defer func(start time.Time) {
log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
}(time.Now())
}
if err := cmd.Run(); err != nil {
// Check for 'go' executable not being found.

View File

@ -103,12 +103,6 @@ type Config struct {
// If Context is nil, the load cannot be cancelled.
Context context.Context
// Logf is the logger for the config.
// If the user provides a logger, debug logging is enabled.
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the logger is nil, default to log.Printf.
Logf func(format string, args ...interface{})
// Dir is the directory in which to run the build system's query tool
// that provides information about the packages.
// If Dir is empty, the tool is run in the current directory.
@ -435,17 +429,6 @@ func newLoader(cfg *Config) *loader {
}
if cfg != nil {
ld.Config = *cfg
// If the user has provided a logger, use it.
ld.Config.Logf = cfg.Logf
}
if ld.Config.Logf == nil {
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the user has not provided a logger, default to log.Printf.
if debug {
ld.Config.Logf = log.Printf
} else {
ld.Config.Logf = func(format string, args ...interface{}) {}
}
}
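Config.Logf and the defaulting above exist on the master side of this compare: callers see the driver's debug output either by setting GOPACKAGESDEBUG=true or by supplying their own logger. A minimal sketch under that assumption (the load pattern is chosen arbitrarily):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles,
		Logf: log.Printf, // leaving this nil with GOPACKAGESDEBUG=true has the same effect
	}
	pkgs, err := packages.Load(cfg, "golang.org/x/tools/cmd/digraph")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath)
	}
}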
if ld.Config.Mode == 0 {
ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.

View File

@ -930,12 +930,6 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) {
"b/b.go": `package b; import "golang.org/fake/c"; const B = "b" + c.C`,
"c/c.go": `package c; const C = "c"`,
"d/d.go": `package d; const D = "d"`,
// TODO: Remove these temporary files when golang.org/issue/33157 is resolved.
filepath.Join("e/e_temp.go"): ``,
filepath.Join("f/f_temp.go"): ``,
filepath.Join("g/g_temp.go"): ``,
filepath.Join("h/h_temp.go"): ``,
}}})
defer exported.Cleanup()
@ -992,11 +986,7 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) {
} {
exported.Config.Overlay = test.overlay
exported.Config.Mode = packages.LoadAllSyntax
exported.Config.Logf = t.Logf
// With an overlay, we don't know the expected import path,
// so load with the absolute path of the directory.
initial, err := packages.Load(exported.Config, filepath.Join(dir, "e"))
initial, err := packages.Load(exported.Config, "golang.org/fake/e")
if err != nil {
t.Error(err)
continue

View File

@ -2,6 +2,6 @@ module golang.org/x/tools/gopls
go 1.11
require golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca
require golang.org/x/tools v0.0.0-20190710153321-831012c29e42
replace golang.org/x/tools => ../

View File

@ -1,8 +1,8 @@
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca h1:SqwJrz6xPBlCUltcEHz2/p01HRPR+VGD+aYLikk8uas=
golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=

View File

@ -17,5 +17,5 @@ import (
)
func main() {
tool.Main(context.Background(), cmd.New("gopls", "", nil), os.Args[1:])
tool.Main(context.Background(), cmd.New("", nil), os.Args[1:])
}

View File

@ -181,8 +181,8 @@ func collectReferences(f *ast.File) references {
return refs
}
// collectImports returns all the imports in f.
// Unnamed imports (., _) and "C" are ignored.
// collectImports returns all the imports in f, keyed by their package name as
// determined by pathToName. Unnamed imports (., _) and "C" are ignored.
func collectImports(f *ast.File) []*importInfo {
var imports []*importInfo
for _, imp := range f.Imports {
@ -272,7 +272,7 @@ func (p *pass) loadPackageNames(imports []*importInfo) error {
unknown = append(unknown, imp.importPath)
}
names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir)
names, err := p.env.getResolver().loadPackageNames(unknown, p.srcDir)
if err != nil {
return err
}
@ -444,7 +444,7 @@ func apply(fset *token.FileSet, f *ast.File, fixes []*importFix) bool {
case setImportName:
// Find the matching import path and change the name.
for _, spec := range f.Imports {
path := strings.Trim(spec.Path.Value, `"`)
path := strings.Trim(spec.Path.Value, `""`)
if path == fix.info.importPath {
spec.Name = &ast.Ident{
Name: fix.info.name,
@ -514,7 +514,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
return err
}
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// getFixes gets the getFixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*importFix, error) {
abs, err := filepath.Abs(filename)
@ -595,7 +595,7 @@ type ProcessEnv struct {
// Logf is the default logger for the ProcessEnv.
Logf func(format string, args ...interface{})
resolver Resolver
resolver resolver
}
func (e *ProcessEnv) env() []string {
@ -617,7 +617,7 @@ func (e *ProcessEnv) env() []string {
return env
}
func (e *ProcessEnv) GetResolver() Resolver {
func (e *ProcessEnv) getResolver() resolver {
if e.resolver != nil {
return e.resolver
}
@ -631,7 +631,7 @@ func (e *ProcessEnv) GetResolver() Resolver {
e.resolver = &gopathResolver{env: e}
return e.resolver
}
e.resolver = &ModuleResolver{env: e}
e.resolver = &moduleResolver{env: e}
return e.resolver
}
@ -700,23 +700,20 @@ func addStdlibCandidates(pass *pass, refs references) {
}
}
// A Resolver does the build-system-specific parts of goimports.
type Resolver interface {
// A resolver does the build-system-specific parts of goimports.
type resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan finds (at least) the packages satisfying refs. The returned slice is unordered.
scan(refs references) ([]*pkg, error)
}
// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
// gopathResolver implements resolver for GOPATH and module workspaces using go/packages.
type goPackagesResolver struct {
env *ProcessEnv
}
func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if len(importPaths) == 0 {
return nil, nil
}
cfg := r.env.newPackagesConfig(packages.LoadFiles)
pkgs, err := packages.Load(cfg, importPaths...)
if err != nil {
@ -761,7 +758,7 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
}
func addExternalCandidates(pass *pass, refs references, filename string) error {
dirScan, err := pass.env.GetResolver().scan(refs)
dirScan, err := pass.env.getResolver().scan(refs)
if err != nil {
return err
}
@ -870,7 +867,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}
// importPathToName finds out the actual package name, as declared in its .go files.
// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
// If there's a problem, it returns "".
func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) {
// Fast path for standard library without going to disk.
@ -890,8 +887,8 @@ func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName s
}
// packageDirToName is a faster version of build.Import if
// the only thing desired is the package name. Given a directory,
// packageDirToName then only parses one file in the package,
// the only thing desired is the package name. It uses build.FindOnly
// to find the directory and then only parses one file in the package,
// trusting that the files in the directory are consistent.
func packageDirToName(dir string) (packageName string, err error) {
d, err := os.Open(dir)

View File

@ -1855,7 +1855,7 @@ func TestImportPathToNameGoPathParse(t *testing.T) {
if strings.Contains(t.Name(), "GoPackages") {
t.Skip("go/packages does not ignore package main")
}
r := t.env.GetResolver()
r := t.env.getResolver()
srcDir := filepath.Dir(t.exported.File("example.net/pkg", "z.go"))
names, err := r.loadPackageNames([]string{"example.net/pkg"}, srcDir)
if err != nil {

View File

@ -18,39 +18,37 @@ import (
"golang.org/x/tools/internal/module"
)
// ModuleResolver implements resolver for modules using the go command as little
// moduleResolver implements resolver for modules using the go command as little
// as feasible.
type ModuleResolver struct {
type moduleResolver struct {
env *ProcessEnv
Initialized bool
Main *ModuleJSON
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
ModsByDir []*ModuleJSON // ...or Dir.
ModCachePkgs map[string]*pkg // Packages in the mod cache, keyed by absolute directory.
initialized bool
main *moduleJSON
modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*moduleJSON // ...or Dir.
}
type ModuleJSON struct {
type moduleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *ModuleJSON // replaced by this module
Replace *moduleJSON // replaced by this module
Time *time.Time // time version was created
Update *ModuleJSON // available update, if any (with -u)
Update *moduleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
Error *ModuleErrorJSON // error loading module
Error *moduleErrorJSON // error loading module
}
type ModuleErrorJSON struct {
type moduleErrorJSON struct {
Err string // the error itself
}
func (r *ModuleResolver) init() error {
if r.Initialized {
func (r *moduleResolver) init() error {
if r.initialized {
return nil
}
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
@ -58,7 +56,7 @@ func (r *ModuleResolver) init() error {
return err
}
for dec := json.NewDecoder(stdout); dec.More(); {
mod := &ModuleJSON{}
mod := &moduleJSON{}
if err := dec.Decode(mod); err != nil {
return err
}
@ -69,36 +67,34 @@ func (r *ModuleResolver) init() error {
// Can't do anything with a module that's not downloaded.
continue
}
r.ModsByModPath = append(r.ModsByModPath, mod)
r.ModsByDir = append(r.ModsByDir, mod)
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
r.Main = mod
r.main = mod
}
}
sort.Slice(r.ModsByModPath, func(i, j int) bool {
sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByModPath[x].Path, "/")
return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.ModsByDir, func(i, j int) bool {
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByDir[x].Dir, "/")
return strings.Count(r.modsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
r.ModCachePkgs = make(map[string]*pkg)
r.Initialized = true
r.initialized = true
return nil
}
// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
for _, m := range r.ModsByModPath {
func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
@ -127,7 +123,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
// This is quite tricky and may not be correct. dir could be:
// - a package in the main module.
// - a replace target underneath the main module's directory.
@ -138,7 +134,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
for _, m := range r.ModsByDir {
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
@ -154,7 +150,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
// dirIsNestedModule reports if dir is contained in a nested module underneath
// mod, not actually in mod.
func dirIsNestedModule(dir string, mod *ModuleJSON) bool {
func dirIsNestedModule(dir string, mod *moduleJSON) bool {
if !strings.HasPrefix(dir, mod.Dir) {
return false
}
@ -180,7 +176,7 @@ func findModFile(dir string) string {
}
}
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if err := r.init(); err != nil {
return nil, err
}
@ -199,7 +195,7 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}
func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
if err := r.init(); err != nil {
return nil, err
}
@ -208,15 +204,15 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
roots := []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
if r.Main != nil {
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
if r.main != nil {
roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
}
for _, p := range filepath.SplitList(r.env.GOPATH) {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
}
// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.ModsByModPath {
for _, mod := range r.modsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
@ -236,15 +232,6 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
dupCheck[dir] = true
absDir := dir
// Packages in the module cache are immutable. If we have
// already seen this package on a previous scan of the module
// cache, return that result.
if p, ok := r.ModCachePkgs[absDir]; ok {
result = append(result, p)
return
}
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
@ -260,7 +247,7 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
@ -311,18 +298,10 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
dir = canonicalDir
}
res := &pkg{
result = append(result, &pkg{
importPathShort: VendorlessPath(importPath),
dir: dir,
}
switch root.Type {
case gopathwalk.RootModuleCache:
// Save the results of processing this directory.
r.ModCachePkgs[absDir] = res
}
result = append(result, res)
})
}, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
return result, nil
}

View File

@ -118,25 +118,6 @@ import _ "example.com"
mt.assertFound("example.com", "x")
}
// Tests that scanning the module cache > 1 time is able to find the same module.
func TestModMultipleScans(t *testing.T) {
mt := setup(t, `
-- go.mod --
module x
require example.com v1.0.0
-- x.go --
package x
import _ "example.com"
`, "")
defer mt.cleanup()
mt.assertScanFinds("example.com", "x")
mt.assertScanFinds("example.com", "x")
}
// Tests that -mod=vendor sort of works. Adapted from mod_getmode_vendor.txt.
func TestModeGetmodeVendor(t *testing.T) {
mt := setup(t, `
@ -159,7 +140,7 @@ import _ "rsc.io/quote"
mt.env.GOFLAGS = ""
// Clear out the resolver's cache, since we've changed the environment.
mt.resolver = &ModuleResolver{env: mt.env}
mt.resolver = &moduleResolver{env: mt.env}
mt.assertModuleFoundInDir("rsc.io/quote", "quote", `pkg.*mod.*/quote@.*$`)
}
@ -505,7 +486,7 @@ var proxyDir string
type modTest struct {
*testing.T
env *ProcessEnv
resolver *ModuleResolver
resolver *moduleResolver
cleanup func()
}
@ -557,7 +538,7 @@ func setup(t *testing.T, main, wd string) *modTest {
return &modTest{
T: t,
env: env,
resolver: &ModuleResolver{env: env},
resolver: &moduleResolver{env: env},
cleanup: func() {
_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {

View File

@ -1,124 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonrpc2
import (
"context"
)
// Handler is the interface used to hook into the message handling of an rpc
// connection.
type Handler interface {
// Deliver is invoked to handle incoming requests.
// If the request returns false from IsNotify then the Handler must eventually
// call Reply on the Conn with the supplied request.
// Handlers are called synchronously; they should pass the work off to a
// goroutine if they are going to take a long time.
// If Deliver returns true all subsequent handlers will be invoked with
// delivered set to true, and should not attempt to deliver the message.
Deliver(ctx context.Context, r *Request, delivered bool) bool
// Cancel is invoked for cancelled outgoing requests.
// It is okay to use the connection to send notifications, but the context will
// be in the cancelled state, so you must do it with the background context
// instead.
// If Cancel returns true all subsequent handlers will be invoked with
// cancelled set to true, and should not attempt to cancel the message.
Cancel(ctx context.Context, conn *Conn, id ID, cancelled bool) bool
// Log is invoked for all messages flowing through a Conn.
// direction indicates if the message is being received or sent
// id is the message id, if not set it was a notification
// elapsed is the time between a call being seen and the response, and is
// negative for anything that is not a response.
// method is the method name specified in the message
// payload is the parameters for a call or notification, and the result for a
// response
// Request is called near the start of processing any request.
Request(ctx context.Context, direction Direction, r *WireRequest) context.Context
// Response is called near the start of processing any response.
Response(ctx context.Context, direction Direction, r *WireResponse) context.Context
// Done is called when any request is fully processed.
// For calls, this means the response has also been processed, for notifies
// this is as soon as the message has been written to the stream.
// If err is set, it implies the request failed.
Done(ctx context.Context, err error)
// Read is called with a count each time some data is read from the stream.
// The read calls are delayed until after the data has been interpreted so
// that it can be attributed to a request/response.
Read(ctx context.Context, bytes int64) context.Context
// Wrote is called each time some data is written to the stream.
Wrote(ctx context.Context, bytes int64) context.Context
// Error is called with errors that cannot be delivered through the normal
// mechanisms, for instance a failure to process a notify cannot be delivered
// back to the other party.
Error(ctx context.Context, err error)
}
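A sketch of how the master-side interface above is meant to be used: embed EmptyHandler so only the interesting methods need overriding, then register the value with AddHandler before calling Run. The loggingHandler name and package are invented for illustration, and since jsonrpc2 is internal the code would have to live inside the x/tools module.

package lsplog // hypothetical package inside x/tools

import (
	"context"
	"log"

	"golang.org/x/tools/internal/jsonrpc2"
)

type loggingHandler struct {
	jsonrpc2.EmptyHandler // no-op implementations of the methods we don't override
}

func (loggingHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
	if !delivered {
		log.Printf("<- %s", r.Method)
	}
	return false // let later handlers, including the default one, reply
}

It would be attached with conn.AddHandler(loggingHandler{}) before conn.Run; because AddHandler prepends, the most recently added handler sees each message first.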
// Direction is used to indicate to a logger whether the logged message was being
// sent or received.
type Direction bool
const (
// Send indicates the message is outgoing.
Send = Direction(true)
// Receive indicates the message is incoming.
Receive = Direction(false)
)
func (d Direction) String() string {
switch d {
case Send:
return "send"
case Receive:
return "receive"
default:
panic("unreachable")
}
}
type EmptyHandler struct{}
func (EmptyHandler) Deliver(ctx context.Context, r *Request, delivered bool) bool {
return false
}
func (EmptyHandler) Cancel(ctx context.Context, conn *Conn, id ID, cancelled bool) bool {
return false
}
func (EmptyHandler) Request(ctx context.Context, direction Direction, r *WireRequest) context.Context {
return ctx
}
func (EmptyHandler) Response(ctx context.Context, direction Direction, r *WireResponse) context.Context {
return ctx
}
func (EmptyHandler) Done(ctx context.Context, err error) {
}
func (EmptyHandler) Read(ctx context.Context, bytes int64) context.Context {
return ctx
}
func (EmptyHandler) Wrote(ctx context.Context, bytes int64) context.Context {
return ctx
}
func (EmptyHandler) Error(ctx context.Context, err error) {}
type defaultHandler struct{ EmptyHandler }
func (defaultHandler) Deliver(ctx context.Context, r *Request, delivered bool) bool {
if delivered {
return false
}
if !r.IsNotify() {
r.Reply(ctx, nil, NewErrorf(CodeMethodNotFound, "method %q not found", r.Method))
}
return true
}

View File

@ -13,19 +13,28 @@ import (
"fmt"
"sync"
"sync/atomic"
"time"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
)
// Conn is a JSON RPC 2 client server connection.
// Conn is bidirectional; it does not have a designated server or client end.
type Conn struct {
seq int64 // must only be accessed using atomic operations
handlers []Handler
stream Stream
err error
pendingMu sync.Mutex // protects the pending map
pending map[ID]chan *WireResponse
handlingMu sync.Mutex // protects the handling map
handling map[ID]*Request
seq int64 // must only be accessed using atomic operations
Handler Handler
Canceler Canceler
Logger Logger
Capacity int
RejectIfOverloaded bool
stream Stream
err error
pendingMu sync.Mutex // protects the pending map
pending map[ID]chan *wireResponse
handlingMu sync.Mutex // protects the handling map
handling map[ID]*Request
}
type requestState int
@ -42,11 +51,73 @@ const (
type Request struct {
conn *Conn
cancel context.CancelFunc
start time.Time
state requestState
nextRequest chan struct{}
// The Wire values of the request.
WireRequest
// Method is a string containing the method name to invoke.
Method string
// Params is either a struct or an array with the parameters of the method.
Params *json.RawMessage
// The id of this request, used to tie the response back to the request.
// Will be either a string or a number. If not set, the request is a notify,
// and no response is possible.
ID *ID
}
// Handler is an option you can pass to NewConn to handle incoming requests.
// If the request returns false from IsNotify then the Handler must eventually
// call Reply on the Conn with the supplied request.
// Handlers are called synchronously; they should pass the work off to a
// goroutine if they are going to take a long time.
type Handler func(context.Context, *Request)
// Canceler is an option you can pass to NewConn which is invoked for
// cancelled outgoing requests.
// It is okay to use the connection to send notifications, but the context will
// be in the cancelled state, so you must do it with the background context
// instead.
type Canceler func(context.Context, *Conn, ID)
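On the v0.1.3 side the same hook points are plain fields on Conn rather than a handler chain. A minimal sketch of wiring them up, assuming a Stream such as one from jsonrpc2.NewStream; the serve function and the "pong" reply are invented for illustration.

func serve(ctx context.Context, stream jsonrpc2.Stream) error {
	conn := jsonrpc2.NewConn(stream)
	conn.Handler = func(ctx context.Context, r *jsonrpc2.Request) {
		if !r.IsNotify() {
			r.Reply(ctx, "pong", nil) // every call gets the same hypothetical reply
		}
	}
	conn.Canceler = func(ctx context.Context, c *jsonrpc2.Conn, id jsonrpc2.ID) {
		// a real canceler would send a protocol-specific cancellation notification
	}
	conn.Logger = jsonrpc2.Log // log all traffic via the standard log package
	return conn.Run(ctx)       // blocks until the stream is closed or fails
}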
type rpcStats struct {
server bool
method string
span trace.Span
start time.Time
}
func start(ctx context.Context, server bool, method string, id *ID) (context.Context, *rpcStats) {
if method == "" {
panic("no method in rpc stats")
}
s := &rpcStats{
server: server,
method: method,
start: time.Now(),
}
mode := telemetry.Outbound
if server {
mode = telemetry.Inbound
}
ctx, s.span = trace.StartSpan(ctx, method,
tag.Tag{Key: telemetry.Method, Value: method},
tag.Tag{Key: telemetry.RPCDirection, Value: mode},
tag.Tag{Key: telemetry.RPCID, Value: id},
)
telemetry.Started.Record(ctx, 1)
return ctx, s
}
func (s *rpcStats) end(ctx context.Context, err *error) {
if err != nil && *err != nil {
ctx = telemetry.StatusCode.With(ctx, "ERROR")
} else {
ctx = telemetry.StatusCode.With(ctx, "OK")
}
elapsedTime := time.Since(s.start)
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
telemetry.Latency.Record(ctx, latencyMillis)
s.span.End()
}
// NewErrorf builds an Error struct for the supplied message and code.
@ -62,23 +133,23 @@ func NewErrorf(code int64, format string, args ...interface{}) *Error {
// You must call Run for the connection to be active.
func NewConn(s Stream) *Conn {
conn := &Conn{
handlers: []Handler{defaultHandler{}},
stream: s,
pending: make(map[ID]chan *WireResponse),
pending: make(map[ID]chan *wireResponse),
handling: make(map[ID]*Request),
}
// the default handler reports a method error
conn.Handler = func(ctx context.Context, r *Request) {
if !r.IsNotify() {
r.Reply(ctx, nil, NewErrorf(CodeMethodNotFound, "method %q not found", r.Method))
}
}
// the default canceler does nothing
conn.Canceler = func(context.Context, *Conn, ID) {}
// the default logger does nothing
conn.Logger = func(Direction, *ID, time.Duration, string, *json.RawMessage, *Error) {}
return conn
}
// AddHandler adds a new handler to the set the connection will invoke.
// Handlers are invoked in the reverse order of how they were added, this
// allows the most recent addition to be the first one to attempt to handle a
// message.
func (c *Conn) AddHandler(handler Handler) {
// prepend the new handlers so we use them first
c.handlers = append([]Handler{handler}, c.handlers...)
}
// Cancel cancels a pending Call on the server side.
// The call is identified by its id.
// JSON RPC 2 does not specify a cancel message, so cancellation support is not
@ -97,11 +168,14 @@ func (c *Conn) Cancel(id ID) {
// It will return as soon as the notification has been sent, as no response is
// possible.
func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
ctx, rpcStats := start(ctx, false, method, nil)
defer rpcStats.end(ctx, &err)
jsonParams, err := marshalToRaw(params)
if err != nil {
return fmt.Errorf("marshalling notify parameters: %v", err)
}
request := &WireRequest{
request := &wireRequest{
Method: method,
Params: jsonParams,
}
@ -109,18 +183,9 @@ func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (e
if err != nil {
return fmt.Errorf("marshalling notify request: %v", err)
}
for _, h := range c.handlers {
ctx = h.Request(ctx, Send, request)
}
defer func() {
for _, h := range c.handlers {
h.Done(ctx, err)
}
}()
c.Logger(Send, nil, -1, request.Method, request.Params, nil)
n, err := c.stream.Write(ctx, data)
for _, h := range c.handlers {
ctx = h.Wrote(ctx, n)
}
telemetry.SentBytes.Record(ctx, n)
return err
}
@ -130,11 +195,13 @@ func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (e
func (c *Conn) Call(ctx context.Context, method string, params, result interface{}) (err error) {
// generate a new request identifier
id := ID{Number: atomic.AddInt64(&c.seq, 1)}
ctx, rpcStats := start(ctx, false, method, &id)
defer rpcStats.end(ctx, &err)
jsonParams, err := marshalToRaw(params)
if err != nil {
return fmt.Errorf("marshalling call parameters: %v", err)
}
request := &WireRequest{
request := &wireRequest{
ID: &id,
Method: method,
Params: jsonParams,
@ -144,12 +211,9 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
if err != nil {
return fmt.Errorf("marshalling call request: %v", err)
}
for _, h := range c.handlers {
ctx = h.Request(ctx, Send, request)
}
// we have to add ourselves to the pending map before we send, otherwise we
// are racing the response
rchan := make(chan *WireResponse)
rchan := make(chan *wireResponse)
c.pendingMu.Lock()
c.pending[id] = rchan
c.pendingMu.Unlock()
@ -158,15 +222,12 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
c.pendingMu.Lock()
delete(c.pending, id)
c.pendingMu.Unlock()
for _, h := range c.handlers {
h.Done(ctx, err)
}
}()
// now we are ready to send
before := time.Now()
c.Logger(Send, request.ID, -1, request.Method, request.Params, nil)
n, err := c.stream.Write(ctx, data)
for _, h := range c.handlers {
ctx = h.Wrote(ctx, n)
}
telemetry.SentBytes.Record(ctx, n)
if err != nil {
// sending failed, we will never get a response, so don't leave it pending
return err
@ -174,9 +235,8 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
// now wait for the response
select {
case response := <-rchan:
for _, h := range c.handlers {
ctx = h.Response(ctx, Receive, response)
}
elapsed := time.Since(before)
c.Logger(Receive, response.ID, elapsed, request.Method, response.Result, response.Error)
// is it an error response?
if response.Error != nil {
return response.Error
@ -190,12 +250,7 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
return nil
case <-ctx.Done():
// allow the handler to propagate the cancel
cancelled := false
for _, h := range c.handlers {
if h.Cancel(ctx, c, id, cancelled) {
cancelled = true
}
}
c.Canceler(ctx, c, id)
return ctx.Err()
}
}
@ -235,6 +290,9 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
if r.IsNotify() {
return fmt.Errorf("reply not invoked with a valid call")
}
ctx, st := trace.StartSpan(ctx, r.Method+":reply")
defer st.End()
// reply ends the handling phase of a call, so if we are not yet
// parallel we should be now. The goroutine is allowed to continue
// to do work after replying, which is why it is important to unlock
@ -242,11 +300,12 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
r.Parallel()
r.state = requestReplied
elapsed := time.Since(r.start)
var raw *json.RawMessage
if err == nil {
raw, err = marshalToRaw(result)
}
response := &WireResponse{
response := &wireResponse{
Result: raw,
ID: r.ID,
}
@ -261,13 +320,9 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
if err != nil {
return err
}
for _, h := range r.conn.handlers {
ctx = h.Response(ctx, Send, response)
}
r.conn.Logger(Send, response.ID, elapsed, r.Method, response.Result, response.Error)
n, err := r.conn.stream.Write(ctx, data)
for _, h := range r.conn.handlers {
ctx = h.Wrote(ctx, n)
}
telemetry.SentBytes.Record(ctx, n)
if err != nil {
// TODO(iancottrell): if a stream write fails, we really need to shut down
@ -305,7 +360,7 @@ type combined struct {
// caused the termination.
// It must be called exactly once for each Conn.
// It returns only when the reader is closed or there is an error in the stream.
func (c *Conn) Run(runCtx context.Context) error {
func (c *Conn) Run(ctx context.Context) error {
// we need to make the next request "lock" in an unlocked state to allow
// the first incoming request to proceed. All later requests are unlocked
// by the preceding request going to parallel mode.
@ -313,7 +368,7 @@ func (c *Conn) Run(runCtx context.Context) error {
close(nextRequest)
for {
// get the data for a message
data, n, err := c.stream.Read(runCtx)
data, n, err := c.stream.Read(ctx)
if err != nil {
// the stream failed, we cannot continue
return err
@ -323,32 +378,26 @@ func (c *Conn) Run(runCtx context.Context) error {
if err := json.Unmarshal(data, msg); err != nil {
// a badly formed message arrived, log it and continue
// we trust the stream to have isolated the error to just this message
for _, h := range c.handlers {
h.Error(runCtx, fmt.Errorf("unmarshal failed: %v", err))
}
c.Logger(Receive, nil, -1, "", nil, NewErrorf(0, "unmarshal failed: %v", err))
continue
}
// work out which kind of message we have
switch {
case msg.Method != "":
// if method is set it must be a request
reqCtx, cancelReq := context.WithCancel(runCtx)
reqCtx, cancelReq := context.WithCancel(ctx)
reqCtx, rpcStats := start(reqCtx, true, msg.Method, msg.ID)
telemetry.ReceivedBytes.Record(ctx, n)
thisRequest := nextRequest
nextRequest = make(chan struct{})
req := &Request{
conn: c,
cancel: cancelReq,
nextRequest: nextRequest,
WireRequest: WireRequest{
VersionTag: msg.VersionTag,
Method: msg.Method,
Params: msg.Params,
ID: msg.ID,
},
}
for _, h := range c.handlers {
reqCtx = h.Request(reqCtx, Receive, &req.WireRequest)
reqCtx = h.Read(reqCtx, n)
start: time.Now(),
Method: msg.Method,
Params: msg.Params,
ID: msg.ID,
}
c.setHandling(req, true)
go func() {
@ -360,17 +409,11 @@ func (c *Conn) Run(runCtx context.Context) error {
req.Reply(reqCtx, nil, NewErrorf(CodeInternalError, "method %q did not reply", req.Method))
}
req.Parallel()
for _, h := range c.handlers {
h.Done(reqCtx, err)
}
rpcStats.end(reqCtx, nil)
cancelReq()
}()
delivered := false
for _, h := range c.handlers {
if h.Deliver(reqCtx, req, delivered) {
delivered = true
}
}
c.Logger(Receive, req.ID, -1, req.Method, req.Params, nil)
c.Handler(reqCtx, req)
}()
case msg.ID != nil:
// we have a response, get the pending entry from the map
@ -381,7 +424,7 @@ func (c *Conn) Run(runCtx context.Context) error {
}
c.pendingMu.Unlock()
// and send the reply to the channel
response := &WireResponse{
response := &wireResponse{
Result: msg.Result,
Error: msg.Error,
ID: msg.ID,
@ -389,9 +432,7 @@ func (c *Conn) Run(runCtx context.Context) error {
rchan <- response
close(rchan)
default:
for _, h := range c.handlers {
h.Error(runCtx, fmt.Errorf("message not a call, notify or response, ignoring"))
}
c.Logger(Receive, nil, -1, "", nil, NewErrorf(0, "message not a call, notify or response, ignoring"))
}
}
}

View File

@ -10,11 +10,9 @@ import (
"flag"
"fmt"
"io"
"log"
"path"
"reflect"
"testing"
"time"
"golang.org/x/tools/internal/jsonrpc2"
)
@ -108,7 +106,10 @@ func run(ctx context.Context, t *testing.T, withHeaders bool, r io.ReadCloser, w
stream = jsonrpc2.NewStream(r, w)
}
conn := jsonrpc2.NewConn(stream)
conn.AddHandler(&handle{log: *logRPC})
conn.Handler = handle
if *logRPC {
conn.Logger = jsonrpc2.Log
}
go func() {
defer func() {
r.Close()
@ -121,82 +122,36 @@ func run(ctx context.Context, t *testing.T, withHeaders bool, r io.ReadCloser, w
return conn
}
type handle struct {
log bool
}
func (h *handle) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
func handle(ctx context.Context, r *jsonrpc2.Request) {
switch r.Method {
case "no_args":
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return true
return
}
r.Reply(ctx, true, nil)
case "one_string":
var v string
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return true
return
}
r.Reply(ctx, "got:"+v, nil)
case "one_number":
var v int
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return true
return
}
r.Reply(ctx, fmt.Sprintf("got:%d", v), nil)
case "join":
var v []string
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return true
return
}
r.Reply(ctx, path.Join(v...), nil)
default:
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
}
return true
}
func (h *handle) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
return false
}
func (h *handle) Request(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireRequest) context.Context {
if h.log {
if r.ID != nil {
log.Printf("%v call [%v] %s %v", direction, r.ID, r.Method, r.Params)
} else {
log.Printf("%v notification %s %v", direction, r.Method, r.Params)
}
ctx = context.WithValue(ctx, "method", r.Method)
ctx = context.WithValue(ctx, "start", time.Now())
}
return ctx
}
func (h *handle) Response(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireResponse) context.Context {
if h.log {
method := ctx.Value("method")
elapsed := time.Since(ctx.Value("start").(time.Time))
log.Printf("%v response in %v [%v] %s %v", direction, elapsed, r.ID, method, r.Result)
}
return ctx
}
func (h *handle) Done(ctx context.Context, err error) {
}
func (h *handle) Read(ctx context.Context, bytes int64) context.Context {
return ctx
}
func (h *handle) Wrote(ctx context.Context, bytes int64) context.Context {
return ctx
}
func (h *handle) Error(ctx context.Context, err error) {
log.Printf("%v", err)
}

internal/jsonrpc2/log.go (new file, 59 lines)
View File

@ -0,0 +1,59 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonrpc2
import (
"encoding/json"
"log"
"time"
)
// Logger is an option you can pass to NewConn which is invoked for
// all messages flowing through a Conn.
// direction indicates if the message is being received or sent
// id is the message id, if not set it was a notification
// elapsed is the time between a call being seen and the response, and is
// negative for anything that is not a response.
// method is the method name specified in the message
// payload is the parameters for a call or notification, and the result for a
// response
type Logger = func(direction Direction, id *ID, elapsed time.Duration, method string, payload *json.RawMessage, err *Error)
// Direction is used to indicate to a logger whether the logged message was being
// sent or received.
type Direction bool
const (
// Send indicates the message is outgoing.
Send = Direction(true)
// Receive indicates the message is incoming.
Receive = Direction(false)
)
func (d Direction) String() string {
switch d {
case Send:
return "send"
case Receive:
return "receive"
default:
panic("unreachable")
}
}
// Log is an implementation of Logger that outputs using log.Print
// It is not used by default, but is provided for easy logging in user code.
func Log(direction Direction, id *ID, elapsed time.Duration, method string, payload *json.RawMessage, err *Error) {
switch {
case err != nil:
log.Printf("%v failure [%v] %s %v", direction, id, method, err)
case id == nil:
log.Printf("%v notification %s %s", direction, method, *payload)
case elapsed >= 0:
log.Printf("%v response in %v [%v] %s %s", direction, elapsed, id, method, *payload)
default:
log.Printf("%v call [%v] %s %s", direction, id, method, *payload)
}
}

View File

@ -34,8 +34,8 @@ const (
CodeServerOverloaded = -32000
)
// WireRequest is sent to a server to represent a Call or Notify operation.
type WireRequest struct {
// wireRequest is sent to a server to represent a Call or Notify operation.
type wireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag VersionTag `json:"jsonrpc"`
// Method is a string containing the method name to invoke.
@ -48,11 +48,11 @@ type WireRequest struct {
ID *ID `json:"id,omitempty"`
}
// WireResponse is a reply to a Request.
// wireResponse is a reply to a Request.
// It will always have the ID field set to tie it back to a request, and will
// have either the Result or Error fields set depending on whether it is a
// success or failure response.
type WireResponse struct {
type wireResponse struct {
// VersionTag is always encoded as the string "2.0"
VersionTag VersionTag `json:"jsonrpc"`
// Result is the response value, and is required on success.
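For reference, the wire shapes these structs describe are ordinary JSON-RPC 2.0 messages. A tiny self-contained sketch that marshals an anonymous struct mirroring wireRequest; the "ping" method and the id are invented.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	req := struct {
		VersionTag string           `json:"jsonrpc"`
		ID         int64            `json:"id,omitempty"`
		Method     string           `json:"method"`
		Params     *json.RawMessage `json:"params,omitempty"`
	}{VersionTag: "2.0", ID: 1, Method: "ping"}
	data, _ := json.Marshal(req)
	fmt.Println(string(data)) // {"jsonrpc":"2.0","id":1,"method":"ping"}
}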

View File

@ -14,6 +14,7 @@ import (
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
)
@ -71,11 +72,12 @@ func (c *cache) GetFile(uri span.URI) source.FileHandle {
}
}
func (c *cache) NewSession(ctx context.Context) source.Session {
func (c *cache) NewSession(log xlog.Logger) source.Session {
index := atomic.AddInt64(&sessionIndex, 1)
s := &session{
cache: c,
id: strconv.FormatInt(index, 10),
log: log,
overlays: make(map[span.URI]*overlay),
filesWatchMap: NewWatchMap(),
}

View File

@ -16,8 +16,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)
@ -70,22 +68,18 @@ func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) {
// This goroutine becomes responsible for populating
// the entry and broadcasting its readiness.
e.pkg, e.err = imp.typeCheck(ctx, id)
if e.err != nil {
// Don't cache failed packages. If we didn't successfully cache the package
// in each file, then this pcache entry won't get invalidated as those files
// change.
imp.view.pcache.mu.Lock()
if imp.view.pcache.packages[id] == e {
delete(imp.view.pcache.packages, id)
}
imp.view.pcache.mu.Unlock()
}
close(e.ready)
}
if e.err != nil {
// If the import had been previously canceled, and that error cached, try again.
if e.err == context.Canceled && ctx.Err() == nil {
imp.view.pcache.mu.Lock()
// Clear out canceled cache entry if it is still there.
if imp.view.pcache.packages[id] == e {
delete(imp.view.pcache.packages, id)
}
imp.view.pcache.mu.Unlock()
return imp.getPkg(ctx, id)
}
return nil, e.err
@ -95,8 +89,8 @@ func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) {
}
func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) {
ctx, done := trace.StartSpan(ctx, "cache.importer.typeCheck", telemetry.Package.Of(id))
defer done()
ctx, ts := trace.StartSpan(ctx, "cache.importer.typeCheck")
defer ts.End()
meta, ok := imp.view.mcache.packages[id]
if !ok {
return nil, fmt.Errorf("no metadata for %v", id)
@ -123,42 +117,42 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
mode = source.ParseExported
}
var (
files = make([]*ast.File, len(meta.files))
errors = make([]error, len(meta.files))
wg sync.WaitGroup
files []*astFile
phs []source.ParseGoHandle
wg sync.WaitGroup
)
for _, filename := range meta.files {
uri := span.FileURI(filename)
f, err := imp.view.getFile(ctx, uri)
if err != nil {
log.Error(ctx, "unable to get file", err, telemetry.File.Of(f.URI()))
continue
}
pkg.files = append(pkg.files, imp.view.session.cache.ParseGoHandle(f.Handle(ctx), mode))
ph := imp.view.session.cache.ParseGoHandle(f.Handle(ctx), mode)
phs = append(phs, ph)
files = append(files, &astFile{
uri: ph.File().Identity().URI,
isTrimmed: mode == source.ParseExported,
ph: ph,
})
}
for i, ph := range pkg.files {
for i, ph := range phs {
wg.Add(1)
go func(i int, ph source.ParseGoHandle) {
defer wg.Done()
files[i], errors[i] = ph.Parse(ctx)
files[i].file, files[i].err = ph.Parse(ctx)
}(i, ph)
}
wg.Wait()
var i int
for _, f := range files {
if f != nil {
files[i] = f
i++
}
}
for _, err := range errors {
if err == context.Canceled {
return nil, err
}
if err != nil {
imp.view.session.cache.appendPkgError(pkg, err)
pkg.files = append(pkg.files, f)
if f.err != nil {
if f.err == context.Canceled {
return nil, f.err
}
imp.view.session.cache.appendPkgError(pkg, f.err)
}
}
@ -194,7 +188,7 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
check := types.NewChecker(cfg, imp.fset, pkg.types, pkg.typesInfo)
// Ignore type-checking errors.
check.Files(files)
check.Files(pkg.GetSyntax())
// Add every file in this package to our cache.
if err := imp.cachePackage(ctx, pkg, meta, mode); err != nil {
@ -205,17 +199,16 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
}
func (imp *importer) cachePackage(ctx context.Context, pkg *pkg, meta *metadata, mode source.ParseMode) error {
for _, ph := range pkg.files {
uri := ph.File().Identity().URI
f, err := imp.view.getFile(ctx, uri)
for _, file := range pkg.files {
f, err := imp.view.getFile(ctx, file.uri)
if err != nil {
return fmt.Errorf("no such file %s: %v", uri, err)
return fmt.Errorf("no such file %s: %v", file.uri, err)
}
gof, ok := f.(*goFile)
if !ok {
return fmt.Errorf("non Go file %s", uri)
return fmt.Errorf("non Go file %s", file.uri)
}
if err := imp.cachePerFile(gof, ph, pkg); err != nil {
if err := imp.cachePerFile(gof, file, pkg); err != nil {
return fmt.Errorf("failed to cache file %s: %v", gof.URI(), err)
}
}
@ -234,7 +227,7 @@ func (imp *importer) cachePackage(ctx context.Context, pkg *pkg, meta *metadata,
return nil
}
func (imp *importer) cachePerFile(gof *goFile, ph source.ParseGoHandle, p *pkg) error {
func (imp *importer) cachePerFile(gof *goFile, file *astFile, p *pkg) error {
gof.mu.Lock()
defer gof.mu.Unlock()
@ -244,11 +237,25 @@ func (imp *importer) cachePerFile(gof *goFile, ph source.ParseGoHandle, p *pkg)
}
gof.pkgs[p.id] = p
file, err := ph.Parse(imp.ctx)
if file == nil {
return fmt.Errorf("no AST for %s: %v", ph.File().Identity().URI, err)
// Get the AST for the file.
gof.ast = file
if gof.ast == nil {
return fmt.Errorf("no AST information for %s", file.uri)
}
gof.imports = file.Imports
if gof.ast.file == nil {
return fmt.Errorf("no AST for %s", file.uri)
}
// Get the *token.File directly from the AST.
pos := gof.ast.file.Pos()
if !pos.IsValid() {
return fmt.Errorf("AST for %s has an invalid position", file.uri)
}
tok := imp.view.session.cache.FileSet().File(pos)
if tok == nil {
return fmt.Errorf("no *token.File for %s", file.uri)
}
gof.token = tok
gof.imports = gof.ast.file.Imports
return nil
}
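
The getPkg logic above combines a ready-channel cache with two eviction rules: failed entries are not kept, and a cached context.Canceled error is retried while the caller's own context is still live. A standalone sketch of the same pattern, with hypothetical names and a string-valued cache standing in for packages:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// entry mirrors the pcache entries above: the first requester computes the
// value and closes ready; later requesters wait on ready.
type entry struct {
	ready chan struct{}
	val   string
	err   error
}

type cache struct {
	mu      sync.Mutex
	entries map[string]*entry
}

func (c *cache) get(ctx context.Context, key string, compute func() (string, error)) (string, error) {
	c.mu.Lock()
	e, ok := c.entries[key]
	if !ok {
		// First requester: populate the entry and broadcast readiness.
		e = &entry{ready: make(chan struct{})}
		c.entries[key] = e
		c.mu.Unlock()
		e.val, e.err = compute()
		if e.err != nil {
			// Don't keep failed entries, so a later call can retry.
			c.mu.Lock()
			if c.entries[key] == e {
				delete(c.entries, key)
			}
			c.mu.Unlock()
		}
		close(e.ready)
	} else {
		c.mu.Unlock()
		// Later requesters wait for the first, or give up with their context.
		select {
		case <-e.ready:
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
	// A cached cancellation is cleared and retried if our context is live.
	if errors.Is(e.err, context.Canceled) && ctx.Err() == nil {
		c.mu.Lock()
		if c.entries[key] == e {
			delete(c.entries, key)
		}
		c.mu.Unlock()
		return c.get(ctx, key, compute)
	}
	return e.val, e.err
}

func main() {
	c := &cache{entries: map[string]*entry{}}
	v, err := c.get(context.Background(), "pkg", func() (string, error) { return "type-checked", nil })
	fmt.Println(v, err)
}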

View File

@ -10,7 +10,6 @@ import (
"os"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)
@ -52,8 +51,8 @@ func (h *nativeFileHandle) Kind() source.FileKind {
}
func (h *nativeFileHandle) Read(ctx context.Context) ([]byte, string, error) {
ctx, done := trace.StartSpan(ctx, "cache.nativeFileHandle.Read", telemetry.File.Of(h.identity.URI.Filename()))
defer done()
ctx, ts := trace.StartSpan(ctx, "cache.nativeFileHandle.Read")
defer ts.End()
//TODO: this should fail if the version is not the same as the handle
data, err := ioutil.ReadFile(h.identity.URI.Filename())
if err != nil {

View File

@ -34,6 +34,8 @@ type fileBase struct {
handleMu sync.Mutex
handle source.FileHandle
token *token.File
}
func basename(filename string) string {

View File

@ -6,14 +6,11 @@ package cache
import (
"context"
"fmt"
"go/ast"
"go/token"
"sync"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)
@ -36,6 +33,7 @@ type goFile struct {
imports []*ast.ImportSpec
ast *astFile
pkgs map[packageID]*pkg
meta map[packageID]*metadata
}
@ -48,53 +46,71 @@ type astFile struct {
isTrimmed bool
}
func (f *goFile) GetToken(ctx context.Context) (*token.File, error) {
file, err := f.GetAST(ctx, source.ParseFull)
if file == nil {
return nil, err
}
return f.view.session.cache.fset.File(file.Pos()), nil
}
func (f *goFile) GetAST(ctx context.Context, mode source.ParseMode) (*ast.File, error) {
func (f *goFile) GetToken(ctx context.Context) *token.File {
f.view.mu.Lock()
defer f.view.mu.Unlock()
ctx = telemetry.File.With(ctx, f.URI())
if f.isDirty(ctx) || f.wrongParseMode(ctx, mode) {
if f.isDirty() || f.astIsTrimmed() {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
return nil, fmt.Errorf("GetAST: unable to check package for %s: %v", f.URI(), err)
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
}
}
fh := f.Handle(ctx)
// Check for a cached AST first, in case getting a trimmed version would actually cause a re-parse.
for _, m := range []source.ParseMode{
source.ParseHeader,
source.ParseExported,
source.ParseFull,
} {
if m < mode {
continue
}
if v, ok := f.view.session.cache.store.Cached(parseKey{
file: fh.Identity(),
mode: m,
}).(*parseGoData); ok {
return v.ast, v.err
f.mu.Lock()
defer f.mu.Unlock()
if unexpectedAST(ctx, f) {
return nil
}
return f.token
}
func (f *goFile) GetAnyAST(ctx context.Context) *ast.File {
f.view.mu.Lock()
defer f.view.mu.Unlock()
if f.isDirty() {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
}
}
ph := f.view.session.cache.ParseGoHandle(fh, mode)
return ph.Parse(ctx)
f.mu.Lock()
defer f.mu.Unlock()
if f.ast == nil {
return nil
}
return f.ast.file
}
func (f *goFile) GetAST(ctx context.Context) *ast.File {
f.view.mu.Lock()
defer f.view.mu.Unlock()
if f.isDirty() || f.astIsTrimmed() {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
}
}
f.mu.Lock()
defer f.mu.Unlock()
if unexpectedAST(ctx, f) {
return nil
}
return f.ast.file
}
func (f *goFile) GetPackages(ctx context.Context) []source.Package {
f.view.mu.Lock()
defer f.view.mu.Unlock()
ctx = telemetry.File.With(ctx, f.URI())
if f.isDirty(ctx) || f.wrongParseMode(ctx, source.ParseFull) {
if f.isDirty() || f.astIsTrimmed() {
if errs, err := f.view.loadParseTypecheck(ctx, f); err != nil {
log.Error(ctx, "unable to check package", err, telemetry.File)
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
// Create diagnostics for errors if we are able to.
if len(errs) > 0 {
@ -107,6 +123,9 @@ func (f *goFile) GetPackages(ctx context.Context) []source.Package {
f.mu.Lock()
defer f.mu.Unlock()
if unexpectedAST(ctx, f) {
return nil
}
var pkgs []source.Package
for _, pkg := range f.pkgs {
pkgs = append(pkgs, pkg)
@ -129,24 +148,23 @@ func (f *goFile) GetPackage(ctx context.Context) source.Package {
return result
}
func (f *goFile) wrongParseMode(ctx context.Context, mode source.ParseMode) bool {
f.mu.Lock()
defer f.mu.Unlock()
fh := f.Handle(ctx)
for _, pkg := range f.pkgs {
for _, ph := range pkg.files {
if fh.Identity() == ph.File().Identity() {
return ph.Mode() < mode
}
}
func unexpectedAST(ctx context.Context, f *goFile) bool {
// If the AST comes back nil, something has gone wrong.
if f.ast == nil {
f.View().Session().Logger().Errorf(ctx, "expected full AST for %s, returned nil", f.URI())
return true
}
// If the AST comes back trimmed, something has gone wrong.
if f.ast.isTrimmed {
f.View().Session().Logger().Errorf(ctx, "expected full AST for %s, returned trimmed", f.URI())
return true
}
return false
}
// isDirty is true if the file needs to be type-checked.
// It assumes that the file's view's mutex is held by the caller.
func (f *goFile) isDirty(ctx context.Context) bool {
func (f *goFile) isDirty() bool {
f.mu.Lock()
defer f.mu.Unlock()
@ -166,16 +184,14 @@ func (f *goFile) isDirty(ctx context.Context) bool {
if len(f.missingImports) > 0 {
return true
}
fh := f.Handle(ctx)
for _, pkg := range f.pkgs {
for _, file := range pkg.files {
// There is a type-checked package for the current file handle.
if file.File().Identity() == fh.Identity() {
return false
}
}
}
return true
return f.token == nil || f.ast == nil
}
func (f *goFile) astIsTrimmed() bool {
f.mu.Lock()
defer f.mu.Unlock()
return f.ast != nil && f.ast.isTrimmed
}
func (f *goFile) GetActiveReverseDeps(ctx context.Context) []source.GoFile {

View File

@ -10,10 +10,6 @@ import (
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)
@ -23,7 +19,7 @@ func (v *view) loadParseTypecheck(ctx context.Context, f *goFile) ([]packages.Er
// If the AST for this file is trimmed, and we are explicitly type-checking it,
// don't ignore function bodies.
if f.wrongParseMode(ctx, source.ParseFull) {
if f.astIsTrimmed() {
v.pcache.mu.Lock()
f.invalidateAST(ctx)
v.pcache.mu.Unlock()
@ -88,9 +84,7 @@ func (v *view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met
return nil, nil, ctx.Err()
}
ctx, done := trace.StartSpan(ctx, "packages.Load", telemetry.File.Of(f.filename()))
defer done()
pkgs, err := packages.Load(v.Config(ctx), fmt.Sprintf("file=%s", f.filename()))
pkgs, err := packages.Load(v.Config(), fmt.Sprintf("file=%s", f.filename()))
if len(pkgs) == 0 {
if err == nil {
err = fmt.Errorf("go/packages.Load: no packages found for %s", f.filename())
@ -105,10 +99,7 @@ func (v *view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met
}
// Track missing imports as we look at the package's errors.
missingImports := make(map[packagePath]struct{})
log.Print(ctx, "go/packages.Load", tag.Of("packages", len(pkgs)))
for _, pkg := range pkgs {
log.Print(ctx, "go/packages.Load", tag.Of("package", pkg.PkgPath), tag.Of("files", pkg.CompiledGoFiles))
// If the package comes back with errors from `go list`,
// don't bother type-checking it.
if len(pkg.Errors) > 0 {
@ -233,13 +224,11 @@ func (v *view) link(ctx context.Context, pkgPath packagePath, pkg *packages.Pack
for _, filename := range m.files {
f, err := v.getFile(ctx, span.FileURI(filename))
if err != nil {
log.Error(ctx, "no file", err, telemetry.File.Of(filename))
continue
v.session.log.Errorf(ctx, "no file %s: %v", filename, err)
}
gof, ok := f.(*goFile)
if !ok {
log.Error(ctx, "not a Go file", nil, telemetry.File.Of(filename))
continue
v.session.log.Errorf(ctx, "not a Go file: %s", f.URI())
}
if gof.meta == nil {
gof.meta = make(map[packageID]*metadata)
@ -263,7 +252,7 @@ func (v *view) link(ctx context.Context, pkgPath packagePath, pkg *packages.Pack
}
if _, ok := m.children[packageID(importPkg.ID)]; !ok {
if err := v.link(ctx, importPkgPath, importPkg, m, missingImports); err != nil {
log.Error(ctx, "error in dependency", err, telemetry.Package.Of(importPkgPath))
v.session.log.Errorf(ctx, "error in dependency %s: %v", importPkgPath, err)
}
}
}
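
checkMetadata above resolves the package(s) owning a single file with a "file=" query. A minimal standalone use of that query form, with a hypothetical path:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	// "file=<path>" asks go/packages for whatever package(s) contain that file.
	pkgs, err := packages.Load(cfg, "file=/path/to/some/file.go")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.PkgPath)
	}
}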

View File

@ -6,7 +6,6 @@ package cache
import (
"context"
"fmt"
"go/token"
)
@ -15,10 +14,7 @@ type modFile struct {
fileBase
}
func (*modFile) GetToken(context.Context) (*token.File, error) {
return nil, fmt.Errorf("GetToken: not implemented")
}
func (*modFile) setContent(content []byte) {}
func (*modFile) filename() string { return "" }
func (*modFile) isActive() bool { return false }
func (*modFile) GetToken(context.Context) *token.File { return nil }
func (*modFile) setContent(content []byte) {}
func (*modFile) filename() string { return "" }
func (*modFile) isActive() bool { return false }

View File

@ -13,13 +13,12 @@ import (
"go/token"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/memoize"
)
// Limits the number of parallel parser calls per process.
var parseLimit = make(chan struct{}, 20)
var parseLimit = make(chan bool, 20)
// parseKey uniquely identifies a parsed Go file.
type parseKey struct {
@ -75,13 +74,13 @@ func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, error) {
}
func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (*ast.File, error) {
ctx, done := trace.StartSpan(ctx, "cache.parseGo", telemetry.File.Of(fh.Identity().URI.Filename()))
defer done()
ctx, ts := trace.StartSpan(ctx, "cache.parseGo")
defer ts.End()
buf, _, err := fh.Read(ctx)
if err != nil {
return nil, err
}
parseLimit <- struct{}{}
parseLimit <- true
defer func() { <-parseLimit }()
parserMode := parser.AllErrors | parser.ParseComments
if mode == source.ParseHeader {
@ -141,8 +140,8 @@ func isEllipsisArray(n ast.Expr) bool {
return ok
}
// fix inspects the AST and potentially modifies any *ast.BadStmts so that it can be
// type-checked more effectively.
// fix inspects and potentially modifies any *ast.BadStmts or *ast.BadExprs in the AST.
// We attempt to modify the AST such that we can type-check it more effectively.
func fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) error {
var parent ast.Node
var err error
@ -208,7 +207,7 @@ func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src
var to, curr token.Pos
FindTo:
for {
curr, tkn, _ = s.Scan()
curr, tkn, lit = s.Scan()
// TODO(rstambler): This still needs more handling to work correctly.
// We encounter a specific issue with code that looks like this:
//

View File

@ -22,7 +22,7 @@ type pkg struct {
id packageID
pkgPath packagePath
files []source.ParseGoHandle
files []*astFile
errors []packages.Error
imports map[packagePath]*pkg
types *types.Package
@ -149,18 +149,17 @@ func (pkg *pkg) PkgPath() string {
func (pkg *pkg) GetFilenames() []string {
filenames := make([]string, 0, len(pkg.files))
for _, ph := range pkg.files {
filenames = append(filenames, ph.File().Identity().URI.Filename())
for _, f := range pkg.files {
filenames = append(filenames, f.uri.Filename())
}
return filenames
}
func (pkg *pkg) GetSyntax(ctx context.Context) []*ast.File {
func (pkg *pkg) GetSyntax() []*ast.File {
var syntax []*ast.File
for _, ph := range pkg.files {
file, _ := ph.Parse(ctx)
if file != nil {
syntax = append(syntax, file)
for _, f := range pkg.files {
if f.file != nil {
syntax = append(syntax, f.file)
}
}
return syntax

View File

@ -16,16 +16,15 @@ import (
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
)
type session struct {
cache *cache
id string
// the logger to use to communicate back with the client
log xlog.Logger
viewMu sync.Mutex
views []*view
@ -65,18 +64,16 @@ func (s *session) Cache() source.Cache {
return s.cache
}
func (s *session) NewView(ctx context.Context, name string, folder span.URI) source.View {
func (s *session) NewView(name string, folder span.URI) source.View {
index := atomic.AddInt64(&viewIndex, 1)
s.viewMu.Lock()
defer s.viewMu.Unlock()
// We want a true background context and not a detached context here;
// the spans need to be unrelated and no tag values should pollute it.
baseCtx := trace.Detach(xcontext.Detach(ctx))
backgroundCtx, cancel := context.WithCancel(baseCtx)
ctx := context.Background()
backgroundCtx, cancel := context.WithCancel(ctx)
v := &view{
session: s,
id: strconv.FormatInt(index, 10),
baseCtx: baseCtx,
baseCtx: ctx,
backgroundCtx: backgroundCtx,
cancel: cancel,
name: name,
@ -95,7 +92,7 @@ func (s *session) NewView(ctx context.Context, name string, folder span.URI) sou
}
// Preemptively build the builtin package,
// so we immediately add builtin.go to the list of ignored files.
v.buildBuiltinPkg(ctx)
v.buildBuiltinPkg()
s.views = append(s.views, v)
// we always need to drop the view map
@ -181,9 +178,11 @@ func (s *session) removeView(ctx context.Context, view *view) error {
return fmt.Errorf("view %s for %v not found", view.Name(), view.Folder())
}
// TODO: Propagate the language ID through to the view.
func (s *session) DidOpen(ctx context.Context, uri span.URI, _ source.FileKind, text []byte) {
ctx = telemetry.File.With(ctx, uri)
func (s *session) Logger() xlog.Logger {
return s.log
}
func (s *session) DidOpen(ctx context.Context, uri span.URI, text []byte) {
// Mark the file as open.
s.openFiles.Store(uri, true)
@ -198,12 +197,12 @@ func (s *session) DidOpen(ctx context.Context, uri span.URI, _ source.FileKind,
if strings.HasPrefix(string(uri), string(view.Folder())) {
f, err := view.GetFile(ctx, uri)
if err != nil {
log.Error(ctx, "error getting file", nil, telemetry.File)
s.log.Errorf(ctx, "error getting file for %s", uri)
return
}
gof, ok := f.(*goFile)
if !ok {
log.Error(ctx, "not a Go file", nil, telemetry.File)
s.log.Errorf(ctx, "%s is not a Go file", uri)
return
}
// Mark file as open.
@ -275,7 +274,7 @@ func (s *session) openOverlay(ctx context.Context, uri span.URI, data []byte) {
}
_, hash, err := s.cache.GetFile(uri).Read(ctx)
if err != nil {
log.Error(ctx, "failed to read", err, telemetry.File)
s.log.Errorf(ctx, "failed to read %s: %v", uri, err)
return
}
if hash == s.overlays[uri].hash {

View File

@ -6,7 +6,6 @@ package cache
import (
"context"
"fmt"
"go/token"
)
@ -15,10 +14,7 @@ type sumFile struct {
fileBase
}
func (*sumFile) GetToken(context.Context) (*token.File, error) {
return nil, fmt.Errorf("GetToken: not implemented")
}
func (*sumFile) setContent(content []byte) {}
func (*sumFile) filename() string { return "" }
func (*sumFile) isActive() bool { return false }
func (*sumFile) GetToken(context.Context) *token.File { return nil }
func (*sumFile) setContent(content []byte) {}
func (*sumFile) filename() string { return "" }
func (*sumFile) isActive() bool { return false }

View File

@ -6,22 +6,17 @@ package cache
import (
"context"
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"os"
"path/filepath"
"strings"
"sync"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)
@ -53,19 +48,6 @@ type view struct {
// env is the environment to use when invoking underlying tools.
env []string
// process is the process env for this view.
// Note: this contains cached module and filesystem state.
//
// TODO(suzmue): the state cached in the process env is specific to each view,
// however, there is state that can be shared between views that is not currently
// cached, like the module cache.
processEnv *imports.ProcessEnv
// modFileVersions stores the last seen versions of the module files that are used
// by processEnv's resolver.
// TODO(suzmue): These versions may not actually be on disk.
modFileVersions map[string]string
// buildFlags is the build flags to use when invoking underlying tools.
buildFlags []string
@ -129,7 +111,7 @@ func (v *view) Folder() span.URI {
// Config returns the configuration used for the view's interaction with the
// go/packages API. It is shared across all views.
func (v *view) Config(ctx context.Context) *packages.Config {
func (v *view) Config() *packages.Config {
// TODO: Should we cache the config and/or overlay somewhere?
return &packages.Config{
Dir: v.folder.Filename(),
@ -146,112 +128,10 @@ func (v *view) Config(ctx context.Context) *packages.Config {
ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
panic("go/packages must not be used to parse files")
},
Logf: func(format string, args ...interface{}) {
log.Print(ctx, fmt.Sprintf(format, args...))
},
Tests: true,
}
}
func (v *view) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error {
v.mu.Lock()
defer v.mu.Unlock()
if v.processEnv == nil {
v.processEnv = v.buildProcessEnv(ctx)
}
// Before running the user provided function, clear caches in the resolver.
if v.modFilesChanged() {
if r, ok := v.processEnv.GetResolver().(*imports.ModuleResolver); ok {
// Clear the resolver cache and set Initialized to false.
r.Initialized = false
r.Main = nil
r.ModsByModPath = nil
r.ModsByDir = nil
// Reset the modFileVersions.
v.modFileVersions = nil
}
}
// Run the user function.
opts.Env = v.processEnv
if err := fn(opts); err != nil {
return err
}
// If applicable, store the file versions of the 'go.mod' files that are
// looked at by the resolver.
v.storeModFileVersions()
return nil
}
func (v *view) buildProcessEnv(ctx context.Context) *imports.ProcessEnv {
cfg := v.Config(ctx)
env := &imports.ProcessEnv{
WorkingDir: cfg.Dir,
Logf: func(format string, args ...interface{}) {
log.Print(ctx, fmt.Sprintf(format, args...))
},
}
for _, kv := range cfg.Env {
split := strings.Split(kv, "=")
if len(split) < 2 {
continue
}
switch split[0] {
case "GOPATH":
env.GOPATH = split[1]
case "GOROOT":
env.GOROOT = split[1]
case "GO111MODULE":
env.GO111MODULE = split[1]
case "GOPROXY":
env.GOPROXY = split[1]
case "GOFLAGS":
env.GOFLAGS = split[1]
case "GOSUMDB":
env.GOSUMDB = split[1]
}
}
return env
}
func (v *view) modFilesChanged() bool {
// Check the versions of the 'go.mod' files of the main module
// and modules included by a replace directive. Return true if
// any of these file versions do not match.
for filename, version := range v.modFileVersions {
if version != v.fileVersion(filename) {
return true
}
}
return false
}
func (v *view) storeModFileVersions() {
// Store the mod files versions, if we are using a ModuleResolver.
r, moduleMode := v.processEnv.GetResolver().(*imports.ModuleResolver)
if !moduleMode || !r.Initialized {
return
}
v.modFileVersions = make(map[string]string)
// Get the file versions of the 'go.mod' files of the main module
// and modules included by a replace directive in the resolver.
for _, mod := range r.ModsByModPath {
if (mod.Main || mod.Replace != nil) && mod.GoMod != "" {
v.modFileVersions[mod.GoMod] = v.fileVersion(mod.GoMod)
}
}
}
func (v *view) fileVersion(filename string) string {
uri := span.FileURI(filename)
f := v.session.GetFile(uri)
return f.Identity().Version
}
func (v *view) Env() []string {
v.mu.Lock()
defer v.mu.Unlock()
@ -263,7 +143,6 @@ func (v *view) SetEnv(env []string) {
defer v.mu.Unlock()
//TODO: this should invalidate the entire view
v.env = env
v.processEnv = nil // recompute process env
}
func (v *view) SetBuildFlags(buildFlags []string) {
@ -307,12 +186,9 @@ func (v *view) BuiltinPackage() *ast.Package {
// buildBuiltinPkg builds the view's builtin package.
// It assumes that the view is not active yet,
// i.e. it has not been added to the session's list of views.
func (v *view) buildBuiltinPkg(ctx context.Context) {
cfg := *v.Config(ctx)
pkgs, err := packages.Load(&cfg, "builtin")
if err != nil {
log.Error(ctx, "error getting package metadata for \"builtin\" package", err)
}
func (v *view) buildBuiltinPkg() {
cfg := *v.Config()
pkgs, _ := packages.Load(&cfg, "builtin")
if len(pkgs) != 1 {
v.builtinPkg, _ = ast.NewPackage(cfg.Fset, nil, nil, nil)
return
@ -368,6 +244,8 @@ func (f *goFile) invalidateContent(ctx context.Context) {
// including any position and type information that depends on it.
func (f *goFile) invalidateAST(ctx context.Context) {
f.mu.Lock()
f.ast = nil
f.token = nil
pkgs := f.pkgs
f.mu.Unlock()
@ -399,25 +277,15 @@ func (v *view) remove(ctx context.Context, id packageID, seen map[packageID]stru
for _, filename := range m.files {
f, err := v.findFile(span.FileURI(filename))
if err != nil {
log.Error(ctx, "cannot find file", err, telemetry.File.Of(f.URI()))
v.session.log.Errorf(ctx, "cannot find file %s: %v", f.URI(), err)
continue
}
gof, ok := f.(*goFile)
if !ok {
log.Error(ctx, "non-Go file", nil, telemetry.File.Of(f.URI()))
v.session.log.Errorf(ctx, "non-Go file %v", f.URI())
continue
}
gof.mu.Lock()
if pkg, ok := gof.pkgs[id]; ok {
// TODO: Ultimately, we shouldn't need this.
// Preemptively delete all of the cached keys if we are invalidating a package.
for _, ph := range pkg.files {
v.session.cache.store.Delete(parseKey{
file: ph.File().Identity(),
mode: ph.Mode(),
})
}
}
delete(gof.pkgs, id)
gof.mu.Unlock()
}

View File

@ -5,6 +5,7 @@
package cmd_test
import (
"context"
"fmt"
"strings"
"testing"
@ -22,7 +23,7 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
fname := uri.Filename()
args := []string{"-remote=internal", "check", fname}
out := captureStdOut(t, func() {
tool.Main(r.ctx, r.app, args)
tool.Main(context.Background(), r.app, args)
})
// parse got into a collection of reports
got := map[string]struct{}{}

View File

@ -24,10 +24,8 @@ import (
"golang.org/x/tools/internal/lsp/cache"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/ocagent"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/tool"
"golang.org/x/tools/internal/xcontext"
)
// Application is the main application as passed to tool.Main
@ -46,9 +44,6 @@ type Application struct {
// The base cache to use for sessions from this application.
cache source.Cache
// The name of the binary, used in help and telemetry.
name string
// The working directory to run commands in.
wd string
@ -60,28 +55,23 @@ type Application struct {
// Enable verbose logging
Verbose bool `flag:"v" help:"Verbose output"`
// Control ocagent export of telemetry
OCAgent string `flag:"ocagent" help:"The address of the ocagent, or off"`
}
// Returns a new Application ready to run.
func New(name, wd string, env []string) *Application {
func New(wd string, env []string) *Application {
if wd == "" {
wd, _ = os.Getwd()
}
app := &Application{
cache: cache.New(),
name: name,
wd: wd,
env: env,
OCAgent: "off", //TODO: Remove this line to default the exporter to on
cache: cache.New(),
wd: wd,
env: env,
}
return app
}
// Name implements tool.Application returning the binary name.
func (app *Application) Name() string { return app.name }
func (app *Application) Name() string { return "gopls" }
// Usage implements tool.Application returning empty extra argument usage.
func (app *Application) Usage() string { return "<command> [command-flags] [command-args]" }
@ -111,7 +101,6 @@ gopls flags are:
// If no arguments are passed it will invoke the server sub command, as a
// temporary measure for compatibility.
func (app *Application) Run(ctx context.Context, args ...string) error {
ocagent.Export(app.name, app.OCAgent)
app.Serve.app = app
if len(args) == 0 {
tool.Main(ctx, &app.Serve, args)
@ -150,7 +139,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
switch app.Remote {
case "":
connection := newConnection(app)
ctx, connection.Server = lsp.NewClientServer(ctx, app.cache, connection.Client)
connection.Server = lsp.NewClientServer(app.cache, connection.Client)
return connection, connection.initialize(ctx)
case "internal":
internalMu.Lock()
@ -159,16 +148,13 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
return c, nil
}
connection := newConnection(app)
ctx := xcontext.Detach(ctx) //TODO: a way of shutting down the internal server
ctx := context.Background() //TODO: a way of shutting down the internal server
cr, sw, _ := os.Pipe()
sr, cw, _ := os.Pipe()
var jc *jsonrpc2.Conn
ctx, jc, connection.Server = protocol.NewClient(ctx, jsonrpc2.NewHeaderStream(cr, cw), connection.Client)
jc, connection.Server, _ = protocol.NewClient(jsonrpc2.NewHeaderStream(cr, cw), connection.Client)
go jc.Run(ctx)
go func() {
ctx, srv := lsp.NewServer(ctx, app.cache, jsonrpc2.NewHeaderStream(sr, sw))
srv.Run(ctx)
}()
go lsp.NewServer(app.cache, jsonrpc2.NewHeaderStream(sr, sw)).Run(ctx)
if err := connection.initialize(ctx); err != nil {
return nil, err
}
@ -182,7 +168,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
}
stream := jsonrpc2.NewHeaderStream(conn, conn)
var jc *jsonrpc2.Conn
ctx, jc, connection.Server = protocol.NewClient(ctx, stream, connection.Client)
jc, connection.Server, _ = protocol.NewClient(stream, connection.Client)
go jc.Run(ctx)
return connection, connection.initialize(ctx)
}
@ -348,14 +334,12 @@ func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile {
func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile {
c.Client.filesMu.Lock()
defer c.Client.filesMu.Unlock()
file := c.Client.getFile(ctx, uri)
if !file.added {
file.added = true
p := &protocol.DidOpenTextDocumentParams{}
p.TextDocument.URI = string(uri)
p.TextDocument.Text = string(file.mapper.Content)
p.TextDocument.LanguageID = source.DetectLanguage("", file.uri.Filename()).String()
if err := c.Server.DidOpen(ctx, p); err != nil {
file.err = fmt.Errorf("%v: %v", uri, err)
}

View File

@ -6,7 +6,6 @@ package cmd_test
import (
"bytes"
"context"
"io/ioutil"
"os"
"path/filepath"
@ -25,7 +24,6 @@ type runner struct {
exporter packagestest.Exporter
data *tests.Data
app *cmd.Application
ctx context.Context
}
func TestCommandLine(t *testing.T) {
@ -39,8 +37,7 @@ func testCommandLine(t *testing.T, exporter packagestest.Exporter) {
r := &runner{
exporter: exporter,
data: data,
app: cmd.New("gopls-test", data.Config.Dir, data.Exported.Config.Env),
ctx: tests.Context(t),
app: cmd.New(data.Config.Dir, data.Exported.Config.Env),
}
tests.Run(t, r, data)
}

View File

@ -5,6 +5,7 @@
package cmd_test
import (
"context"
"fmt"
"os"
"path/filepath"
@ -55,7 +56,7 @@ func TestDefinitionHelpExample(t *testing.T) {
fmt.Sprintf("%v:#%v", thisFile, cmd.ExampleOffset)} {
args := append(baseArgs, query)
got := captureStdOut(t, func() {
tool.Main(tests.Context(t), cmd.New("gopls-test", "", nil), args)
tool.Main(context.Background(), cmd.New("", nil), args)
})
if !expect.MatchString(got) {
t.Errorf("test with %v\nexpected:\n%s\ngot:\n%s", args, expect, got)
@ -83,7 +84,7 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
uri := d.Src.URI()
args = append(args, fmt.Sprint(d.Src))
got := captureStdOut(t, func() {
tool.Main(r.ctx, r.app, args)
tool.Main(context.Background(), r.app, args)
})
got = normalizePaths(r.data, got)
if mode&jsonGoDef != 0 && runtime.GOOS == "windows" {

View File

@ -5,6 +5,7 @@
package cmd_test
import (
"context"
"os/exec"
"regexp"
"strings"
@ -37,9 +38,9 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
//TODO: our error handling differs, for now just skip unformattable files
continue
}
app := cmd.New("gopls-test", r.data.Config.Dir, r.data.Config.Env)
app := cmd.New(r.data.Config.Dir, r.data.Config.Env)
got := captureStdOut(t, func() {
tool.Main(r.ctx, app, append([]string{"-remote=internal", "format"}, args...))
tool.Main(context.Background(), app, append([]string{"-remote=internal", "format"}, args...))
})
got = normalizePaths(r.data, got)
// check the first two lines are the expected file header

View File

@ -20,9 +20,6 @@ import (
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp"
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/tool"
)
@ -82,8 +79,8 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
}
// For debugging purposes only.
run := func(ctx context.Context, srv *lsp.Server) {
srv.Conn.AddHandler(&handler{loggingRPCs: s.Trace, out: out})
run := func(srv *lsp.Server) {
srv.Conn.Logger = logger(s.Trace, out)
go srv.Run(ctx)
}
if s.Address != "" {
@ -93,8 +90,8 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
return lsp.RunServerOnPort(ctx, s.app.cache, s.Port, run)
}
stream := jsonrpc2.NewHeaderStream(os.Stdin, os.Stdout)
ctx, srv := lsp.NewServer(ctx, s.app.cache, stream)
srv.Conn.AddHandler(&handler{loggingRPCs: s.Trace, out: out})
srv := lsp.NewServer(s.app.cache, stream)
srv.Conn.Logger = logger(s.Trace, out)
return srv.Run(ctx)
}
@ -118,161 +115,55 @@ func (s *Serve) forward() error {
return <-errc
}
type handler struct {
loggingRPCs bool
out io.Writer
}
type rpcStats struct {
method string
direction jsonrpc2.Direction
id *jsonrpc2.ID
payload *json.RawMessage
start time.Time
delivering func()
close func()
}
type statsKeyType int
const statsKey = statsKeyType(0)
func (h *handler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
stats := h.getStats(ctx)
if stats != nil {
stats.delivering()
}
return false
}
func (h *handler) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
return false
}
func (h *handler) Request(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireRequest) context.Context {
if r.Method == "" {
panic("no method in rpc stats")
}
stats := &rpcStats{
method: r.Method,
start: time.Now(),
direction: direction,
payload: r.Params,
}
ctx = context.WithValue(ctx, statsKey, stats)
mode := telemetry.Outbound
if direction == jsonrpc2.Receive {
mode = telemetry.Inbound
}
ctx, stats.close = trace.StartSpan(ctx, r.Method,
tag.Tag{Key: telemetry.Method, Value: r.Method},
tag.Tag{Key: telemetry.RPCDirection, Value: mode},
tag.Tag{Key: telemetry.RPCID, Value: r.ID},
)
telemetry.Started.Record(ctx, 1)
_, stats.delivering = trace.StartSpan(ctx, "queued")
return ctx
}
func (h *handler) Response(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireResponse) context.Context {
stats := h.getStats(ctx)
h.logRPC(direction, r.ID, 0, stats.method, r.Result, nil)
return ctx
}
func (h *handler) Done(ctx context.Context, err error) {
stats := h.getStats(ctx)
h.logRPC(stats.direction, stats.id, time.Since(stats.start), stats.method, stats.payload, err)
if err != nil {
ctx = telemetry.StatusCode.With(ctx, "ERROR")
} else {
ctx = telemetry.StatusCode.With(ctx, "OK")
}
elapsedTime := time.Since(stats.start)
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
telemetry.Latency.Record(ctx, latencyMillis)
stats.close()
}
func (h *handler) Read(ctx context.Context, bytes int64) context.Context {
telemetry.SentBytes.Record(ctx, bytes)
return ctx
}
func (h *handler) Wrote(ctx context.Context, bytes int64) context.Context {
telemetry.ReceivedBytes.Record(ctx, bytes)
return ctx
}
const eol = "\r\n\r\n\r\n"
func (h *handler) Error(ctx context.Context, err error) {
stats := h.getStats(ctx)
h.logRPC(stats.direction, stats.id, 0, stats.method, nil, err)
}
func (h *handler) getStats(ctx context.Context) *rpcStats {
stats, ok := ctx.Value(statsKey).(*rpcStats)
if !ok || stats == nil {
method, ok := ctx.Value(telemetry.Method).(string)
if !ok {
method = "???"
func logger(trace bool, out io.Writer) jsonrpc2.Logger {
return func(direction jsonrpc2.Direction, id *jsonrpc2.ID, elapsed time.Duration, method string, payload *json.RawMessage, err *jsonrpc2.Error) {
if !trace {
return
}
stats = &rpcStats{
method: method,
close: func() {},
const eol = "\r\n\r\n\r\n"
if err != nil {
fmt.Fprintf(out, "[Error - %v] %s %s%s %v%s", time.Now().Format("3:04:05 PM"),
direction, method, id, err, eol)
return
}
outx := new(strings.Builder)
fmt.Fprintf(outx, "[Trace - %v] ", time.Now().Format("3:04:05 PM"))
switch direction {
case jsonrpc2.Send:
fmt.Fprint(outx, "Received ")
case jsonrpc2.Receive:
fmt.Fprint(outx, "Sending ")
}
switch {
case id == nil:
fmt.Fprint(outx, "notification ")
case elapsed >= 0:
fmt.Fprint(outx, "response ")
default:
fmt.Fprint(outx, "request ")
}
fmt.Fprintf(outx, "'%s", method)
switch {
case id == nil:
// do nothing
case id.Name != "":
fmt.Fprintf(outx, " - (%s)", id.Name)
default:
fmt.Fprintf(outx, " - (%d)", id.Number)
}
fmt.Fprint(outx, "'")
if elapsed >= 0 {
msec := int(elapsed.Round(time.Millisecond) / time.Millisecond)
fmt.Fprintf(outx, " in %dms", msec)
}
params := "null"
if payload != nil {
params = string(*payload)
}
if params == "null" {
params = "{}"
}
fmt.Fprintf(outx, ".\r\nParams: %s%s", params, eol)
fmt.Fprintf(out, "%s", outx.String())
}
return stats
}
func (h *handler) logRPC(direction jsonrpc2.Direction, id *jsonrpc2.ID, elapsed time.Duration, method string, payload *json.RawMessage, err error) {
if !h.loggingRPCs {
return
}
const eol = "\r\n\r\n\r\n"
if err != nil {
fmt.Fprintf(h.out, "[Error - %v] %s %s%s %v%s", time.Now().Format("3:04:05 PM"),
direction, method, id, err, eol)
return
}
outx := new(strings.Builder)
fmt.Fprintf(outx, "[Trace - %v] ", time.Now().Format("3:04:05 PM"))
switch direction {
case jsonrpc2.Send:
fmt.Fprint(outx, "Received ")
case jsonrpc2.Receive:
fmt.Fprint(outx, "Sending ")
}
switch {
case id == nil:
fmt.Fprint(outx, "notification ")
case elapsed >= 0:
fmt.Fprint(outx, "response ")
default:
fmt.Fprint(outx, "request ")
}
fmt.Fprintf(outx, "'%s", method)
switch {
case id == nil:
// do nothing
case id.Name != "":
fmt.Fprintf(outx, " - (%s)", id.Name)
default:
fmt.Fprintf(outx, " - (%d)", id.Number)
}
fmt.Fprint(outx, "'")
if elapsed >= 0 {
msec := int(elapsed.Round(time.Millisecond) / time.Millisecond)
fmt.Fprintf(outx, " in %dms", msec)
}
params := "null"
if payload != nil {
params = string(*payload)
}
if params == "null" {
params = "{}"
}
fmt.Fprintf(outx, ".\r\nParams: %s%s", params, eol)
fmt.Fprintf(h.out, "%s", outx.String())
}

View File

@ -11,8 +11,6 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)
@ -59,7 +57,7 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara
if s.wantSuggestedFixes {
qf, err := quickFixes(ctx, view, gof)
if err != nil {
log.Error(ctx, "quick fixes failed", err, telemetry.File.Of(uri))
view.Session().Logger().Errorf(ctx, "quick fixes failed for %s: %v", uri, err)
}
codeActions = append(codeActions, qf...)
}
@ -123,8 +121,7 @@ func findImportErrors(diagnostics []protocol.Diagnostic) bool {
return true
}
// "X imported but not used" is an unused import.
// "X imported but not used as Y" is an unused import.
if strings.Contains(diagnostic.Message, " imported but not used") {
if strings.HasSuffix(diagnostic.Message, " imported but not used") {
return true
}
}
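
A small illustration of why Contains replaces HasSuffix here: both the plain and the renamed-import forms of the message (hypothetical examples below) should be treated as unused imports.

package main

import (
	"fmt"
	"strings"
)

func main() {
	msgs := []string{
		`"fmt" imported but not used`,
		`"os/exec" imported but not used as execpkg`,
	}
	for _, m := range msgs {
		// HasSuffix would miss the second form; Contains matches both.
		fmt.Println(strings.Contains(m, " imported but not used"))
	}
}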

View File

@ -12,8 +12,6 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -37,7 +35,7 @@ func (s *Server) completion(ctx context.Context, params *protocol.CompletionPara
WantDocumentaton: s.wantCompletionDocumentation,
})
if err != nil {
log.Print(ctx, "no completions found", tag.Of("At", rng), tag.Of("Failure", err))
s.session.Logger().Infof(ctx, "no completions found for %s:%v:%v: %v", uri, int(params.Position.Line), int(params.Position.Character), err)
}
return &protocol.CompletionList{
IsIncomplete: false,
@ -64,11 +62,11 @@ func (s *Server) toProtocolCompletionItems(ctx context.Context, view source.View
prefix = strings.ToLower(surrounding.Prefix())
spn, err := surrounding.Range.Span()
if err != nil {
log.Print(ctx, "failed to get span for surrounding position: %s:%v:%v: %v", tag.Of("Position", pos), tag.Of("Failure", err))
s.session.Logger().Infof(ctx, "failed to get span for surrounding position: %s:%v:%v: %v", m.URI, int(pos.Line), int(pos.Character), err)
} else {
rng, err := m.Range(spn)
if err != nil {
log.Print(ctx, "failed to convert surrounding position", tag.Of("Position", pos), tag.Of("Failure", err))
s.session.Logger().Infof(ctx, "failed to convert surrounding position: %s:%v:%v: %v", m.URI, int(pos.Line), int(pos.Character), err)
} else {
insertionRange = rng
}

View File

@ -1,49 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/metric"
)
var (
// the distributions we use for histograms
bytesDistribution = []int64{1 << 10, 1 << 11, 1 << 12, 1 << 14, 1 << 16, 1 << 20}
millisecondsDistribution = []float64{0.1, 0.5, 1, 2, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}
receivedBytes = metric.HistogramInt64{
Name: "received_bytes",
Description: "Distribution of received bytes, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: bytesDistribution,
}.Record(telemetry.ReceivedBytes)
sentBytes = metric.HistogramInt64{
Name: "sent_bytes",
Description: "Distribution of sent bytes, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: bytesDistribution,
}.Record(telemetry.SentBytes)
latency = metric.HistogramFloat64{
Name: "latency",
Description: "Distribution of latency in milliseconds, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: millisecondsDistribution,
}.Record(telemetry.Latency)
started = metric.Scalar{
Name: "started",
Description: "Count of RPCs started by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
}.CountInt64(telemetry.Started)
completed = metric.Scalar{
Name: "completed",
Description: "Count of RPCs completed by method and status.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method, telemetry.StatusCode},
}.CountFloat64(telemetry.Latency)
)

View File

@ -1,111 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"bytes"
"fmt"
"net/http"
"sort"
"golang.org/x/tools/internal/lsp/telemetry/metric"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)
type prometheus struct {
metrics []metric.Data
}
func (p *prometheus) observeMetric(data metric.Data) {
name := data.Handle().Name()
index := sort.Search(len(p.metrics), func(i int) bool {
return p.metrics[i].Handle().Name() >= name
})
if index >= len(p.metrics) || p.metrics[index].Handle().Name() != name {
old := p.metrics
p.metrics = make([]metric.Data, len(old)+1)
copy(p.metrics, old[:index])
copy(p.metrics[index+1:], old[index:])
}
p.metrics[index] = data
}
func (p *prometheus) header(w http.ResponseWriter, name, description string, isGauge, isHistogram bool) {
kind := "counter"
if isGauge {
kind = "gauge"
}
if isHistogram {
kind = "histogram"
}
fmt.Fprintf(w, "# HELP %s %s\n", name, description)
fmt.Fprintf(w, "# TYPE %s %s\n", name, kind)
}
func (p *prometheus) row(w http.ResponseWriter, name string, group tag.List, extra string, value interface{}) {
fmt.Fprint(w, name)
buf := &bytes.Buffer{}
fmt.Fprint(buf, group)
if extra != "" {
if buf.Len() > 0 {
fmt.Fprint(buf, ",")
}
fmt.Fprint(buf, extra)
}
if buf.Len() > 0 {
fmt.Fprint(w, "{")
buf.WriteTo(w)
fmt.Fprint(w, "}")
}
fmt.Fprintf(w, " %v\n", value)
}
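
For reference, header and row above emit the Prometheus text exposition format, roughly like this hypothetical fragment for the received_bytes histogram defined earlier (label rendering depends on how tag.List prints):

# HELP received_bytes Distribution of received bytes, by method.
# TYPE received_bytes histogram
received_bytes_bucket{method="textDocument/hover",le="1024"} 3
received_bytes_bucket{method="textDocument/hover",le="+Inf"} 7
received_bytes_count{method="textDocument/hover"} 7
received_bytes_sum{method="textDocument/hover"} 5120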
func (p *prometheus) serve(w http.ResponseWriter, r *http.Request) {
done := make(chan struct{})
worker.Do(func() {
defer close(done)
for _, data := range p.metrics {
switch data := data.(type) {
case *metric.Int64Data:
p.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
for i, group := range data.Groups() {
p.row(w, data.Info.Name, group, "", data.Rows[i])
}
case *metric.Float64Data:
p.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
for i, group := range data.Groups() {
p.row(w, data.Info.Name, group, "", data.Rows[i])
}
case *metric.HistogramInt64Data:
p.header(w, data.Info.Name, data.Info.Description, false, true)
for i, group := range data.Groups() {
row := data.Rows[i]
for j, b := range data.Info.Buckets {
p.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
}
p.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
p.row(w, data.Info.Name+"_count", group, "", row.Count)
p.row(w, data.Info.Name+"_sum", group, "", row.Sum)
}
case *metric.HistogramFloat64Data:
p.header(w, data.Info.Name, data.Info.Description, false, true)
for i, group := range data.Groups() {
row := data.Rows[i]
for j, b := range data.Info.Buckets {
p.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
}
p.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
p.row(w, data.Info.Name+"_count", group, "", row.Count)
p.row(w, data.Info.Name+"_sum", group, "", row.Sum)
}
}
}
})
<-done
}

View File

@ -1,209 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"fmt"
"html/template"
"log"
"net/http"
"sort"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/metric"
)
var rpcTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{define "title"}}RPC Information{{end}}
{{define "body"}}
<H2>Inbound</H2>
{{template "rpcSection" .Inbound}}
<H2>Outbound</H2>
{{template "rpcSection" .Outbound}}
{{end}}
{{define "rpcSection"}}
{{range .}}<P>
<b>{{.Method}}</b> {{.Started}} <a href="/trace/{{.Method}}">traces</a> ({{.InProgress}} in progress)
<br>
<i>Latency</i> {{with .Latency}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<i>By bucket</i> 0s {{range .Latency.Values}}<b>{{.Count}}</b> {{.Limit}} {{end}}
<br>
<i>Received</i> {{with .Received}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<i>Sent</i> {{with .Sent}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<br>
<i>Result codes</i> {{range .Codes}}{{.Key}}={{.Count}} {{end}}
</P>
{{end}}
{{end}}
`))
type rpcs struct {
Inbound []*rpcStats
Outbound []*rpcStats
}
type rpcStats struct {
Method string
Started int64
Completed int64
InProgress int64
Latency rpcTimeHistogram
Received rpcBytesHistogram
Sent rpcBytesHistogram
Codes []*rpcCodeBucket
}
type rpcTimeHistogram struct {
Sum timeUnits
Count int64
Mean timeUnits
Min timeUnits
Max timeUnits
Values []rpcTimeBucket
}
type rpcTimeBucket struct {
Limit timeUnits
Count int64
}
type rpcBytesHistogram struct {
Sum byteUnits
Count int64
Mean byteUnits
Min byteUnits
Max byteUnits
Values []rpcBytesBucket
}
type rpcBytesBucket struct {
Limit byteUnits
Count int64
}
type rpcCodeBucket struct {
Key string
Count int64
}
func (r *rpcs) observeMetric(data metric.Data) {
for i, group := range data.Groups() {
set := &r.Inbound
if group.Get(telemetry.RPCDirection) == telemetry.Outbound {
set = &r.Outbound
}
method, ok := group.Get(telemetry.Method).(string)
if !ok {
log.Printf("Not a method... %v", group)
continue
}
index := sort.Search(len(*set), func(i int) bool {
return (*set)[i].Method >= method
})
if index >= len(*set) || (*set)[index].Method != method {
old := *set
*set = make([]*rpcStats, len(old)+1)
copy(*set, old[:index])
copy((*set)[index+1:], old[index:])
(*set)[index] = &rpcStats{Method: method}
}
stats := (*set)[index]
switch data.Handle() {
case started:
stats.Started = data.(*metric.Int64Data).Rows[i]
case completed:
status, ok := group.Get(telemetry.StatusCode).(string)
if !ok {
log.Printf("Not status... %v", group)
continue
}
var b *rpcCodeBucket
for c, entry := range stats.Codes {
if entry.Key == status {
b = stats.Codes[c]
break
}
}
if b == nil {
b = &rpcCodeBucket{Key: status}
stats.Codes = append(stats.Codes, b)
sort.Slice(stats.Codes, func(i int, j int) bool {
return stats.Codes[i].Key < stats.Codes[j].Key
})
}
b.Count = data.(*metric.Int64Data).Rows[i]
case latency:
data := data.(*metric.HistogramFloat64Data)
row := data.Rows[i]
stats.Latency.Count = row.Count
stats.Latency.Sum = timeUnits(row.Sum)
stats.Latency.Min = timeUnits(row.Min)
stats.Latency.Max = timeUnits(row.Max)
stats.Latency.Mean = timeUnits(row.Sum) / timeUnits(row.Count)
stats.Latency.Values = make([]rpcTimeBucket, len(data.Info.Buckets))
last := int64(0)
for i, b := range data.Info.Buckets {
stats.Latency.Values[i].Limit = timeUnits(b)
stats.Latency.Values[i].Count = row.Values[i] - last
last = row.Values[i]
}
case sentBytes:
data := data.(*metric.HistogramInt64Data)
row := data.Rows[i]
stats.Sent.Count = row.Count
stats.Sent.Sum = byteUnits(row.Sum)
stats.Sent.Min = byteUnits(row.Min)
stats.Sent.Max = byteUnits(row.Max)
stats.Sent.Mean = byteUnits(row.Sum) / byteUnits(row.Count)
case receivedBytes:
data := data.(*metric.HistogramInt64Data)
row := data.Rows[i]
stats.Received.Count = row.Count
stats.Received.Sum = byteUnits(row.Sum)
stats.Received.Min = byteUnits(row.Min)
stats.Received.Max = byteUnits(row.Max)
stats.Received.Mean = byteUnits(row.Sum) / byteUnits(row.Count)
}
}
for _, set := range [][]*rpcStats{r.Inbound, r.Outbound} {
for _, stats := range set {
stats.Completed = 0
for _, b := range stats.Codes {
stats.Completed += b.Count
}
stats.InProgress = stats.Started - stats.Completed
}
}
}
func (r *rpcs) getData(req *http.Request) interface{} {
return r
}
func units(v float64, suffixes []string) string {
s := ""
for _, s = range suffixes {
n := v / 1000
if n < 1 {
break
}
v = n
}
return fmt.Sprintf("%.2f%s", v, s)
}
type timeUnits float64
func (v timeUnits) String() string {
v = v * 1000 * 1000
return units(float64(v), []string{"ns", "μs", "ms", "s"})
}
type byteUnits float64
func (v byteUnits) String() string {
return units(float64(v), []string{"B", "KB", "MB", "GB", "TB"})
}
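
As a quick worked example of the helpers above: byteUnits(1536).String() yields "1.54KB" (the loop divides by 1000, not 1024), and timeUnits(2.5).String() yields "2.50ms", so timeUnits values are interpreted as milliseconds and scaled to nanoseconds before formatting.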

View File

@ -9,6 +9,7 @@ import (
"context"
"go/token"
"html/template"
"log"
"net"
"net/http"
"net/http/pprof"
@ -18,11 +19,6 @@ import (
"strconv"
"sync"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/metric"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/lsp/telemetry/worker"
"golang.org/x/tools/internal/span"
)
@ -215,13 +211,7 @@ func Serve(ctx context.Context, addr string) error {
if err != nil {
return err
}
log.Print(ctx, "Debug serving", tag.Of("Port", listener.Addr().(*net.TCPAddr).Port))
prometheus := prometheus{}
metric.RegisterObservers(prometheus.observeMetric)
rpcs := rpcs{}
metric.RegisterObservers(rpcs.observeMetric)
traces := traces{}
trace.RegisterObservers(traces.export)
log.Printf("Debug serving on port: %d", listener.Addr().(*net.TCPAddr).Port)
go func() {
mux := http.NewServeMux()
mux.HandleFunc("/", Render(mainTmpl, func(*http.Request) interface{} { return data }))
@ -231,9 +221,6 @@ func Serve(ctx context.Context, addr string) error {
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
mux.HandleFunc("/metrics/", prometheus.serve)
mux.HandleFunc("/rpc/", Render(rpcTmpl, rpcs.getData))
mux.HandleFunc("/trace/", Render(traceTmpl, traces.getData))
mux.HandleFunc("/cache/", Render(cacheTmpl, getCache))
mux.HandleFunc("/session/", Render(sessionTmpl, getSession))
mux.HandleFunc("/view/", Render(viewTmpl, getView))
@ -241,28 +228,23 @@ func Serve(ctx context.Context, addr string) error {
mux.HandleFunc("/info", Render(infoTmpl, getInfo))
mux.HandleFunc("/memory", Render(memoryTmpl, getMemory))
if err := http.Serve(listener, mux); err != nil {
log.Error(ctx, "Debug server failed", err)
log.Printf("Debug server failed with %v", err)
return
}
log.Print(ctx, "Debug server finished")
log.Printf("Debug server finished")
}()
return nil
}
func Render(tmpl *template.Template, fun func(*http.Request) interface{}) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
done := make(chan struct{})
worker.Do(func() {
defer close(done)
var data interface{}
if fun != nil {
data = fun(r)
}
if err := tmpl.Execute(w, data); err != nil {
log.Error(context.Background(), "", err)
}
})
<-done
var data interface{}
if fun != nil {
data = fun(r)
}
if err := tmpl.Execute(w, data); err != nil {
log.Print(err)
}
}
}
@ -294,10 +276,6 @@ var BaseTemplate = template.Must(template.New("").Parse(`
td.value {
text-align: right;
}
ul.events {
list-style-type: none;
}
</style>
{{block "head" .}}{{end}}
</head>
@ -305,9 +283,7 @@ ul.events {
<a href="/">Main</a>
<a href="/info">Info</a>
<a href="/memory">Memory</a>
<a href="/metrics">Metrics</a>
<a href="/rpc">RPC</a>
<a href="/trace">Trace</a>
<a href="/debug/">Debug</a>
<hr>
<h1>{{template "title" .}}</h1>
{{block "body" .}}
@ -378,6 +354,8 @@ var debugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{define "title"}}GoPls Debug pages{{end}}
{{define "body"}}
<a href="/debug/pprof">Profiling</a>
<a href="/debug/rpcz">RPCz</a>
<a href="/debug/tracez">Tracez</a>
{{end}}
`))

View File

@ -1,172 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"bytes"
"fmt"
"html/template"
"net/http"
"sort"
"strings"
"time"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
)
var traceTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{define "title"}}Trace Information{{end}}
{{define "body"}}
{{range .Traces}}<a href="/trace/{{.Name}}">{{.Name}}</a> last: {{.Last.Duration}}, longest: {{.Longest.Duration}}<br>{{end}}
{{if .Selected}}
<H2>{{.Selected.Name}}</H2>
{{if .Selected.Last}}<H3>Last</H3><ul>{{template "details" .Selected.Last}}</ul>{{end}}
{{if .Selected.Longest}}<H3>Longest</H3><ul>{{template "details" .Selected.Longest}}</ul>{{end}}
{{end}}
{{end}}
{{define "details"}}
<li>{{.Offset}} {{.Name}} {{.Duration}} {{.Tags}}</li>
{{if .Events}}<ul class=events>{{range .Events}}<li>{{.Offset}} {{.Tags}}</li>{{end}}</ul>{{end}}
{{if .Children}}<ul>{{range .Children}}{{template "details" .}}{{end}}</ul>{{end}}
{{end}}
`))
type traces struct {
sets map[string]*traceSet
unfinished map[trace.SpanID]*traceData
}
type traceResults struct {
Traces []*traceSet
Selected *traceSet
}
type traceSet struct {
Name string
Last *traceData
Longest *traceData
}
type traceData struct {
ID trace.SpanID
ParentID trace.SpanID
Name string
Start time.Time
Finish time.Time
Offset time.Duration
Duration time.Duration
Tags string
Events []traceEvent
Children []*traceData
}
type traceEvent struct {
Time time.Time
Offset time.Duration
Tags string
}
func (t *traces) export(span *trace.Span) {
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[trace.SpanID]*traceData)
}
// is this a completed span?
if span.Finish.IsZero() {
t.start(span)
} else {
t.finish(span)
}
}
func (t *traces) start(span *trace.Span) {
// just starting, add it to the unfinished map
td := &traceData{
ID: span.SpanID,
ParentID: span.ParentID,
Name: span.Name,
Start: span.Start,
Tags: renderTags(span.Tags),
}
t.unfinished[span.SpanID] = td
// and wire up parents if we have them
if !span.ParentID.IsValid() {
return
}
parent, found := t.unfinished[span.ParentID]
if !found {
// trace had an invalid parent, so it cannot itself be valid
return
}
parent.Children = append(parent.Children, td)
}
func (t *traces) finish(span *trace.Span) {
// finishing, must be already in the map
td, found := t.unfinished[span.SpanID]
if !found {
return // if this happens we are in a bad place
}
delete(t.unfinished, span.SpanID)
td.Finish = span.Finish
td.Duration = span.Finish.Sub(span.Start)
td.Events = make([]traceEvent, len(span.Events))
for i, event := range span.Events {
td.Events[i] = traceEvent{
Time: event.Time,
Tags: renderTags(event.Tags),
}
}
set, ok := t.sets[span.Name]
if !ok {
set = &traceSet{Name: span.Name}
t.sets[span.Name] = set
}
set.Last = td
if set.Longest == nil || set.Last.Duration > set.Longest.Duration {
set.Longest = set.Last
}
if !td.ParentID.IsValid() {
fillOffsets(td, td.Start)
}
}
func (t *traces) getData(req *http.Request) interface{} {
if len(t.sets) == 0 {
return nil
}
data := traceResults{}
data.Traces = make([]*traceSet, 0, len(t.sets))
for _, set := range t.sets {
data.Traces = append(data.Traces, set)
}
sort.Slice(data.Traces, func(i, j int) bool { return data.Traces[i].Name < data.Traces[j].Name })
if bits := strings.SplitN(req.URL.Path, "/trace/", 2); len(bits) > 1 {
data.Selected = t.sets[bits[1]]
}
return data
}
func fillOffsets(td *traceData, start time.Time) {
td.Offset = td.Start.Sub(start)
for i := range td.Events {
td.Events[i].Offset = td.Events[i].Time.Sub(start)
}
for _, child := range td.Children {
fillOffsets(child, start)
}
}
func renderTags(tags tag.List) string {
buf := &bytes.Buffer{}
for _, tag := range tags {
fmt.Fprintf(buf, "%v=%q ", tag.Key, tag.Value)
}
return buf.String()
}

View File

@ -10,16 +10,13 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)
func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI) {
ctx = telemetry.File.With(ctx, uri)
f, err := view.GetFile(ctx, uri)
if err != nil {
log.Error(ctx, "no file", err, telemetry.File)
s.session.Logger().Errorf(ctx, "no file for %s: %v", uri, err)
return
}
// For non-Go files, don't return any diagnostics.
@ -29,7 +26,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
}
reports, err := source.Diagnostics(ctx, view, gof, s.disabledAnalyses)
if err != nil {
log.Error(ctx, "failed to compute diagnostics", err, telemetry.File)
s.session.Logger().Errorf(ctx, "failed to compute diagnostics for %s: %v", gof.URI(), err)
return
}
@ -41,7 +38,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
if s.undelivered == nil {
s.undelivered = make(map[span.URI][]source.Diagnostic)
}
log.Error(ctx, "failed to deliver diagnostic (will retry)", err, telemetry.File)
s.session.Logger().Errorf(ctx, "failed to deliver diagnostic for %s (will retry): %v", uri, err)
s.undelivered[uri] = diagnostics
continue
}
@ -52,7 +49,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
// undelivered ones (only for remaining URIs).
for uri, diagnostics := range s.undelivered {
if err := s.publishDiagnostics(ctx, view, uri, diagnostics); err != nil {
log.Error(ctx, "failed to deliver diagnostic for (will not retry)", err, telemetry.File)
s.session.Logger().Errorf(ctx, "failed to deliver diagnostic for %s (will not retry): %v", uri, err)
}
// If we fail to deliver the same diagnostics twice, just give up.
delete(s.undelivered, uri)

View File

@ -6,6 +6,7 @@ package lsp
import (
"context"
"fmt"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
@ -38,9 +39,9 @@ func spanToRange(ctx context.Context, view source.View, s span.Span) (source.GoF
}
if rng.Start == rng.End {
// If we have a single point, assume we want the whole file.
tok, err := f.GetToken(ctx)
if err != nil {
return nil, nil, span.Range{}, err
tok := f.GetToken(ctx)
if tok == nil {
return nil, nil, span.Range{}, fmt.Errorf("no file information for %s", f.URI())
}
rng.End = tok.Pos(tok.Size())
}

View File

@ -15,21 +15,16 @@ import (
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
func (s *Server) initialize(ctx context.Context, params *protocol.InitializeParams) (*protocol.InitializeResult, error) {
s.stateMu.Lock()
state := s.state
s.stateMu.Unlock()
if state >= serverInitializing {
s.initializedMu.Lock()
defer s.initializedMu.Unlock()
if s.isInitialized {
return nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server already initialized")
}
s.stateMu.Lock()
s.state = serverInitializing
s.stateMu.Unlock()
s.isInitialized = true // mark server as initialized now
// TODO: Remove the option once we are certain there are no issues here.
s.textDocumentSyncKind = protocol.Incremental
@ -130,10 +125,6 @@ func (s *Server) setClientCapabilities(caps protocol.ClientCapabilities) {
}
func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error {
s.stateMu.Lock()
s.state = serverInitialized
s.stateMu.Unlock()
if s.configurationSupported {
if s.dynamicConfigurationSupported {
s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
@ -147,36 +138,23 @@ func (s *Server) initialized(ctx context.Context, params *protocol.InitializedPa
})
}
for _, view := range s.session.Views() {
if err := s.fetchConfig(ctx, view); err != nil {
config, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{
Items: []protocol.ConfigurationItem{{
ScopeURI: protocol.NewURI(view.Folder()),
Section: "gopls",
}},
})
if err != nil {
return err
}
if err := s.processConfig(ctx, view, config[0]); err != nil {
return err
}
}
}
buf := &bytes.Buffer{}
debug.PrintVersionInfo(buf, true, debug.PlainText)
log.Print(ctx, buf.String())
return nil
}
func (s *Server) fetchConfig(ctx context.Context, view source.View) error {
configs, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{
Items: []protocol.ConfigurationItem{{
ScopeURI: protocol.NewURI(view.Folder()),
Section: "gopls",
}, {
ScopeURI: protocol.NewURI(view.Folder()),
Section: view.Name(),
},
},
})
if err != nil {
return err
}
for _, config := range configs {
if err := s.processConfig(ctx, view, config); err != nil {
return err
}
}
s.session.Logger().Infof(ctx, "%s", buf)
return nil
}
@ -231,7 +209,7 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int
case "FullDocumentation":
s.hoverKind = source.FullDocumentation
default:
log.Error(ctx, "unsupported hover kind", nil, tag.Of("HoverKind", hoverKind))
view.Session().Logger().Errorf(ctx, "unsupported hover kind %s", hoverKind)
// The default value is already set to synopsis.
}
}
@ -256,21 +234,19 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int
}
func (s *Server) shutdown(ctx context.Context) error {
s.stateMu.Lock()
defer s.stateMu.Unlock()
if s.state < serverInitialized {
s.initializedMu.Lock()
defer s.initializedMu.Unlock()
if !s.isInitialized {
return jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server not initialized")
}
// drop all the active views
s.session.Shutdown(ctx)
s.state = serverShutDown
s.isInitialized = false
return nil
}
func (s *Server) exit(ctx context.Context) error {
s.stateMu.Lock()
defer s.stateMu.Unlock()
if s.state != serverShutDown {
if s.isInitialized {
os.Exit(1)
}
os.Exit(0)

View File

@ -9,8 +9,6 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -31,7 +29,7 @@ func (s *Server) documentHighlight(ctx context.Context, params *protocol.TextDoc
}
spans, err := source.Highlight(ctx, f, rng.Start)
if err != nil {
log.Error(ctx, "no highlight", err, tag.Of("Span", spn))
view.Session().Logger().Errorf(ctx, "no highlight for %s: %v", spn, err)
}
return toProtocolHighlight(m, spans), nil
}

View File

@ -28,7 +28,7 @@ func (s *Server) hover(ctx context.Context, params *protocol.TextDocumentPositio
}
ident, err := source.Identifier(ctx, view, f, identRange.Start)
if err != nil {
return nil, nil
return nil, err
}
hover, err := ident.Hover(ctx, s.preferredContentFormat == protocol.Markdown, s.hoverKind)
if err != nil {

View File

@ -7,16 +7,9 @@ package lsp
import (
"context"
"fmt"
"go/ast"
"go/token"
"regexp"
"strconv"
"sync"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -27,105 +20,30 @@ func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLink
if err != nil {
return nil, err
}
file, err := f.GetAST(ctx, source.ParseFull)
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %v", uri)
}
var links []protocol.DocumentLink
ast.Inspect(file, func(node ast.Node) bool {
switch n := node.(type) {
case *ast.ImportSpec:
target, err := strconv.Unquote(n.Path.Value)
if err != nil {
log.Error(ctx, "cannot unquote import path", err, tag.Of("Path", n.Path.Value))
return false
}
target = "https://godoc.org/" + target
l, err := toProtocolLink(view, m, target, n.Pos(), n.End())
if err != nil {
log.Error(ctx, "cannot initialize DocumentLink", err, tag.Of("Path", n.Path.Value))
return false
}
links = append(links, l)
return false
case *ast.BasicLit:
if n.Kind != token.STRING {
return false
}
l, err := findLinksInString(n.Value, n.Pos(), view, m)
if err != nil {
log.Error(ctx, "cannot find links in string", err)
return false
}
links = append(links, l...)
return false
}
return true
})
for _, commentGroup := range file.Comments {
for _, comment := range commentGroup.List {
l, err := findLinksInString(comment.Text, comment.Pos(), view, m)
if err != nil {
log.Error(ctx, "cannot find links in comment", err)
continue
}
links = append(links, l...)
}
}
return links, nil
}
func findLinksInString(src string, pos token.Pos, view source.View, mapper *protocol.ColumnMapper) ([]protocol.DocumentLink, error) {
var links []protocol.DocumentLink
re, err := getURLRegexp()
if err != nil {
return nil, fmt.Errorf("cannot create regexp for links: %s", err.Error())
}
for _, urlIndex := range re.FindAllIndex([]byte(src), -1) {
start := urlIndex[0]
end := urlIndex[1]
startPos := token.Pos(int(pos) + start)
endPos := token.Pos(int(pos) + end)
target := src[start:end]
l, err := toProtocolLink(view, mapper, target, startPos, endPos)
// Add a Godoc link for each imported package.
var result []protocol.DocumentLink
for _, imp := range file.Imports {
spn, err := span.NewRange(view.Session().Cache().FileSet(), imp.Pos(), imp.End()).Span()
if err != nil {
return nil, err
}
links = append(links, l)
rng, err := m.Range(spn)
if err != nil {
return nil, err
}
target, err := strconv.Unquote(imp.Path.Value)
if err != nil {
continue
}
target = "https://godoc.org/" + target
result = append(result, protocol.DocumentLink{
Range: rng,
Target: target,
})
}
return links, nil
}
const urlRegexpString = "(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?"
var (
urlRegexp *regexp.Regexp
regexpOnce sync.Once
regexpErr error
)
func getURLRegexp() (*regexp.Regexp, error) {
regexpOnce.Do(func() {
urlRegexp, regexpErr = regexp.Compile(urlRegexpString)
})
return urlRegexp, regexpErr
}
func toProtocolLink(view source.View, mapper *protocol.ColumnMapper, target string, start, end token.Pos) (protocol.DocumentLink, error) {
spn, err := span.NewRange(view.Session().Cache().FileSet(), start, end).Span()
if err != nil {
return protocol.DocumentLink{}, err
}
rng, err := mapper.Range(spn)
if err != nil {
return protocol.DocumentLink{}, err
}
l := protocol.DocumentLink{
Range: rng,
Target: target,
}
return l, nil
return result, nil
}

View File

@ -21,6 +21,7 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/tests"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/span"
)
@ -31,19 +32,18 @@ func TestLSP(t *testing.T) {
type runner struct {
server *Server
data *tests.Data
ctx context.Context
}
const viewName = "lsp_test"
func testLSP(t *testing.T, exporter packagestest.Exporter) {
ctx := tests.Context(t)
data := tests.Load(t, exporter, "testdata")
defer data.Exported.Cleanup()
log := xlog.New(xlog.StdSink{})
cache := cache.New()
session := cache.NewSession(ctx)
view := session.NewView(ctx, viewName, span.FileURI(data.Config.Dir))
session := cache.NewSession(log)
view := session.NewView(viewName, span.FileURI(data.Config.Dir))
view.SetEnv(data.Config.Env)
for filename, content := range data.Config.Overlay {
session.SetOverlay(span.FileURI(filename), content)
@ -59,7 +59,6 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) {
hoverKind: source.SynopsisDocumentation,
},
data: data,
ctx: ctx,
}
tests.Run(t, r, data)
}
@ -68,7 +67,7 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) {
func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
v := r.server.session.View(viewName)
for uri, want := range data {
f, err := v.GetFile(r.ctx, uri)
f, err := v.GetFile(context.Background(), uri)
if err != nil {
t.Fatalf("no file for %s: %v", f, err)
}
@ -76,7 +75,7 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
if !ok {
t.Fatalf("%s is not a Go file: %v", uri, err)
}
results, err := source.Diagnostics(r.ctx, v, gof, nil)
results, err := source.Diagnostics(context.Background(), v, gof, nil)
if err != nil {
t.Fatal(err)
}
@ -219,7 +218,7 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
func (r *runner) runCompletion(t *testing.T, src span.Span) *protocol.CompletionList {
t.Helper()
list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{
list, err := r.server.Completion(context.Background(), &protocol.CompletionParams{
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(src.URI()),
@ -296,6 +295,7 @@ func summarizeCompletionItems(i int, want []source.CompletionItem, got []protoco
}
func (r *runner) Format(t *testing.T, data tests.Formats) {
ctx := context.Background()
for _, spn := range data {
uri := spn.URI()
filename := uri.Filename()
@ -305,7 +305,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
return out, nil
}))
edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{
edits, err := r.server.Formatting(context.Background(), &protocol.DocumentFormattingParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(uri),
},
@ -316,7 +316,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
}
continue
}
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(uri), uri)
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(uri), uri)
if err != nil {
t.Error(err)
}
@ -333,6 +333,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
}
func (r *runner) Import(t *testing.T, data tests.Imports) {
ctx := context.Background()
for _, spn := range data {
uri := spn.URI()
filename := uri.Filename()
@ -342,7 +343,7 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
return out, nil
}))
actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
actions, err := r.server.CodeAction(context.Background(), &protocol.CodeActionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(uri),
},
@ -353,7 +354,7 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
}
continue
}
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(uri), uri)
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(uri), uri)
if err != nil {
t.Error(err)
}
@ -392,13 +393,13 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
var locs []protocol.Location
var hover *protocol.Hover
if d.IsType {
locs, err = r.server.TypeDefinition(r.ctx, params)
locs, err = r.server.TypeDefinition(context.Background(), params)
} else {
locs, err = r.server.Definition(r.ctx, params)
locs, err = r.server.Definition(context.Background(), params)
if err != nil {
t.Fatalf("failed for %v: %v", d.Src, err)
}
hover, err = r.server.Hover(r.ctx, params)
hover, err = r.server.Hover(context.Background(), params)
}
if err != nil {
t.Fatalf("failed for %v: %v", d.Src, err)
@ -445,7 +446,7 @@ func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
Position: loc.Range.Start,
}
highlights, err := r.server.DocumentHighlight(r.ctx, params)
highlights, err := r.server.DocumentHighlight(context.Background(), params)
if err != nil {
t.Fatal(err)
}
@ -491,7 +492,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
Position: loc.Range.Start,
},
}
got, err := r.server.References(r.ctx, params)
got, err := r.server.References(context.Background(), params)
if err != nil {
t.Fatalf("failed for %v: %v", src, err)
}
@ -508,6 +509,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
}
func (r *runner) Rename(t *testing.T, data tests.Renames) {
ctx := context.Background()
for spn, newText := range data {
tag := fmt.Sprintf("%s-rename", newText)
@ -522,7 +524,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
t.Fatalf("failed for %v: %v", spn, err)
}
workspaceEdits, err := r.server.Rename(r.ctx, &protocol.RenameParams{
workspaceEdits, err := r.server.Rename(ctx, &protocol.RenameParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(uri),
},
@ -542,7 +544,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
var res []string
for uri, edits := range *workspaceEdits.Changes {
spnURI := span.URI(uri)
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(span.URI(spnURI)), spnURI)
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(span.URI(spnURI)), spnURI)
if err != nil {
t.Error(err)
}
@ -580,6 +582,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
func applyEdits(contents string, edits []source.TextEdit) string {
res := contents
sortSourceTextEdits(edits)
// Apply the edits from the end of the file forward
// to preserve the offsets
@ -593,6 +596,15 @@ func applyEdits(contents string, edits []source.TextEdit) string {
return res
}
func sortSourceTextEdits(d []source.TextEdit) {
sort.Slice(d, func(i int, j int) bool {
if r := span.Compare(d[i].Span, d[j].Span); r != 0 {
return r < 0
}
return d[i].NewText < d[j].NewText
})
}
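The back-to-front application in applyEdits above is what keeps earlier offsets valid while replacements change the length of the text. A minimal, self-contained sketch of the same idea; the edit type and apply helper here are hypothetical stand-ins, not the test helpers from this diff:

package main

import (
	"fmt"
	"sort"
)

// edit is a stand-in for source.TextEdit: a byte range in the original
// string plus its replacement text.
type edit struct {
	start, end int
	newText    string
}

// apply sorts the edits by position and applies them from the end of the
// buffer toward the start, so the offsets of the remaining edits stay
// valid as the text grows or shrinks.
func apply(contents string, edits []edit) string {
	sort.Slice(edits, func(i, j int) bool { return edits[i].start < edits[j].start })
	res := contents
	for i := len(edits) - 1; i >= 0; i-- {
		e := edits[i]
		res = res[:e.start] + e.newText + res[e.end:]
	}
	return res
}

func main() {
	fmt.Println(apply("hello world", []edit{
		{start: 0, end: 5, newText: "goodbye"}, // replaces "hello"
		{start: 6, end: 11, newText: "gopls"},  // replaces "world"
	}))
	// Output: goodbye gopls
}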
func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
for uri, expectedSymbols := range data {
params := &protocol.DocumentSymbolParams{
@ -600,7 +612,7 @@ func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
URI: string(uri),
},
}
symbols, err := r.server.DocumentSymbol(r.ctx, params)
symbols, err := r.server.DocumentSymbol(context.Background(), params)
if err != nil {
t.Fatal(err)
}
@ -676,7 +688,7 @@ func (r *runner) SignatureHelp(t *testing.T, data tests.Signatures) {
if err != nil {
t.Fatalf("failed for %v: %v", loc, err)
}
gotSignatures, err := r.server.SignatureHelp(r.ctx, &protocol.TextDocumentPositionParams{
gotSignatures, err := r.server.SignatureHelp(context.Background(), &protocol.TextDocumentPositionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(spn.URI()),
},
@ -742,7 +754,7 @@ func (r *runner) Link(t *testing.T, data tests.Links) {
if err != nil {
t.Fatal(err)
}
gotLinks, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{
gotLinks, err := r.server.DocumentLink(context.Background(), &protocol.DocumentLinkParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.NewURI(uri),
},
@ -750,30 +762,15 @@ func (r *runner) Link(t *testing.T, data tests.Links) {
if err != nil {
t.Fatal(err)
}
var notePositions []token.Position
links := make(map[span.Span]string, len(wantLinks))
for _, link := range wantLinks {
links[link.Src] = link.Target
notePositions = append(notePositions, link.NotePosition)
}
for _, link := range gotLinks {
spn, err := m.RangeSpan(link.Range)
if err != nil {
t.Fatal(err)
}
linkInNote := false
for _, notePosition := range notePositions {
// Drop links found inside expectation note arguments, as these links are not collected by the expect package
if notePosition.Line == spn.Start().Line() &&
notePosition.Column <= spn.Start().Column() {
delete(links, spn)
linkInNote = true
}
}
if linkInNote {
continue
}
if target, ok := links[spn]; ok {
delete(links, spn)
if target != link.Target {

View File

@ -1,40 +0,0 @@
package protocol
import (
"context"
"fmt"
"time"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/xcontext"
)
func init() {
log.AddLogger(logger)
}
type contextKey int
const (
clientKey = contextKey(iota)
)
func WithClient(ctx context.Context, client Client) context.Context {
return context.WithValue(ctx, clientKey, client)
}
// logger implements log.Logger in terms of the LogMessage call to a client.
func logger(ctx context.Context, at time.Time, tags tag.List) bool {
client, ok := ctx.Value(clientKey).(Client)
if !ok {
return false
}
entry := log.ToEntry(ctx, time.Time{}, tags)
msg := &LogMessageParams{Type: Info, Message: fmt.Sprint(entry)}
if entry.Error != nil {
msg.Type = Error
}
go client.LogMessage(xcontext.Detach(ctx), msg)
return true
}

View File

@ -0,0 +1,21 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protocol
import (
"context"
"time"
)
// detatchContext returns a context that keeps all the values of its parent
// context but detaches from its cancellation and error handling.
func detatchContext(ctx context.Context) context.Context { return detatchedContext{ctx} }
type detatchedContext struct{ parent context.Context }
func (v detatchedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (v detatchedContext) Done() <-chan struct{} { return nil }
func (v detatchedContext) Err() error { return nil }
func (v detatchedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }
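The detatchedContext type above forwards only Value lookups to its parent; deadline, cancellation, and error reporting are dropped, so work started with it (such as the asynchronous LogMessage and cancelRequest calls elsewhere in this diff) outlives cancellation of the originating request. A standalone sketch of the same pattern, using a hypothetical detached type rather than the unexported one in this package:

package main

import (
	"context"
	"fmt"
	"time"
)

// detached mirrors detatchedContext: Value lookups go to the parent, but
// there is no deadline, no Done channel, and no error.
type detached struct{ parent context.Context }

func (d detached) Deadline() (time.Time, bool)       { return time.Time{}, false }
func (d detached) Done() <-chan struct{}             { return nil }
func (d detached) Err() error                        { return nil }
func (d detached) Value(key interface{}) interface{} { return d.parent.Value(key) }

type ctxKey string

func main() {
	parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey("request"), "textDocument/hover"))
	cancel() // the request context is cancelled...

	d := detached{parent: parent}
	fmt.Println(d.Err())                    // <nil>: the detached context is not cancelled
	fmt.Println(d.Value(ctxKey("request"))) // textDocument/hover: values still flow through
}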

View File

@ -13,7 +13,7 @@ var (
namesInitializeError [int(UnknownProtocolVersion) + 1]string
namesMessageType [int(Log) + 1]string
namesFileChangeType [int(Deleted) + 1]string
namesWatchKind [int(WatchDelete) + 1]string
namesWatchKind [int(Change) + 1]string
namesCompletionTriggerKind [int(TriggerForIncompleteCompletions) + 1]string
namesDiagnosticSeverity [int(SeverityHint) + 1]string
namesDiagnosticTag [int(Unnecessary) + 1]string
@ -40,9 +40,7 @@ func init() {
namesFileChangeType[int(Changed)] = "Changed"
namesFileChangeType[int(Deleted)] = "Deleted"
namesWatchKind[int(WatchCreate)] = "WatchCreate"
namesWatchKind[int(WatchChange)] = "WatchChange"
namesWatchKind[int(WatchDelete)] = "WatchDelete"
namesWatchKind[int(Change)] = "Change"
namesCompletionTriggerKind[int(Invoked)] = "Invoked"
namesCompletionTriggerKind[int(TriggerCharacter)] = "TriggerCharacter"

View File

@ -0,0 +1,32 @@
package protocol
import (
"context"
"golang.org/x/tools/internal/lsp/xlog"
)
// logSink implements xlog.Sink in terms of the LogMessage call to a client.
type logSink struct {
client Client
}
// NewLogger returns an xlog.Sink that sends its messages using client.LogMessage.
// It maps Debug to the Log level, Info and Error to their matching levels, and
// does not support warnings.
func NewLogger(client Client) xlog.Sink {
return logSink{client: client}
}
func (s logSink) Log(ctx context.Context, level xlog.Level, message string) {
typ := Log
switch level {
case xlog.ErrorLevel:
typ = Error
case xlog.InfoLevel:
typ = Info
case xlog.DebugLevel:
typ = Log
}
s.client.LogMessage(ctx, &LogMessageParams{Type: typ, Message: message})
}
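NewLogger above adapts an LSP client into an xlog.Sink by mapping xlog levels onto LSP message types. A standalone sketch of that mapping with local stand-in types; the numeric MessageType values 1, 3, and 4 follow the LSP specification for Error, Info, and Log, while everything else here is hypothetical:

package main

import (
	"context"
	"fmt"
)

// Local stand-ins for xlog.Level and the LSP MessageType constants.
type level int

const (
	debugLevel level = iota
	infoLevel
	errorLevel
)

type messageType int

const (
	typeError messageType = 1 // LSP MessageType.Error
	typeInfo  messageType = 3 // LSP MessageType.Info
	typeLog   messageType = 4 // LSP MessageType.Log
)

// sink mirrors logSink.Log: Debug maps to Log, Info and Error map to their
// counterparts, and there is no warning level on the xlog side.
type sink struct{}

func (sink) Log(ctx context.Context, l level, message string) {
	typ := typeLog
	switch l {
	case errorLevel:
		typ = typeError
	case infoLevel:
		typ = typeInfo
	case debugLevel:
		typ = typeLog
	}
	// In the real code this becomes client.LogMessage(ctx, &LogMessageParams{...}).
	fmt.Printf("window/logMessage type=%d message=%q\n", typ, message)
}

func main() {
	var s sink
	s.Log(context.Background(), infoLevel, "gopls initialized")
	s.Log(context.Background(), errorLevel, "failed to compute diagnostics")
}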

View File

@ -8,56 +8,46 @@ import (
"context"
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/xcontext"
"golang.org/x/tools/internal/lsp/xlog"
)
type DocumentUri = string
const defaultMessageBufferSize = 20
const defaultRejectIfOverloaded = false
type canceller struct{ jsonrpc2.EmptyHandler }
type clientHandler struct {
canceller
client Client
}
type serverHandler struct {
canceller
server Server
}
func (canceller) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
if cancelled {
return false
}
ctx = xcontext.Detach(ctx)
ctx, done := trace.StartSpan(ctx, "protocol.canceller")
defer done()
func canceller(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID) {
ctx = detatchContext(ctx)
ctx, span := trace.StartSpan(ctx, "protocol.canceller")
defer span.End()
conn.Notify(ctx, "$/cancelRequest", &CancelParams{ID: id})
return true
}
func NewClient(ctx context.Context, stream jsonrpc2.Stream, client Client) (context.Context, *jsonrpc2.Conn, Server) {
ctx = WithClient(ctx, client)
func NewClient(stream jsonrpc2.Stream, client Client) (*jsonrpc2.Conn, Server, xlog.Logger) {
log := xlog.New(NewLogger(client))
conn := jsonrpc2.NewConn(stream)
conn.AddHandler(&clientHandler{client: client})
return ctx, conn, &serverDispatcher{Conn: conn}
conn.Capacity = defaultMessageBufferSize
conn.RejectIfOverloaded = defaultRejectIfOverloaded
conn.Handler = clientHandler(log, client)
conn.Canceler = jsonrpc2.Canceler(canceller)
return conn, &serverDispatcher{Conn: conn}, log
}
func NewServer(ctx context.Context, stream jsonrpc2.Stream, server Server) (context.Context, *jsonrpc2.Conn, Client) {
func NewServer(stream jsonrpc2.Stream, server Server) (*jsonrpc2.Conn, Client, xlog.Logger) {
conn := jsonrpc2.NewConn(stream)
client := &clientDispatcher{Conn: conn}
ctx = WithClient(ctx, client)
conn.AddHandler(&serverHandler{server: server})
return ctx, conn, client
log := xlog.New(NewLogger(client))
conn.Capacity = defaultMessageBufferSize
conn.RejectIfOverloaded = defaultRejectIfOverloaded
conn.Handler = serverHandler(log, server)
conn.Canceler = jsonrpc2.Canceler(canceller)
return conn, client, log
}
func sendParseError(ctx context.Context, req *jsonrpc2.Request, err error) {
func sendParseError(ctx context.Context, log xlog.Logger, req *jsonrpc2.Request, err error) {
if _, ok := err.(*jsonrpc2.Error); !ok {
err = jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err)
}
if err := req.Reply(ctx, nil, err); err != nil {
log.Error(ctx, "", err)
log.Errorf(ctx, "%v", err)
}
}

View File

@ -7,7 +7,7 @@ import (
"encoding/json"
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/xlog"
)
type Client interface {
@ -23,127 +23,117 @@ type Client interface {
ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResponse, error)
}
func (h clientHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
if delivered {
return false
}
switch r.Method {
case "$/cancelRequest":
var params CancelParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
r.Conn().Cancel(params.ID)
return true
case "window/showMessage": // notif
var params ShowMessageParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.client.ShowMessage(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "window/logMessage": // notif
var params LogMessageParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.client.LogMessage(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "telemetry/event": // notif
var params interface{}
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.client.Event(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/publishDiagnostics": // notif
var params PublishDiagnosticsParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.client.PublishDiagnostics(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/workspaceFolders": // req
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return true
}
resp, err := h.client.WorkspaceFolders(ctx)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/configuration": // req
var params ConfigurationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.client.Configuration(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "client/registerCapability": // req
var params RegistrationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
err := h.client.RegisterCapability(ctx, &params)
if err := r.Reply(ctx, nil, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "client/unregisterCapability": // req
var params UnregistrationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
err := h.client.UnregisterCapability(ctx, &params)
if err := r.Reply(ctx, nil, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "window/showMessageRequest": // req
var params ShowMessageRequestParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.client.ShowMessageRequest(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/applyEdit": // req
var params ApplyWorkspaceEditParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.client.ApplyEdit(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
func clientHandler(log xlog.Logger, client Client) jsonrpc2.Handler {
return func(ctx context.Context, r *jsonrpc2.Request) {
switch r.Method {
case "$/cancelRequest":
var params CancelParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
r.Conn().Cancel(params.ID)
case "window/showMessage": // notif
var params ShowMessageParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := client.ShowMessage(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "window/logMessage": // notif
var params LogMessageParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := client.LogMessage(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "telemetry/event": // notif
var params interface{}
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := client.Event(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/publishDiagnostics": // notif
var params PublishDiagnosticsParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := client.PublishDiagnostics(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/workspaceFolders": // req
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return
}
resp, err := client.WorkspaceFolders(ctx)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/configuration": // req
var params ConfigurationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := client.Configuration(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "client/registerCapability": // req
var params RegistrationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
err := client.RegisterCapability(ctx, &params)
if err := r.Reply(ctx, nil, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "client/unregisterCapability": // req
var params UnregistrationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
err := client.UnregisterCapability(ctx, &params)
if err := r.Reply(ctx, nil, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "window/showMessageRequest": // req
var params ShowMessageRequestParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := client.ShowMessageRequest(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/applyEdit": // req
var params ApplyWorkspaceEditParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := client.ApplyEdit(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
default:
return false
default:
if r.IsNotify() {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
}
}
}
}

View File

@ -1,7 +1,7 @@
// Package protocol contains data types and code for LSP jsonrpcs
// generated automatically from vscode-languageserver-node
// commit: 8801c20b667945f455d7e023c71d2f741caeda25
// last fetched Sat Jul 13 2019 18:33:10 GMT-0700 (Pacific Daylight Time)
// commit: c1e8923f8ea3b1f9c61dadf97448244d9ffbf7ae
// last fetched Tue May 21 2019 07:36:27 GMT-0400 (Eastern Daylight Time)
package protocol
// Code generated (see typescript/README.md) DO NOT EDIT.
@ -155,26 +155,6 @@ type FoldingRangeParams struct {
TextDocument TextDocumentIdentifier `json:"textDocument"`
}
// SelectionRangeProviderOptions is
type SelectionRangeProviderOptions struct {
}
/*SelectionRangeParams defined:
* A parameter literal used in selection range requests.
*/
type SelectionRangeParams struct {
/*TextDocument defined:
* The text document.
*/
TextDocument TextDocumentIdentifier `json:"textDocument"`
/*Positions defined:
* The positions inside the text document.
*/
Positions []Position `json:"positions"`
}
/*Registration defined:
* General parameters to register for a notification or to register a provider.
*/
@ -1261,19 +1241,6 @@ type ClientCapabilities struct {
*/
LinkSupport bool `json:"linkSupport,omitempty"`
} `json:"declaration,omitempty"`
/*SelectionRange defined:
* Capabilities specific to `textDocument/selectionRange` requests
*/
SelectionRange struct {
/*DynamicRegistration defined:
* Whether implementation supports dynamic registration for selection range providers. If this is set to `true`
* the client supports the new `(SelectionRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)`
* return value for the corresponding server capability as well.
*/
DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
} `json:"selectionRange,omitempty"`
} `json:"textDocument,omitempty"`
/*Window defined:
@ -1633,11 +1600,6 @@ type ServerCapabilities struct {
* The server provides Goto Type Definition support.
*/
DeclarationProvider bool `json:"declarationProvider,omitempty"` // boolean | (TextDocumentRegistrationOptions & StaticRegistrationOptions)
/*SelectionRangeProvider defined:
* The server provides selection range support.
*/
SelectionRangeProvider bool `json:"selectionRangeProvider,omitempty"` // boolean | (TextDocumentRegistrationOptions & StaticRegistrationOptions & SelectionRangeProviderOptions)
}
// InitializeParams is
@ -1664,7 +1626,7 @@ type InitializeParams struct {
*
* @deprecated in favour of workspaceFolders.
*/
RootURI DocumentUri `json:"rootUri"`
RootURI string `json:"rootUri"`
/*Capabilities defined:
* The capabilities provided by the client (editor or tool)
@ -1899,7 +1861,7 @@ type FileEvent struct {
/*URI defined:
* The file's uri.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*Type defined:
* The change type.
@ -1948,12 +1910,10 @@ type PublishDiagnosticsParams struct {
/*URI defined:
* The URI for which diagnostic information is reported.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*Version defined:
* Optionally, the version number of the document the diagnostics are published for.
*
* @since 3.15
*/
Version float64 `json:"version,omitempty"`
@ -2304,7 +2264,7 @@ type Range struct {
type Location struct {
// URI is
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
// Range is
Range Range `json:"range"`
@ -2327,7 +2287,7 @@ type LocationLink struct {
/*TargetURI defined:
* The target resource identifier of this link.
*/
TargetURI DocumentUri `json:"targetUri"`
TargetURI string `json:"targetUri"`
/*TargetRange defined:
* The full target range of this link. If the target for example is a symbol then target range is the
@ -2568,7 +2528,7 @@ type CreateFile struct {
/*URI defined:
* The resource to create.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*Options defined:
* Additional options
@ -2605,12 +2565,12 @@ type RenameFile struct {
/*OldURI defined:
* The old (existing) location.
*/
OldURI DocumentUri `json:"oldUri"`
OldURI string `json:"oldUri"`
/*NewURI defined:
* The new location.
*/
NewURI DocumentUri `json:"newUri"`
NewURI string `json:"newUri"`
/*Options defined:
* Rename options.
@ -2647,7 +2607,7 @@ type DeleteFile struct {
/*URI defined:
* The file to delete.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*Options defined:
* Delete options.
@ -2696,7 +2656,7 @@ type TextDocumentIdentifier struct {
/*URI defined:
* The text document's uri.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
}
/*VersionedTextDocumentIdentifier defined:
@ -2724,7 +2684,7 @@ type TextDocumentItem struct {
/*URI defined:
* The text document's uri.
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*LanguageID defined:
* The text document's language identifier
@ -2849,6 +2809,8 @@ type CompletionItem struct {
* and a completion item with an `insertText` of `console` is provided it
* will only insert `sole`. Therefore it is recommended to use `textEdit` instead
* since it avoids additional client side interpretation.
*
* @deprecated Use textEdit instead.
*/
InsertText string `json:"insertText,omitempty"`
@ -3300,23 +3262,6 @@ type DocumentLink struct {
Data interface{} `json:"data,omitempty"`
}
/*SelectionRange defined:
* A selection range represents a part of a selection hierarchy. A selection range
* may have a parent selection range that contains it.
*/
type SelectionRange struct {
/*Range defined:
* The [range](#Range) of this selection range.
*/
Range Range `json:"range"`
/*Parent defined:
* The parent selection range containing this range. Therefore `parent.range` must contain `this.range`.
*/
Parent *SelectionRange `json:"parent,omitempty"`
}
/*TextDocument defined:
* A simple text document. Not to be implemented.
*/
@ -3329,7 +3274,7 @@ type TextDocument struct {
*
* @readonly
*/
URI DocumentUri `json:"uri"`
URI string `json:"uri"`
/*LanguageID defined:
* The identifier of the language associated with this document.
@ -3611,20 +3556,10 @@ const (
*/
Deleted FileChangeType = 3
/*WatchCreate defined:
* Interested in create events.
*/
WatchCreate WatchKind = 1
/*WatchChange defined:
/*Change defined:
* Interested in change events
*/
WatchChange WatchKind = 2
/*WatchDelete defined:
* Interested in delete events
*/
WatchDelete WatchKind = 4
Change WatchKind = 2
/*Invoked defined:
* Completion was triggered by typing an identifier (24x7 code
@ -4017,12 +3952,6 @@ type DocumentFilter struct {
*/
type DocumentSelector []DocumentFilter
// DocumentURI is a type
/**
* A tagging type for string properties that are actually URIs.
*/
type DocumentURI string
// DefinitionLink is a type
/**
* Information about where a symbol is defined.

View File

@ -7,7 +7,7 @@ import (
"encoding/json"
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/xlog"
)
type Server interface {
@ -29,7 +29,6 @@ type Server interface {
ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error)
FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange, error)
Declaration(context.Context, *TextDocumentPositionParams) ([]DeclarationLink, error)
SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange, error)
Initialize(context.Context, *InitializeParams) (*InitializeResult, error)
Shutdown(context.Context) error
WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit, error)
@ -55,466 +54,414 @@ type Server interface {
ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{}, error)
}
func (h serverHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
if delivered {
return false
}
switch r.Method {
case "$/cancelRequest":
var params CancelParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
r.Conn().Cancel(params.ID)
return true
case "workspace/didChangeWorkspaceFolders": // notif
var params DidChangeWorkspaceFoldersParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidChangeWorkspaceFolders(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "initialized": // notif
var params InitializedParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.Initialized(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "exit": // notif
if err := h.server.Exit(ctx); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/didChangeConfiguration": // notif
var params DidChangeConfigurationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidChangeConfiguration(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/didOpen": // notif
var params DidOpenTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidOpen(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/didChange": // notif
var params DidChangeTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidChange(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/didClose": // notif
var params DidCloseTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidClose(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/didSave": // notif
var params DidSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidSave(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/willSave": // notif
var params WillSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.WillSave(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/didChangeWatchedFiles": // notif
var params DidChangeWatchedFilesParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.DidChangeWatchedFiles(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "$/setTraceNotification": // notif
var params SetTraceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.SetTraceNotification(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "$/logTraceNotification": // notif
var params LogTraceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
if err := h.server.LogTraceNotification(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/implementation": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Implementation(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/typeDefinition": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.TypeDefinition(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/documentColor": // req
var params DocumentColorParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.DocumentColor(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/colorPresentation": // req
var params ColorPresentationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.ColorPresentation(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/foldingRange": // req
var params FoldingRangeParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.FoldingRange(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/declaration": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Declaration(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/selectionRange": // req
var params SelectionRangeParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.SelectionRange(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "initialize": // req
var params InitializeParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Initialize(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "shutdown": // req
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return true
}
err := h.server.Shutdown(ctx)
if err := r.Reply(ctx, nil, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/willSaveWaitUntil": // req
var params WillSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.WillSaveWaitUntil(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/completion": // req
var params CompletionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Completion(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "completionItem/resolve": // req
var params CompletionItem
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Resolve(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/hover": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Hover(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/signatureHelp": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.SignatureHelp(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/definition": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Definition(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/references": // req
var params ReferenceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.References(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/documentHighlight": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.DocumentHighlight(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/documentSymbol": // req
var params DocumentSymbolParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.DocumentSymbol(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/symbol": // req
var params WorkspaceSymbolParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Symbol(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/codeAction": // req
var params CodeActionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.CodeAction(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/codeLens": // req
var params CodeLensParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.CodeLens(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "codeLens/resolve": // req
var params CodeLens
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.ResolveCodeLens(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/formatting": // req
var params DocumentFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Formatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/rangeFormatting": // req
var params DocumentRangeFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.RangeFormatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/onTypeFormatting": // req
var params DocumentOnTypeFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.OnTypeFormatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/rename": // req
var params RenameParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.Rename(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/prepareRename": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.PrepareRename(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "textDocument/documentLink": // req
var params DocumentLinkParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.DocumentLink(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "documentLink/resolve": // req
var params DocumentLink
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.ResolveDocumentLink(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
case "workspace/executeCommand": // req
var params ExecuteCommandParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
}
resp, err := h.server.ExecuteCommand(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true
func serverHandler(log xlog.Logger, server Server) jsonrpc2.Handler {
return func(ctx context.Context, r *jsonrpc2.Request) {
switch r.Method {
case "$/cancelRequest":
var params CancelParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
r.Conn().Cancel(params.ID)
case "workspace/didChangeWorkspaceFolders": // notif
var params DidChangeWorkspaceFoldersParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidChangeWorkspaceFolders(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "initialized": // notif
var params InitializedParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.Initialized(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "exit": // notif
if err := server.Exit(ctx); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/didChangeConfiguration": // notif
var params DidChangeConfigurationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidChangeConfiguration(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/didOpen": // notif
var params DidOpenTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidOpen(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/didChange": // notif
var params DidChangeTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidChange(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/didClose": // notif
var params DidCloseTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidClose(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/didSave": // notif
var params DidSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidSave(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/willSave": // notif
var params WillSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.WillSave(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/didChangeWatchedFiles": // notif
var params DidChangeWatchedFilesParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.DidChangeWatchedFiles(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "$/setTraceNotification": // notif
var params SetTraceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.SetTraceNotification(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "$/logTraceNotification": // notif
var params LogTraceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
if err := server.LogTraceNotification(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/implementation": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Implementation(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/typeDefinition": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.TypeDefinition(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/documentColor": // req
var params DocumentColorParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.DocumentColor(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/colorPresentation": // req
var params ColorPresentationParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.ColorPresentation(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/foldingRange": // req
var params FoldingRangeParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.FoldingRange(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/declaration": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Declaration(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "initialize": // req
var params InitializeParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Initialize(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "shutdown": // req
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return
}
err := server.Shutdown(ctx)
if err := r.Reply(ctx, nil, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/willSaveWaitUntil": // req
var params WillSaveTextDocumentParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.WillSaveWaitUntil(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/completion": // req
var params CompletionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Completion(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "completionItem/resolve": // req
var params CompletionItem
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Resolve(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/hover": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Hover(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/signatureHelp": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.SignatureHelp(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/definition": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Definition(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/references": // req
var params ReferenceParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.References(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/documentHighlight": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.DocumentHighlight(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/documentSymbol": // req
var params DocumentSymbolParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.DocumentSymbol(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/symbol": // req
var params WorkspaceSymbolParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Symbol(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/codeAction": // req
var params CodeActionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.CodeAction(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/codeLens": // req
var params CodeLensParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.CodeLens(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "codeLens/resolve": // req
var params CodeLens
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.ResolveCodeLens(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/formatting": // req
var params DocumentFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Formatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/rangeFormatting": // req
var params DocumentRangeFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.RangeFormatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/onTypeFormatting": // req
var params DocumentOnTypeFormattingParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.OnTypeFormatting(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/rename": // req
var params RenameParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.Rename(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/prepareRename": // req
var params TextDocumentPositionParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.PrepareRename(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "textDocument/documentLink": // req
var params DocumentLinkParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.DocumentLink(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "documentLink/resolve": // req
var params DocumentLink
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.ResolveDocumentLink(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
case "workspace/executeCommand": // req
var params ExecuteCommandParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, log, r, err)
return
}
resp, err := server.ExecuteCommand(ctx, &params)
if err := r.Reply(ctx, resp, err); err != nil {
log.Errorf(ctx, "%v", err)
}
default:
return false
default:
if r.IsNotify() {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
}
}
}
}
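
Every case in the generated serverHandler above follows the same three-step shape: unmarshal the raw params, call the matching Server method, then reply (for requests) or merely log the error (for notifications). The stand-alone sketch below illustrates only that dispatch shape; Request, Reply, HoverParams and fakeServer are simplified stand-ins invented for the example, not the jsonrpc2 or protocol types used by gopls.

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// HoverParams and Hover are simplified stand-ins for the LSP protocol types.
type HoverParams struct {
	URI  string `json:"uri"`
	Line int    `json:"line"`
}

type Hover struct{ Contents string }

// Server is a cut-down version of the generated Server interface.
type Server interface {
	Hover(ctx context.Context, params *HoverParams) (*Hover, error)
	Shutdown(ctx context.Context) error
}

// Request mimics the small part of a JSON-RPC request the handler needs.
type Request struct {
	Method string
	Params *json.RawMessage
	Reply  func(result interface{}, err error)
}

// serverHandler shows the shape of the generated dispatch function:
// one case per method, each of which unmarshals, calls, and replies.
func serverHandler(server Server) func(ctx context.Context, r *Request) {
	return func(ctx context.Context, r *Request) {
		switch r.Method {
		case "textDocument/hover": // request: reply with the result
			var params HoverParams
			if err := json.Unmarshal(*r.Params, &params); err != nil {
				r.Reply(nil, err) // parse error
				return
			}
			resp, err := server.Hover(ctx, &params)
			r.Reply(resp, err)
		case "shutdown": // request with no params
			r.Reply(nil, server.Shutdown(ctx))
		default:
			r.Reply(nil, fmt.Errorf("method %q not found", r.Method))
		}
	}
}

// fakeServer lets the sketch run without a real language server behind it.
type fakeServer struct{}

func (fakeServer) Hover(_ context.Context, p *HoverParams) (*Hover, error) {
	return &Hover{Contents: "docs for " + p.URI}, nil
}

func (fakeServer) Shutdown(context.Context) error { return nil }

func main() {
	raw := json.RawMessage(`{"uri":"file:///a.go","line":3}`)
	handle := serverHandler(fakeServer{})
	handle(context.Background(), &Request{
		Method: "textDocument/hover",
		Params: &raw,
		Reply:  func(result interface{}, err error) { fmt.Println(result, err) },
	})
}
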
@ -617,14 +564,6 @@ func (s *serverDispatcher) Declaration(ctx context.Context, params *TextDocument
return result, nil
}
func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange, error) {
var result []SelectionRange
if err := s.Conn.Call(ctx, "textDocument/selectionRange", params, &result); err != nil {
return nil, err
}
return result, nil
}
func (s *serverDispatcher) Initialize(ctx context.Context, params *InitializeParams) (*InitializeResult, error) {
var result InitializeResult
if err := s.Conn.Call(ctx, "initialize", params, &result); err != nil {

View File

@ -4,10 +4,10 @@
1. Make sure `node` is installed.
As explained at the [node site](<https://nodejs.org> Node)
you may need `npm install @types/node` for the node runtime types
2. Install the typescript compiler, with `npm install typescript`.
you may need `node install @types/node` for the node runtime types
2. Install the typescript compiler, with `node install typescript`.
3. Make sure `tsc` and `node` are in your execution path.
4. Get the typescript code for the jsonrpc protocol with `git clone git@github.com:microsoft/vscode-languageserver-node.git`
4. Get the typescript code for the jsonrpc protocol with `git clone vscode-languageserver-node.git`
## Usage

View File

@ -582,7 +582,7 @@ function generate(files: string[], options: ts.CompilerOptions): void {
}
if (x[0].goType == 'bool') { // take it
if (x[1].goType == 'RenameOptions') {
return ({goType: 'interface{}', gostuff: getText(node)})
return ({goType: 'RenameOptions', gostuff: getText(node)})
}
return ({goType: 'bool', gostuff: getText(node)})
}
@ -927,7 +927,7 @@ let byName = new Map<string, Struct>();
// consts are unique. (Go consts are package-level, but Typescript's are
// not.) Use suffixes to minimize changes to gopls.
let pref = new Map<string, string>(
[['DiagnosticSeverity', 'Severity'], ['WatchKind', 'Watch']]) // typeName->prefix
[['DiagnosticSeverity', 'Severity']]) // typeName->prefix
let suff = new Map<string, string>([
['CompletionItemKind', 'Completion'], ['InsertTextFormat', 'TextFormat']
])

View File

@ -59,7 +59,7 @@ function generate(files: string[], options: ts.CompilerOptions): void {
setReceives(); // distinguish client and server
// for each of Client and Server there are 3 parts to the output:
// 1. type X interface {methods}
// 2. func (h *serverHandler) Deliver(...) { switch r.method }
// 2. serverHandler(...) { return func(...) { switch r.method}}
// 3. func (x *xDispatcher) Method(ctx, parm)
not.forEach(
(v, k) => {
@ -99,7 +99,7 @@ function sig(nm: string, a: string, b: string, names?: boolean): string {
const notNil = `if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return true
return
}`;
// Go code for notifications. Side is client or server, m is the request method
function goNot(side: side, m: string) {
@ -113,18 +113,16 @@ function goNot(side: side, m: string) {
if (a != '') {
case1 = `var params ${a}
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
sendParseError(ctx, log, r, err)
return
}
if err := h.${side.name}.${nm}(ctx, &params); err != nil {
log.Error(ctx, "", err)
}
return true`;
if err := ${side.name}.${nm}(ctx, &params); err != nil {
log.Errorf(ctx, "%v", err)
}`;
} else {
case1 = `if err := h.${side.name}.${nm}(ctx); err != nil {
log.Error(ctx, "", err)
}
return true`;
case1 = `if err := ${side.name}.${nm}(ctx); err != nil {
log.Errorf(ctx, "%v", err)
}`;
}
side.cases.push(`${caseHdr}\n${case1}`);
@ -154,26 +152,24 @@ function goReq(side: side, m: string) {
if (a != '') {
case1 = `var params ${a}
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
sendParseError(ctx, log, r, err)
return
}`;
}
const arg2 = a == '' ? '' : ', &params';
let case2 = `if err := h.${side.name}.${nm}(ctx${arg2}); err != nil {
log.Error(ctx, "", err)
let case2 = `if err := ${side.name}.${nm}(ctx${arg2}); err != nil {
log.Errorf(ctx, "%v", err)
}`;
if (b != '') {
case2 = `resp, err := h.${side.name}.${nm}(ctx${arg2})
case2 = `resp, err := ${side.name}.${nm}(ctx${arg2})
if err := r.Reply(ctx, resp, err); err != nil {
log.Error(ctx, "", err)
}
return true`;
log.Errorf(ctx, "%v", err)
}`;
} else { // response is nil
case2 = `err := h.${side.name}.${nm}(ctx${arg2})
case2 = `err := ${side.name}.${nm}(ctx${arg2})
if err := r.Reply(ctx, nil, err); err != nil {
log.Error(ctx, "", err)
}
return true`
log.Errorf(ctx, "%v", err)
}`
}
side.cases.push(`${caseHdr}\n${case1}\n${case2}`);
@ -226,31 +222,32 @@ function output(side: side) {
"encoding/json"
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/xlog"
)
`);
const a = side.name[0].toUpperCase() + side.name.substring(1)
f(`type ${a} interface {`);
side.methods.forEach((v) => { f(v) });
f('}\n');
f(`func (h ${side.name}Handler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
if delivered {
return false
}
f(`func ${side.name}Handler(log xlog.Logger, ${side.name} ${
side.goName}) jsonrpc2.Handler {
return func(ctx context.Context, r *jsonrpc2.Request) {
switch r.Method {
case "$/cancelRequest":
var params CancelParams
if err := json.Unmarshal(*r.Params, &params); err != nil {
sendParseError(ctx, r, err)
return true
sendParseError(ctx, log, r, err)
return
}
r.Conn().Cancel(params.ID)
return true`);
r.Conn().Cancel(params.ID)`);
side.cases.forEach((v) => { f(v) });
f(`
default:
return false
if r.IsNotify() {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
}
}
}
}`);
f(`
type ${side.name}Dispatcher struct {

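The generator comment above lists three outputs per side: an interface, a handler, and a dispatcher. The handler shape already appears earlier in this diff; the sketch below shows the dispatcher shape, in which every method is a thin wrapper that forwards the call over the connection. Conn, echoConn and the Hover types here are simplified stand-ins assumed only for illustration, not the jsonrpc2 or protocol types themselves.

package main

import (
	"context"
	"fmt"
)

// Conn is a stand-in for the connection the real dispatcher wraps;
// Call sends a request and unmarshals the response into result.
type Conn interface {
	Call(ctx context.Context, method string, params, result interface{}) error
}

type HoverParams struct{ URI string }

type Hover struct{ Contents string }

// serverDispatcher mirrors part 3 of the generated output: each method simply
// forwards its params over the connection and decodes the reply.
type serverDispatcher struct{ Conn Conn }

func (s *serverDispatcher) Hover(ctx context.Context, params *HoverParams) (*Hover, error) {
	var result Hover
	if err := s.Conn.Call(ctx, "textDocument/hover", params, &result); err != nil {
		return nil, err
	}
	return &result, nil
}

// echoConn is a fake connection used only to make the sketch runnable.
type echoConn struct{}

func (echoConn) Call(ctx context.Context, method string, params, result interface{}) error {
	if h, ok := result.(*Hover); ok {
		h.Contents = "reply to " + method
	}
	return nil
}

func main() {
	d := &serverDispatcher{Conn: echoConn{}}
	h, err := d.Hover(context.Background(), &HoverParams{URI: "file:///a.go"})
	fmt.Println(h.Contents, err)
}
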
View File

@ -9,8 +9,6 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -36,7 +34,7 @@ func (s *Server) references(ctx context.Context, params *protocol.ReferenceParam
}
references, err := ident.References(ctx)
if err != nil {
log.Error(ctx, "no references", err, tag.Of("Identifier", ident.Name))
view.Session().Logger().Errorf(ctx, "no references for %s: %v", ident.Name, err)
}
if params.Context.IncludeDeclaration {
// The declaration of this identifier may not be in the

View File

@ -13,36 +13,37 @@ import (
"golang.org/x/tools/internal/jsonrpc2"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/span"
)
// NewClientServer
func NewClientServer(ctx context.Context, cache source.Cache, client protocol.Client) (context.Context, *Server) {
ctx = protocol.WithClient(ctx, client)
return ctx, &Server{
func NewClientServer(cache source.Cache, client protocol.Client) *Server {
return &Server{
client: client,
session: cache.NewSession(ctx),
session: cache.NewSession(xlog.New(protocol.NewLogger(client))),
}
}
// NewServer starts an LSP server on the supplied stream, and waits until the
// stream is closed.
func NewServer(ctx context.Context, cache source.Cache, stream jsonrpc2.Stream) (context.Context, *Server) {
func NewServer(cache source.Cache, stream jsonrpc2.Stream) *Server {
s := &Server{}
ctx, s.Conn, s.client = protocol.NewServer(ctx, stream, s)
s.session = cache.NewSession(ctx)
return ctx, s
var log xlog.Logger
s.Conn, s.client, log = protocol.NewServer(stream, s)
s.session = cache.NewSession(log)
return s
}
// RunServerOnPort starts an LSP server on the given port and does not exit.
// This function exists for debugging purposes.
func RunServerOnPort(ctx context.Context, cache source.Cache, port int, h func(ctx context.Context, s *Server)) error {
func RunServerOnPort(ctx context.Context, cache source.Cache, port int, h func(s *Server)) error {
return RunServerOnAddress(ctx, cache, fmt.Sprintf(":%v", port), h)
}
// RunServerOnPort starts an LSP server on the given port and does not exit.
// This function exists for debugging purposes.
func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h func(ctx context.Context, s *Server)) error {
func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h func(s *Server)) error {
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
@ -52,7 +53,7 @@ func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h
if err != nil {
return err
}
h(NewServer(ctx, cache, jsonrpc2.NewHeaderStream(conn, conn)))
h(NewServer(cache, jsonrpc2.NewHeaderStream(conn, conn)))
}
}
@ -60,21 +61,12 @@ func (s *Server) Run(ctx context.Context) error {
return s.Conn.Run(ctx)
}
type serverState int
const (
serverCreated = serverState(iota)
serverInitializing // set once the server has received "initialize" request
serverInitialized // set once the server has received "initialized" request
serverShutDown
)
type Server struct {
Conn *jsonrpc2.Conn
client protocol.Client
stateMu sync.Mutex
state serverState
initializedMu sync.Mutex
isInitialized bool // set once the server has received "initialize" request
// Configurations.
// TODO(rstambler): Separate these into their own struct?
@ -272,11 +264,6 @@ func (s *Server) PrepareRename(context.Context, *protocol.TextDocumentPositionPa
func (s *Server) SetTraceNotification(context.Context, *protocol.SetTraceParams) error {
return notImplemented("SetTraceNotification")
}
func (s *Server) SelectionRange(context.Context, *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) {
return nil, notImplemented("SelectionRange")
}
func notImplemented(method string) *jsonrpc2.Error {
return jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not yet implemented", method)
}

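RunServerOnAddress in the hunk above binds a TCP listener and hands each accepted connection to a freshly constructed server. A rough, self-contained version of that accept loop is sketched below; the port and the line-echo handler are placeholders standing in for jsonrpc2.NewHeaderStream and Server.Run, and unlike the diffed code the sketch serves each connection on its own goroutine.

package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

// runOnAddress mirrors the shape of RunServerOnAddress: bind once, then
// accept connections forever, starting one handler per connection.
func runOnAddress(addr string, handle func(conn net.Conn)) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		go handle(conn) // each editor connection gets its own handler
	}
}

func main() {
	err := runOnAddress("localhost:4389", func(conn net.Conn) {
		defer conn.Close()
		// Stand-in for jsonrpc2.NewHeaderStream + Server.Run: echo lines back.
		s := bufio.NewScanner(conn)
		for s.Scan() {
			fmt.Fprintf(conn, "received: %s\n", s.Text())
		}
	})
	log.Fatal(err)
}
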
View File

@ -9,8 +9,6 @@ import (
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -31,7 +29,7 @@ func (s *Server) signatureHelp(ctx context.Context, params *protocol.TextDocumen
}
info, err := source.SignatureHelp(ctx, f, rng.Start)
if err != nil {
log.Print(ctx, "no signature help", tag.Of("At", rng), tag.Of("Failure", err))
s.session.Logger().Infof(ctx, "no signature help for %s:%v:%v : %s", uri, int(params.Position.Line), int(params.Position.Character), err)
return nil, nil
}
return toProtocolSignatureHelp(info), nil

View File

@ -11,6 +11,7 @@ import (
"fmt"
"go/token"
"go/types"
"log"
"reflect"
"sort"
"strings"
@ -23,8 +24,8 @@ import (
)
func analyze(ctx context.Context, v View, pkgs []Package, analyzers []*analysis.Analyzer) ([]*Action, error) {
ctx, done := trace.StartSpan(ctx, "source.analyze")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.analyze")
defer ts.End()
if ctx.Err() != nil {
return nil, ctx.Err()
}
@ -147,7 +148,7 @@ func (act *Action) execOnce(ctx context.Context, fset *token.FileSet) error {
pass := &analysis.Pass{
Analyzer: act.Analyzer,
Fset: fset,
Files: act.Pkg.GetSyntax(ctx),
Files: act.Pkg.GetSyntax(),
Pkg: act.Pkg.GetTypes(),
TypesInfo: act.Pkg.GetTypesInfo(),
TypesSizes: act.Pkg.GetTypesSizes(),
@ -244,12 +245,12 @@ func (act *Action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
// exportObjectFact implements Pass.ExportObjectFact.
func (act *Action) exportObjectFact(obj types.Object, fact analysis.Fact) {
if act.pass.ExportObjectFact == nil {
panic(fmt.Sprintf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
}
if obj.Pkg() != act.Pkg.GetTypes() {
panic(fmt.Sprintf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
act.Analyzer, act.Pkg, obj, fact))
log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
act.Analyzer, act.Pkg, obj, fact)
}
key := objectFactKey{obj, factType(fact)}
@ -283,7 +284,7 @@ func (act *Action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool
// exportPackageFact implements Pass.ExportPackageFact.
func (act *Action) exportPackageFact(fact analysis.Fact) {
if act.pass.ExportPackageFact == nil {
panic(fmt.Sprintf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact))
log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
}
key := packageFactKey{act.pass.Pkg, factType(fact)}
@ -293,7 +294,7 @@ func (act *Action) exportPackageFact(fact analysis.Fact) {
func factType(fact analysis.Fact) reflect.Type {
t := reflect.TypeOf(fact)
if t.Kind() != reflect.Ptr {
panic(fmt.Sprintf("invalid Fact type: got %T, want pointer", t))
log.Fatalf("invalid Fact type: got %T, want pointer", t)
}
return t
}

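Both sides of this hunk wrap the function body in a span: master defers a returned done function, while the release branch defers ts.End(). The toy startSpan below, built only on the standard library, illustrates the done-function flavor of that idiom; it is not the real trace package.

package main

import (
	"context"
	"fmt"
	"time"
)

// startSpan is a stand-in for trace.StartSpan: it records the start time and
// returns the context plus a function that ends the span when called.
func startSpan(ctx context.Context, name string) (context.Context, func()) {
	start := time.Now()
	return ctx, func() {
		fmt.Printf("span %s took %v\n", name, time.Since(start))
	}
}

func analyze(ctx context.Context) {
	ctx, done := startSpan(ctx, "source.analyze")
	defer done() // the span ends when analyze returns
	doWork(ctx)
}

func doWork(context.Context) { time.Sleep(10 * time.Millisecond) } // pretend work

func main() {
	analyze(context.Background())
}
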
View File

@ -278,13 +278,13 @@ type CompletionOptions struct {
// the client to score the quality of the completion. For instance, some clients
// may tolerate imperfect matches as valid completion results, since users may make typos.
func Completion(ctx context.Context, view View, f GoFile, pos token.Pos, opts CompletionOptions) ([]CompletionItem, *Selection, error) {
ctx, done := trace.StartSpan(ctx, "source.Completion")
defer done()
file, err := f.GetAST(ctx, ParseFull)
ctx, ts := trace.StartSpan(ctx, "source.Completion")
defer ts.End()
file := f.GetAST(ctx)
if file == nil {
return nil, nil, err
return nil, nil, fmt.Errorf("no AST for %s", f.URI())
}
pkg := f.GetPackage(ctx)
if pkg == nil || pkg.IsIllTyped() {
return nil, nil, fmt.Errorf("package for %s is ill typed", f.URI())
@ -509,7 +509,6 @@ func (c *completer) lexical() error {
if scope == types.Universe {
score *= 0.1
}
// If we haven't already added a candidate for an object with this name.
if _, ok := seen[obj.Name()]; !ok {
seen[obj.Name()] = struct{}{}

View File

@ -14,8 +14,6 @@ import (
"strings"
"golang.org/x/tools/internal/lsp/snippet"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)
@ -99,38 +97,31 @@ func (c *completer) item(cand candidate) (CompletionItem, error) {
if c.opts.WantDocumentaton {
declRange, err := objToRange(c.ctx, c.view.Session().Cache().FileSet(), obj)
if err != nil {
log.Error(c.ctx, "failed to get declaration range for object", err, tag.Of("Name", obj.Name()))
goto Return
return CompletionItem{}, err
}
pos := declRange.FileSet.Position(declRange.Start)
if !pos.IsValid() {
log.Error(c.ctx, "invalid declaration position", err, tag.Of("Label", item.Label))
goto Return
return CompletionItem{}, fmt.Errorf("invalid declaration position for %v", item.Label)
}
uri := span.FileURI(pos.Filename)
f, err := c.view.GetFile(c.ctx, uri)
if err != nil {
log.Error(c.ctx, "unable to get file", err, tag.Of("URI", uri))
goto Return
return CompletionItem{}, err
}
gof, ok := f.(GoFile)
if !ok {
log.Error(c.ctx, "declaration in a Go file", err, tag.Of("Label", item.Label))
goto Return
return CompletionItem{}, fmt.Errorf("declaration for %s not in a Go file: %s", item.Label, uri)
}
ident, err := Identifier(c.ctx, c.view, gof, declRange.Start)
if err != nil {
log.Error(c.ctx, "no identifier", err, tag.Of("Name", obj.Name()))
goto Return
return CompletionItem{}, err
}
documentation, err := ident.Documentation(c.ctx, SynopsisDocumentation)
if err != nil {
log.Error(c.ctx, "no documentation", err, tag.Of("Name", obj.Name()))
goto Return
return CompletionItem{}, err
}
item.Documentation = documentation
}
Return:
return item, nil
}
@ -201,7 +192,7 @@ func formatFieldList(ctx context.Context, v View, list *ast.FieldList) ([]string
cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4}
b := &bytes.Buffer{}
if err := cfg.Fprint(b, v.Session().Cache().FileSet(), p.Type); err != nil {
log.Error(ctx, "unable to print type", nil, tag.Of("Type", p.Type))
v.Session().Logger().Errorf(ctx, "unable to print type %v", p.Type)
continue
}
typ := replacer.Replace(b.String())

View File

@ -34,9 +34,6 @@ import (
"golang.org/x/tools/go/analysis/passes/unsafeptr"
"golang.org/x/tools/go/analysis/passes/unusedresult"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)
@ -62,8 +59,6 @@ const (
)
func Diagnostics(ctx context.Context, view View, f GoFile, disabledAnalyses map[string]struct{}) (map[span.URI][]Diagnostic, error) {
ctx, done := trace.StartSpan(ctx, "source.Diagnostics", telemetry.File.Of(f.URI()))
defer done()
pkg := f.GetPackage(ctx)
if pkg == nil {
return singleDiagnostic(f.URI(), "%s is not part of a package", f.URI()), nil
@ -86,7 +81,7 @@ func Diagnostics(ctx context.Context, view View, f GoFile, disabledAnalyses map[
if !diagnostics(ctx, view, pkg, reports) {
// If we don't have any list, parse, or type errors, run analyses.
if err := analyses(ctx, view, pkg, disabledAnalyses, reports); err != nil {
log.Error(ctx, "failed to run analyses", err, telemetry.File)
view.Session().Logger().Errorf(ctx, "failed to run analyses for %s: %v", f.URI(), err)
}
}
// Updates to the diagnostics for this package may need to be propagated.
@ -109,8 +104,6 @@ type diagnosticSet struct {
}
func diagnostics(ctx context.Context, v View, pkg Package, reports map[span.URI][]Diagnostic) bool {
ctx, done := trace.StartSpan(ctx, "source.diagnostics", telemetry.Package.Of(pkg.ID()))
defer done()
diagSets := make(map[span.URI]*diagnosticSet)
for _, err := range pkg.GetErrors() {
diag := Diagnostic{
@ -236,31 +229,30 @@ func parseDiagnosticMessage(input string) span.Span {
func pointToSpan(ctx context.Context, view View, spn span.Span) span.Span {
f, err := view.GetFile(ctx, spn.URI())
ctx = telemetry.File.With(ctx, spn.URI())
if err != nil {
log.Error(ctx, "could not find file for diagnostic", nil, telemetry.File)
view.Session().Logger().Errorf(ctx, "could not find file for diagnostic: %v", spn.URI())
return spn
}
diagFile, ok := f.(GoFile)
if !ok {
log.Error(ctx, "not a Go file", nil, telemetry.File)
view.Session().Logger().Errorf(ctx, "%s is not a Go file", spn.URI())
return spn
}
tok, err := diagFile.GetToken(ctx)
if err != nil {
log.Error(ctx, "could not find token.File for diagnostic", err, telemetry.File)
tok := diagFile.GetToken(ctx)
if tok == nil {
view.Session().Logger().Errorf(ctx, "could not find token.File for diagnostic: %v", spn.URI())
return spn
}
data, _, err := diagFile.Handle(ctx).Read(ctx)
if err != nil {
log.Error(ctx, "could not find content for diagnostic", err, telemetry.File)
view.Session().Logger().Errorf(ctx, "could not find content for diagnostic: %v", spn.URI())
return spn
}
c := span.NewTokenConverter(diagFile.FileSet(), tok)
s, err := spn.WithOffset(c)
//we just don't bother producing an error if this failed
if err != nil {
log.Error(ctx, "invalid span for diagnostic", err, telemetry.File)
view.Session().Logger().Errorf(ctx, "invalid span for diagnostic: %v: %v", spn.URI(), err)
return spn
}
start := s.Start()

View File

@ -10,36 +10,27 @@ import (
"context"
"fmt"
"go/format"
"strings"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/diff"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)
// Format formats a file with a given range.
func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) {
ctx, done := trace.StartSpan(ctx, "source.Format")
defer done()
file, err := f.GetAST(ctx, ParseFull)
ctx, ts := trace.StartSpan(ctx, "source.Format")
defer ts.End()
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
pkg := f.GetPackage(ctx)
if hasListErrors(pkg.GetErrors()) || hasParseErrors(pkg.GetErrors()) {
// Even if this package has list or parse errors, this file may not
// have any parse errors and can still be formatted. Using format.Node
// on an ast with errors may result in code being added or removed.
// Attempt to format the source of this file instead.
formatted, err := formatSource(ctx, f)
if err != nil {
return nil, err
}
return computeTextEdits(ctx, f, string(formatted)), nil
return nil, fmt.Errorf("%s has parse errors, not formatting", f.URI())
}
path, exact := astutil.PathEnclosingInterval(file, rng.Start, rng.End)
if !exact || len(path) == 0 {
@ -60,20 +51,10 @@ func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) {
return computeTextEdits(ctx, f, buf.String()), nil
}
func formatSource(ctx context.Context, file File) ([]byte, error) {
ctx, done := trace.StartSpan(ctx, "source.formatSource")
defer done()
data, _, err := file.Handle(ctx).Read(ctx)
if err != nil {
return nil, err
}
return format.Source(data)
}
// Imports formats a file using the goimports tool.
func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEdit, error) {
ctx, done := trace.StartSpan(ctx, "source.Imports")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.Imports")
defer ts.End()
data, _, err := f.Handle(ctx).Read(ctx)
if err != nil {
return nil, err
@ -85,8 +66,8 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd
if hasListErrors(pkg.GetErrors()) {
return nil, fmt.Errorf("%s has list errors, not running goimports", f.URI())
}
options := &imports.Options{
Env: buildProcessEnv(ctx, view),
// Defaults.
AllErrors: true,
Comments: true,
@ -95,16 +76,10 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd
TabIndent: true,
TabWidth: 8,
}
var formatted []byte
importFn := func(opts *imports.Options) error {
formatted, err = imports.Process(f.URI().Filename(), data, opts)
return err
}
err = view.RunProcessEnvFunc(ctx, importFn, options)
formatted, err := imports.Process(f.URI().Filename(), data, options)
if err != nil {
return nil, err
}
return computeTextEdits(ctx, f, string(formatted)), nil
}
@ -126,12 +101,43 @@ func hasListErrors(errors []packages.Error) bool {
return false
}
func buildProcessEnv(ctx context.Context, view View) *imports.ProcessEnv {
cfg := view.Config()
env := &imports.ProcessEnv{
WorkingDir: cfg.Dir,
Logf: func(format string, v ...interface{}) {
view.Session().Logger().Infof(ctx, format, v...)
},
}
for _, kv := range cfg.Env {
split := strings.Split(kv, "=")
if len(split) < 2 {
continue
}
switch split[0] {
case "GOPATH":
env.GOPATH = split[1]
case "GOROOT":
env.GOROOT = split[1]
case "GO111MODULE":
env.GO111MODULE = split[1]
case "GOPROXY":
env.GOPROXY = split[1]
case "GOFLAGS":
env.GOFLAGS = split[1]
case "GOSUMDB":
env.GOSUMDB = split[1]
}
}
return env
}
func computeTextEdits(ctx context.Context, file File, formatted string) (edits []TextEdit) {
ctx, done := trace.StartSpan(ctx, "source.computeTextEdits")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.computeTextEdits")
defer ts.End()
data, _, err := file.Handle(ctx).Read(ctx)
if err != nil {
log.Error(ctx, "Cannot compute text edits", err)
file.View().Session().Logger().Errorf(ctx, "Cannot compute text edits: %v", err)
return nil
}
u := diff.SplitLines(string(data))

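buildProcessEnv above copies selected KEY=VALUE entries from the packages.Config environment into imports.ProcessEnv. The stand-alone sketch below shows the same parsing step; it uses strings.SplitN (so values containing '=' are preserved) and a local processEnv struct, both of which are choices made for the example rather than taken verbatim from the diffed code.

package main

import (
	"fmt"
	"strings"
)

// processEnv is a stand-in for the handful of fields gopls copies out of the
// environment for goimports.
type processEnv struct {
	GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
}

func buildProcessEnv(environ []string) processEnv {
	var env processEnv
	for _, kv := range environ {
		// SplitN keeps any '=' inside the value, e.g. GOFLAGS=-ldflags=-s.
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			continue
		}
		switch parts[0] {
		case "GOPATH":
			env.GOPATH = parts[1]
		case "GOROOT":
			env.GOROOT = parts[1]
		case "GO111MODULE":
			env.GO111MODULE = parts[1]
		case "GOPROXY":
			env.GOPROXY = parts[1]
		case "GOFLAGS":
			env.GOFLAGS = parts[1]
		case "GOSUMDB":
			env.GOSUMDB = parts[1]
		}
	}
	return env
}

func main() {
	got := buildProcessEnv([]string{"GOPATH=/home/me/go", "GOFLAGS=-mod=vendor"})
	fmt.Printf("%+v\n", got)
}
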
View File

@ -16,12 +16,11 @@ import (
)
func Highlight(ctx context.Context, f GoFile, pos token.Pos) ([]span.Span, error) {
ctx, done := trace.StartSpan(ctx, "source.Highlight")
defer done()
file, err := f.GetAST(ctx, ParseFull)
ctx, ts := trace.StartSpan(ctx, "source.Highlight")
defer ts.End()
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
fset := f.FileSet()
path, _ := astutil.PathEnclosingInterval(file, pos, pos)

View File

@ -33,8 +33,8 @@ const (
)
func (i *IdentifierInfo) Hover(ctx context.Context, markdownSupported bool, hoverKind HoverKind) (string, error) {
ctx, done := trace.StartSpan(ctx, "source.Hover")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.Hover")
defer ts.End()
h, err := i.decl.hover(ctx)
if err != nil {
return "", err
@ -80,12 +80,10 @@ func (i *IdentifierInfo) Documentation(ctx context.Context, hoverKind HoverKind)
}
func (d declaration) hover(ctx context.Context) (*documentation, error) {
ctx, done := trace.StartSpan(ctx, "source.hover")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.hover")
defer ts.End()
obj := d.obj
switch node := d.node.(type) {
case *ast.ImportSpec:
return &documentation{node, nil}, nil
case *ast.GenDecl:
switch obj := obj.(type) {
case *types.TypeName, *types.Var, *types.Const, *types.Func:

View File

@ -63,12 +63,11 @@ func Identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident
// identifier checks a single position for a potential identifier.
func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*IdentifierInfo, error) {
ctx, done := trace.StartSpan(ctx, "source.identifier")
defer done()
file, err := f.GetAST(ctx, ParseFull)
ctx, ts := trace.StartSpan(ctx, "source.identifier")
defer ts.End()
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
pkg := f.GetPackage(ctx)
if pkg == nil || pkg.IsIllTyped() {
@ -122,6 +121,8 @@ func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident
}
}
var err error
// Handle builtins separately.
if result.decl.obj.Parent() == types.Universe {
decl, ok := lookupBuiltinDecl(f.View(), result.Name).(ast.Node)
@ -234,13 +235,14 @@ func objToNode(ctx context.Context, view View, originPkg *types.Package, obj typ
}
// If the object is exported from a different package,
// we don't need its full AST to find the definition.
mode := ParseFull
var declAST *ast.File
if obj.Exported() && obj.Pkg() != originPkg {
mode = ParseExported
declAST = declFile.GetAnyAST(ctx)
} else {
declAST = declFile.GetAST(ctx)
}
declAST, err := declFile.GetAST(ctx, mode)
if declAST == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
path, _ := astutil.PathEnclosingInterval(declAST, rng.Start, rng.End)
if path == nil {
@ -290,12 +292,12 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos
if importedPkg == nil {
return nil, fmt.Errorf("no import for %q", importPath)
}
if importedPkg.GetSyntax(ctx) == nil {
if importedPkg.GetSyntax() == nil {
return nil, fmt.Errorf("no syntax for for %q", importPath)
}
// Heuristic: Jump to the longest (most "interesting") file of the package.
var dest *ast.File
for _, f := range importedPkg.GetSyntax(ctx) {
for _, f := range importedPkg.GetSyntax() {
if dest == nil || f.End()-f.Pos() > dest.End()-dest.Pos() {
dest = f
}
@ -304,7 +306,6 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos
return nil, fmt.Errorf("package %q has no files", importPath)
}
result.decl.rng = span.NewRange(f.FileSet(), dest.Name.Pos(), dest.Name.End())
result.decl.node = imp
return result, nil
}

View File

@ -27,8 +27,8 @@ type ReferenceInfo struct {
// References returns a list of references for a given identifier within the packages
// containing i.File. Declarations appear first in the result.
func (i *IdentifierInfo) References(ctx context.Context) ([]*ReferenceInfo, error) {
ctx, done := trace.StartSpan(ctx, "source.References")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.References")
defer ts.End()
var references []*ReferenceInfo
// If the object declaration is nil, assume it is an import spec and do not look for references.

View File

@ -37,9 +37,8 @@ type renamer struct {
// Rename returns a map of TextEdits for each file modified when renaming a given identifier within a package.
func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.URI][]TextEdit, error) {
ctx, done := trace.StartSpan(ctx, "source.Rename")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.Rename")
defer ts.End()
if i.Name == newName {
return nil, fmt.Errorf("old and new names are the same: %s", newName)
}
@ -86,16 +85,7 @@ func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.U
return nil, fmt.Errorf(r.errors)
}
changes, err := r.update()
if err != nil {
return nil, err
}
// Sort edits for each file.
for _, edits := range changes {
sortTextEdits(edits)
}
return changes, nil
return r.update()
}
// Rename all references to the identifier.

View File

@ -113,7 +113,7 @@ func (r *renamer) checkInPackageBlock(from types.Object) {
}
// Check for conflicts between package block and all file blocks.
for _, f := range pkg.GetSyntax(r.ctx) {
for _, f := range pkg.GetSyntax() {
fileScope := pkg.GetTypesInfo().Scopes[f]
b, prev := fileScope.LookupParent(r.to, token.NoPos)
if b == fileScope {
@ -328,7 +328,7 @@ func forEachLexicalRef(ctx context.Context, pkg Package, obj types.Object, fn fu
return true
}
for _, f := range pkg.GetSyntax(ctx) {
for _, f := range pkg.GetSyntax() {
ast.Inspect(f, visit)
if len(stack) != 0 {
panic(stack)
@ -802,7 +802,7 @@ func (r *renamer) satisfy() map[satisfy.Constraint]bool {
r.from, r.to, pkg.PkgPath())
return nil
}
f.Find(pkg.GetTypesInfo(), pkg.GetSyntax(r.ctx))
f.Find(pkg.GetTypesInfo(), pkg.GetSyntax())
}
r.satisfyConstraints = f.Result
}
@ -835,7 +835,7 @@ func someUse(info *types.Info, obj types.Object) *ast.Ident {
//
func pathEnclosingInterval(ctx context.Context, fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) {
var pkgs = []Package{pkg}
for _, f := range pkg.GetSyntax(ctx) {
for _, f := range pkg.GetSyntax() {
for _, imp := range f.Imports {
if imp == nil {
continue
@ -848,7 +848,7 @@ func pathEnclosingInterval(ctx context.Context, fset *token.FileSet, pkg Package
}
}
for _, p := range pkgs {
for _, f := range p.GetSyntax(ctx) {
for _, f := range p.GetSyntax() {
if f.Pos() == token.NoPos {
// This can happen if the parser saw
// too many errors and bailed out.

View File

@ -26,12 +26,11 @@ type ParameterInformation struct {
}
func SignatureHelp(ctx context.Context, f GoFile, pos token.Pos) (*SignatureInformation, error) {
ctx, done := trace.StartSpan(ctx, "source.SignatureHelp")
defer done()
file, err := f.GetAST(ctx, ParseFull)
ctx, ts := trace.StartSpan(ctx, "source.SignatureHelp")
defer ts.End()
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
pkg := f.GetPackage(ctx)
if pkg == nil || pkg.IsIllTyped() {

View File

@ -19,6 +19,7 @@ import (
"golang.org/x/tools/internal/lsp/diff"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/tests"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/span"
)
@ -29,20 +30,18 @@ func TestSource(t *testing.T) {
type runner struct {
view source.View
data *tests.Data
ctx context.Context
}
func testSource(t *testing.T, exporter packagestest.Exporter) {
ctx := tests.Context(t)
data := tests.Load(t, exporter, "../testdata")
defer data.Exported.Cleanup()
log := xlog.New(xlog.StdSink{})
cache := cache.New()
session := cache.NewSession(ctx)
session := cache.NewSession(log)
r := &runner{
view: session.NewView(ctx, "source_test", span.FileURI(data.Config.Dir)),
view: session.NewView("source_test", span.FileURI(data.Config.Dir)),
data: data,
ctx: ctx,
}
r.view.SetEnv(data.Config.Env)
for filename, content := range data.Config.Overlay {
@ -53,11 +52,11 @@ func testSource(t *testing.T, exporter packagestest.Exporter) {
func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
for uri, want := range data {
f, err := r.view.GetFile(r.ctx, uri)
f, err := r.view.GetFile(context.Background(), uri)
if err != nil {
t.Fatal(err)
}
results, err := source.Diagnostics(r.ctx, r.view, f.(source.GoFile), nil)
results, err := source.Diagnostics(context.Background(), r.view, f.(source.GoFile), nil)
if err != nil {
t.Fatal(err)
}
@ -133,7 +132,7 @@ func summarizeDiagnostics(i int, want []source.Diagnostic, got []source.Diagnost
}
func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests.CompletionSnippets, items tests.CompletionItems) {
ctx := r.ctx
ctx := context.Background()
for src, itemList := range data {
var want []source.CompletionItem
for _, pos := range itemList {
@ -143,9 +142,9 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
if err != nil {
t.Fatalf("failed for %v: %v", src, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
tok := f.(source.GoFile).GetToken(ctx)
if tok == nil {
t.Fatalf("failed to get token for %v", src)
}
pos := tok.Pos(src.Start().Offset())
list, surrounding, err := source.Completion(ctx, r.view, f.(source.GoFile), pos, source.CompletionOptions{
@ -181,10 +180,7 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
if err != nil {
t.Fatalf("failed for %v: %v", src, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(src.Start().Offset())
list, _, err := source.Completion(ctx, r.view, f.(source.GoFile), pos, source.CompletionOptions{
DeepComplete: strings.Contains(string(src.URI()), "deepcomplete"),
@ -293,7 +289,7 @@ func summarizeCompletionItems(i int, want []source.CompletionItem, got []source.
}
func (r *runner) Format(t *testing.T, data tests.Formats) {
ctx := r.ctx
ctx := context.Background()
for _, spn := range data {
uri := spn.URI()
filename := uri.Filename()
@ -306,11 +302,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
}
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), tok))
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), f.GetToken(ctx)))
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
@ -335,7 +327,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
}
func (r *runner) Import(t *testing.T, data tests.Imports) {
ctx := r.ctx
ctx := context.Background()
for _, spn := range data {
uri := spn.URI()
filename := uri.Filename()
@ -348,11 +340,7 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
}
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), tok))
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), f.GetToken(ctx)))
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
@ -377,16 +365,13 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
}
func (r *runner) Definition(t *testing.T, data tests.Definitions) {
ctx := r.ctx
ctx := context.Background()
for _, d := range data {
f, err := r.view.GetFile(ctx, d.Src.URI())
if err != nil {
t.Fatalf("failed for %v: %v", d.Src, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", d.Src.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(d.Src.Start().Offset())
ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos)
if err != nil {
@ -422,17 +407,14 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
}
func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
ctx := r.ctx
ctx := context.Background()
for name, locations := range data {
src := locations[0]
f, err := r.view.GetFile(ctx, src.URI())
if err != nil {
t.Fatalf("failed for %v: %v", src, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(src.Start().Offset())
highlights, err := source.Highlight(ctx, f.(source.GoFile), pos)
if err != nil {
@ -450,16 +432,14 @@ func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
}
func (r *runner) Reference(t *testing.T, data tests.References) {
ctx := r.ctx
ctx := context.Background()
for src, itemList := range data {
f, err := r.view.GetFile(ctx, src.URI())
if err != nil {
t.Fatalf("failed for %v: %v", src, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(src.Start().Offset())
ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos)
if err != nil {
@ -498,7 +478,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
}
func (r *runner) Rename(t *testing.T, data tests.Renames) {
ctx := r.ctx
ctx := context.Background()
for spn, newText := range data {
tag := fmt.Sprintf("%s-rename", newText)
@ -506,18 +486,14 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(spn.Start().Offset())
ident, err := source.Identifier(r.ctx, r.view, f.(source.GoFile), pos)
ident, err := source.Identifier(context.Background(), r.view, f.(source.GoFile), pos)
if err != nil {
t.Error(err)
continue
}
changes, err := ident.Rename(r.ctx, newText)
changes, err := ident.Rename(context.Background(), newText)
if err != nil {
renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) {
return []byte(err.Error()), nil
@ -568,6 +544,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
func applyEdits(contents string, edits []source.TextEdit) string {
res := contents
sortSourceTextEdits(edits)
// Apply the edits from the end of the file forward
// to preserve the offsets
@ -581,8 +558,17 @@ func applyEdits(contents string, edits []source.TextEdit) string {
return res
}
func sortSourceTextEdits(d []source.TextEdit) {
sort.Slice(d, func(i int, j int) bool {
if r := span.Compare(d[i].Span, d[j].Span); r != 0 {
return r < 0
}
return d[i].NewText < d[j].NewText
})
}
func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
ctx := r.ctx
ctx := context.Background()
for uri, expectedSymbols := range data {
f, err := r.view.GetFile(ctx, uri)
if err != nil {
@ -646,16 +632,13 @@ func summarizeSymbols(i int, want []source.Symbol, got []source.Symbol, reason s
}
func (r *runner) SignatureHelp(t *testing.T, data tests.Signatures) {
ctx := r.ctx
ctx := context.Background()
for spn, expectedSignature := range data {
f, err := r.view.GetFile(ctx, spn.URI())
if err != nil {
t.Fatalf("failed for %v: %v", spn, err)
}
tok, err := f.(source.GoFile).GetToken(ctx)
if err != nil {
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
}
tok := f.GetToken(ctx)
pos := tok.Pos(spn.Start().Offset())
gotSignature, err := source.SignatureHelp(ctx, f.(source.GoFile), pos)
if err != nil {

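The applyEdits helper in the test diff above sorts the edits and then applies them from the end of the file toward the beginning so that earlier offsets stay valid. The short, self-contained version below demonstrates that back-to-front application on plain string offsets; the edit type is invented for the example and is much simpler than source.TextEdit.

package main

import (
	"fmt"
	"sort"
)

// edit replaces content[start:end) with newText; offsets refer to the
// original string.
type edit struct {
	start, end int
	newText    string
}

// applyEdits applies edits back to front so earlier offsets stay valid even
// after later parts of the string change length.
func applyEdits(content string, edits []edit) string {
	sort.Slice(edits, func(i, j int) bool { return edits[i].start < edits[j].start })
	for i := len(edits) - 1; i >= 0; i-- {
		e := edits[i]
		content = content[:e.start] + e.newText + content[e.end:]
	}
	return content
}

func main() {
	src := "hello world"
	out := applyEdits(src, []edit{
		{start: 0, end: 5, newText: "goodbye"},
		{start: 6, end: 11, newText: "gopls"},
	})
	fmt.Println(out) // goodbye gopls
}
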
View File

@ -8,10 +8,10 @@ import (
"golang.org/x/tools/internal/span"
)
func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]SuggestedFixes, error) {
var cas []SuggestedFixes
func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]CodeAction, error) {
var cas []CodeAction
for _, fix := range diag.SuggestedFixes {
var ca SuggestedFixes
var ca CodeAction
ca.Title = fix.Message
for _, te := range fix.TextEdits {
span, err := span.NewRange(fset, te.Pos, te.End).Span()

View File

@ -42,13 +42,12 @@ type Symbol struct {
}
func DocumentSymbols(ctx context.Context, f GoFile) ([]Symbol, error) {
ctx, done := trace.StartSpan(ctx, "source.DocumentSymbols")
defer done()
ctx, ts := trace.StartSpan(ctx, "source.DocumentSymbols")
defer ts.End()
fset := f.FileSet()
file, err := f.GetAST(ctx, ParseFull)
file := f.GetAST(ctx)
if file == nil {
return nil, err
return nil, fmt.Errorf("no AST for %s", f.URI())
}
pkg := f.GetPackage(ctx)
if pkg == nil || pkg.IsIllTyped() {

View File

@ -9,41 +9,9 @@ import (
"go/ast"
"go/token"
"go/types"
"path/filepath"
"strings"
)
func DetectLanguage(langID, filename string) FileKind {
switch langID {
case "go":
return Go
case "go.mod":
return Mod
case "go.sum":
return Sum
}
// Fallback to detecting the language based on the file extension.
switch filepath.Ext(filename) {
case ".mod":
return Mod
case ".sum":
return Sum
default: // fallback to Go
return Go
}
}
func (k FileKind) String() string {
switch k {
case Mod:
return "go.mod"
case Sum:
return "go.sum"
default:
return "go"
}
}
// indexExprAtPos returns the index of the expression containing pos.
func indexExprAtPos(pos token.Pos, args []ast.Expr) int {
for i, expr := range args {

View File

@ -9,13 +9,12 @@ import (
"go/ast"
"go/token"
"go/types"
"sort"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/diff"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/span"
)
@ -112,7 +111,7 @@ type Cache interface {
FileSystem
// NewSession creates a new Session manager and returns it.
NewSession(ctx context.Context) Session
NewSession(log xlog.Logger) Session
// FileSet returns the shared fileset used by all files in the system.
FileSet() *token.FileSet
@ -130,11 +129,14 @@ type Cache interface {
// A session may have many active views at any given time.
type Session interface {
// NewView creates a new View and returns it.
NewView(ctx context.Context, name string, folder span.URI) View
NewView(name string, folder span.URI) View
// Cache returns the cache that created this session.
Cache() Cache
// Returns the logger in use for this session.
Logger() xlog.Logger
// View returns a view with a matching name, if the session has one.
View(name string) View
@ -152,7 +154,7 @@ type Session interface {
FileSystem
// DidOpen is invoked each time a file is opened in the editor.
DidOpen(ctx context.Context, uri span.URI, kind FileKind, text []byte)
DidOpen(ctx context.Context, uri span.URI, text []byte)
// DidSave is invoked each time an open file is saved in the editor.
DidSave(uri span.URI)
@ -208,11 +210,7 @@ type View interface {
// Ignore returns true if this file should be ignored by this view.
Ignore(span.URI) bool
Config(ctx context.Context) *packages.Config
// RunProcessEnvFunc runs fn with the process env for this view inserted into opts.
// Note: the process env contains cached module and filesystem state.
RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error
Config() *packages.Config
}
// File represents a source file of any type.
@ -221,15 +219,19 @@ type File interface {
View() View
Handle(ctx context.Context) FileHandle
FileSet() *token.FileSet
GetToken(ctx context.Context) (*token.File, error)
GetToken(ctx context.Context) *token.File
}
// GoFile represents a Go source file that has been type-checked.
type GoFile interface {
File
// GetAnyAST returns an AST that may or may not contain function bodies.
// It should be used in scenarios where function bodies are not necessary.
GetAnyAST(ctx context.Context) *ast.File
// GetAST returns the full AST for the file.
GetAST(ctx context.Context, mode ParseMode) (*ast.File, error)
GetAST(ctx context.Context) *ast.File
// GetPackage returns the package that this file belongs to.
GetPackage(ctx context.Context) Package
@ -256,7 +258,7 @@ type Package interface {
ID() string
PkgPath() string
GetFilenames() []string
GetSyntax(context.Context) []*ast.File
GetSyntax() []*ast.File
GetErrors() []packages.Error
GetTypes() *types.Package
GetTypesInfo() *types.Info
@ -320,10 +322,3 @@ func EditsToDiff(edits []TextEdit) []*diff.Op {
}
return ops
}
func sortTextEdits(d []TextEdit) {
// Use a stable sort to maintain the order of edits inserted at the same position.
sort.SliceStable(d, func(i int, j int) bool {
return span.Compare(d[i].Span, d[j].Span) < 0
})
}

View File

@ -14,8 +14,8 @@ import (
)
func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]protocol.DocumentSymbol, error) {
ctx, done := trace.StartSpan(ctx, "lsp.Server.documentSymbol")
defer done()
ctx, ts := trace.StartSpan(ctx, "lsp.Server.documentSymbol")
defer ts.End()
uri := span.NewURI(params.TextDocument.URI)
view := s.session.ViewOf(uri)
f, m, err := getGoFile(ctx, view, uri)

View File

@ -1,51 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package log
import (
"context"
"fmt"
"time"
"golang.org/x/tools/internal/lsp/telemetry/tag"
)
type Entry struct {
At time.Time
Message string
Error error
Tags tag.List
}
func ToEntry(ctx context.Context, at time.Time, tags tag.List) Entry {
//TODO: filter more efficiently for the common case of stripping prefixes only
entry := Entry{
At: at,
}
for _, t := range tags {
switch t.Key {
case MessageTag:
entry.Message = t.Value.(string)
case ErrorTag:
entry.Error = t.Value.(error)
default:
entry.Tags = append(entry.Tags, t)
}
}
return entry
}
func (e Entry) Format(f fmt.State, r rune) {
if !e.At.IsZero() {
fmt.Fprint(f, e.At.Format("2006/01/02 15:04:05 "))
}
fmt.Fprint(f, e.Message)
if e.Error != nil {
fmt.Fprintf(f, ": %v", e.Error)
}
for _, tag := range e.Tags {
fmt.Fprintf(f, "\n\t%v = %v", tag.Key, tag.Value)
}
}

View File

@ -1,92 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package log is a context based logging package, designed to interact well
// with both the lsp protocol and the other telemetry packages.
package log
import (
"context"
"fmt"
"os"
"time"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)
const (
// The well known tag keys for the logging system.
MessageTag = tag.Key("message")
ErrorTag = tag.Key("error")
)
// Logger is a function that handles logging messages.
// Loggers are registered at start up, and may use information in the context
// to decide what to do with a given log message.
type Logger func(ctx context.Context, at time.Time, tags tag.List) bool
// With sends a tag list to the installed loggers.
func With(ctx context.Context, tags ...tag.Tag) {
at := time.Now()
worker.Do(func() {
deliver(ctx, at, tags)
})
}
// Print takes a message and a tag list and combines them into a single tag
// list before delivering them to the loggers.
func Print(ctx context.Context, message string, tags ...tag.Tagger) {
at := time.Now()
worker.Do(func() {
tags := append(tag.Tags(ctx, tags...), MessageTag.Of(message))
deliver(ctx, at, tags)
})
}
type errorString string
// Error allows errorString to conform to the error interface.
func (err errorString) Error() string { return string(err) }
// Print takes a message and a tag list and combines them into a single tag
// list before delivering them to the loggers.
func Error(ctx context.Context, message string, err error, tags ...tag.Tagger) {
at := time.Now()
worker.Do(func() {
if err == nil {
err = errorString(message)
message = ""
}
tags := append(tag.Tags(ctx, tags...), MessageTag.Of(message), ErrorTag.Of(err))
deliver(ctx, at, tags)
})
}
func deliver(ctx context.Context, at time.Time, tags tag.List) {
delivered := false
for _, logger := range loggers {
if logger(ctx, at, tags) {
delivered = true
}
}
if !delivered {
// no logger processed the message, so we log to stderr just in case
Stderr(ctx, at, tags)
}
}
var loggers = []Logger{}
func AddLogger(logger Logger) {
worker.Do(func() {
loggers = append(loggers, logger)
})
}
// Stderr is a logger that logs to stderr in the standard format.
func Stderr(ctx context.Context, at time.Time, tags tag.List) bool {
fmt.Fprintf(os.Stderr, "%v\n", ToEntry(ctx, at, tags))
return true
}

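The deleted telemetry log package above fans each message out to the registered Logger functions and falls back to stderr when none of them handles it. The compressed sketch below keeps that register/deliver/fallback structure while dropping the tag and worker machinery; the signatures are simplified for the example and are not the package's real API.

package main

import (
	"context"
	"fmt"
	"os"
	"sync"
	"time"
)

// Logger reports whether it handled the message.
type Logger func(ctx context.Context, at time.Time, msg string) bool

var (
	mu      sync.Mutex
	loggers []Logger
)

// AddLogger registers a logger; registration normally happens at startup.
func AddLogger(l Logger) {
	mu.Lock()
	defer mu.Unlock()
	loggers = append(loggers, l)
}

// Print delivers the message to every registered logger and, if none of them
// handled it, writes it to stderr so nothing is silently dropped.
func Print(ctx context.Context, msg string) {
	at := time.Now()
	mu.Lock()
	ls := append([]Logger(nil), loggers...)
	mu.Unlock()
	delivered := false
	for _, l := range ls {
		if l(ctx, at, msg) {
			delivered = true
		}
	}
	if !delivered {
		fmt.Fprintf(os.Stderr, "%s %s\n", at.Format("2006/01/02 15:04:05"), msg)
	}
}

func main() {
	AddLogger(func(ctx context.Context, at time.Time, msg string) bool {
		fmt.Println("editor window:", msg)
		return true
	})
	Print(context.Background(), "diagnostics finished")
}
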
View File

@ -1,412 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package metric aggregates stats into metrics that can be exported.
package metric
import (
"context"
"sort"
"golang.org/x/tools/internal/lsp/telemetry/stats"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)
// Handle uniquely identifies a constructed metric.
// It can be used to detect which observed data objects belong
// to that metric.
type Handle struct {
name string
}
// Data represents a single point in the time series of a metric.
// This provides the common interface to all metrics no matter their data
// format.
// To get the actual values for the metric you must type assert to a concrete
// metric type.
type Data interface {
// Handle returns the metric handle this data is for.
Handle() Handle
// Groups reports the rows that currently exist for this metric.
Groups() []tag.List
}
// Scalar represents the construction information for a scalar metric.
type Scalar struct {
// Name is the unique name of this metric.
Name string
// Description can be used by observers to describe the metric to users.
Description string
// Keys is the set of tags that collectively describe rows of the metric.
Keys []interface{}
}
// HistogramInt64 represents the construction information for an int64 histogram metric.
type HistogramInt64 struct {
// Name is the unique name of this metric.
Name string
// Description can be used by observers to describe the metric to users.
Description string
// Keys is the set of tags that collectively describe rows of the metric.
Keys []interface{}
// Buckets holds the inclusive upper bound of each bucket in the histogram.
Buckets []int64
}
// HistogramFloat64 represents the construction information for a float64 histogram metric.
type HistogramFloat64 struct {
// Name is the unique name of this metric.
Name string
// Description can be used by observers to describe the metric to users.
Description string
// Keys is the set of tags that collectively describe rows of the metric.
Keys []interface{}
// Buckets holds the inclusive upper bound of each bucket in the histogram.
Buckets []float64
}
// Observer is the type for functions that want to observe metric values
// as they arrive.
// Each data point delivered to an observer is immutable and can be stored if
// needed.
type Observer func(Data)
// CountInt64 creates a new metric based on the Scalar information that counts
// the number of times the supplied int64 measure is set.
// Metrics of this type will use Int64Data.
func (info Scalar) CountInt64(measure *stats.Int64Measure) Handle {
data := &Int64Data{Info: &info}
measure.Subscribe(data.countInt64)
return Handle{info.Name}
}
// SumInt64 creates a new metric based on the Scalar information that sums all
// the values recorded on the int64 measure.
// Metrics of this type will use Int64Data.
func (info Scalar) SumInt64(measure *stats.Int64Measure) Handle {
data := &Int64Data{Info: &info}
measure.Subscribe(data.sum)
return Handle{info.Name}
}
// LatestInt64 creates a new metric based on the Scalar information that tracks
// the most recent value recorded on the int64 measure.
// Metrics of this type will use Int64Data.
func (info Scalar) LatestInt64(measure *stats.Int64Measure) Handle {
data := &Int64Data{Info: &info, IsGauge: true}
measure.Subscribe(data.latest)
return Handle{info.Name}
}
// CountFloat64 creates a new metric based on the Scalar information that counts
// the number of times the supplied float64 measure is set.
// Metrics of this type will use Int64Data.
func (info Scalar) CountFloat64(measure *stats.Float64Measure) Handle {
data := &Int64Data{Info: &info}
measure.Subscribe(data.countFloat64)
return Handle{info.Name}
}
// SumFloat64 creates a new metric based on the Scalar information that sums all
// the values recorded on the float64 measure.
// Metrics of this type will use Float64Data.
func (info Scalar) SumFloat64(measure *stats.Float64Measure) Handle {
data := &Float64Data{Info: &info}
measure.Subscribe(data.sum)
return Handle{info.Name}
}
// LatestFloat64 creates a new metric based on the Scalar information that tracks
// the most recent value recorded on the float64 measure.
// Metrics of this type will use Float64Data.
func (info Scalar) LatestFloat64(measure *stats.Float64Measure) Handle {
data := &Float64Data{Info: &info, IsGauge: true}
measure.Subscribe(data.latest)
return Handle{info.Name}
}
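// Illustrative sketch, not part of the original file: wiring a Scalar metric
// to a measure from a client package. stats.Int64 is an assumed constructor
// for *stats.Int64Measure, since the stats package is not shown in this diff;
// the metric name and key are hypothetical.
var completionCount = stats.Int64("completion_count", "number of completion requests", "1")

// Count every value recorded on the measure, grouped by the "method" tag key.
var completionsHandle = metric.Scalar{
	Name:        "completions",
	Description: "count of completion requests",
	Keys:        []interface{}{tag.Key("method")},
}.CountInt64(completionCount)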
// Record creates a new metric based on the HistogramInt64 information that
// tracks the bucketized counts of values recorded on the int64 measure.
// Metrics of this type will use HistogramInt64Data.
func (info HistogramInt64) Record(measure *stats.Int64Measure) Handle {
data := &HistogramInt64Data{Info: &info}
measure.Subscribe(data.record)
return Handle{info.Name}
}
// Record creates a new metric based on the HistogramFloat64 information that
// tracks the bucketized counts of values recorded on the float64 measure.
// Metrics of this type will use HistogramFloat64Data.
func (info HistogramFloat64) Record(measure *stats.Float64Measure) Handle {
data := &HistogramFloat64Data{Info: &info}
measure.Subscribe(data.record)
return Handle{info.Name}
}
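// Similarly, an illustrative sketch of a histogram metric built on the same
// assumed stats.Int64 constructor; the metric name and buckets are hypothetical.
var latencyMs = stats.Int64("latency_ms", "request latency in milliseconds", "ms")

var latencyHandle = metric.HistogramInt64{
	Name:        "request_latency",
	Description: "distribution of request latencies",
	Keys:        []interface{}{tag.Key("method")},
	Buckets:     []int64{10, 50, 100, 500, 1000},
}.Record(latencyMs)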
// Int64Data is a concrete implementation of Data for int64 scalar metrics.
type Int64Data struct {
// Info holds the original construction information.
Info *Scalar
// IsGauge is true for metrics that track values, rather than increasing over time.
IsGauge bool
// Rows holds the per group values for the metric.
Rows []int64
groups []tag.List
}
// Float64Data is a concrete implementation of Data for float64 scalar metrics.
type Float64Data struct {
// Info holds the original construction information.
Info *Scalar
// IsGauge is true for metrics that track values, rather than increasing over time.
IsGauge bool
// Rows holds the per group values for the metric.
Rows []float64
groups []tag.List
}
// HistogramInt64Data is a concrete implementation of Data for int64 histogram metrics.
type HistogramInt64Data struct {
// Info holds the original construction information.
Info *HistogramInt64
// Rows holds the per group values for the metric.
Rows []*HistogramInt64Row
groups []tag.List
}
// HistogramInt64Row holds the values for a single row of a HistogramInt64Data.
type HistogramInt64Row struct {
// Values is the counts per bucket.
Values []int64
// Count is the total count.
Count int64
// Sum is the sum of all the values recorded.
Sum int64
// Min is the smallest recorded value.
Min int64
// Max is the largest recorded value.
Max int64
}
// HistogramFloat64Data is a concrete implementation of Data for float64 histogram metrics.
type HistogramFloat64Data struct {
// Info holds the original construction information.
Info *HistogramFloat64
// Rows holds the per group values for the metric.
Rows []*HistogramFloat64Row
groups []tag.List
}
// HistogramFloat64Row holds the values for a single row of a HistogramFloat64Data.
type HistogramFloat64Row struct {
// Values is the counts per bucket.
Values []int64
// Count is the total count.
Count int64
// Sum is the sum of all the values recorded.
Sum float64
// Min is the smallest recorded value.
Min float64
// Max is the largest recorded value.
Max float64
}
// Name returns the name of the metric this is a handle for.
func (h Handle) Name() string { return h.name }
var observers []Observer
// RegisterObservers adds new metric observers to the system.
// There is no way to unregister an observer.
func RegisterObservers(e ...Observer) {
worker.Do(func() {
observers = append(e, observers...)
})
}
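// Illustrative sketch, not part of the original file: registering an Observer
// that prints int64 scalar rows. Each Data value delivered is an immutable
// snapshot, so reading it here is safe; assumes the metric package import and fmt.
func exampleObserver() {
	metric.RegisterObservers(func(d metric.Data) {
		if scalar, ok := d.(*metric.Int64Data); ok {
			for i, group := range scalar.Groups() {
				fmt.Printf("%s %v = %d\n", scalar.Info.Name, group, scalar.Rows[i])
			}
		}
	})
}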
// export must only be called from inside a worker
func export(m Data) {
for _, e := range observers {
e(m)
}
}
func getGroup(ctx context.Context, g *[]tag.List, keys []interface{}) (int, bool) {
group := tag.Get(ctx, keys...)
old := *g
index := sort.Search(len(old), func(i int) bool {
return !old[i].Less(group)
})
if index < len(old) && group.Equal(old[index]) {
// not a new group
return index, false
}
*g = make([]tag.List, len(old)+1)
copy(*g, old[:index])
copy((*g)[index+1:], old[index:])
(*g)[index] = group
return index, true
}
func (data *Int64Data) Handle() Handle { return Handle{data.Info.Name} }
func (data *Int64Data) Groups() []tag.List { return data.groups }
func (data *Int64Data) modify(ctx context.Context, f func(v int64) int64) {
worker.Do(func() {
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
old := data.Rows
if insert {
data.Rows = make([]int64, len(old)+1)
copy(data.Rows, old[:index])
copy(data.Rows[index+1:], old[index:])
} else {
data.Rows = make([]int64, len(old))
copy(data.Rows, old)
}
data.Rows[index] = f(data.Rows[index])
frozen := *data
export(&frozen)
})
}
func (data *Int64Data) countInt64(ctx context.Context, measure *stats.Int64Measure, value int64) {
data.modify(ctx, func(v int64) int64 { return v + 1 })
}
func (data *Int64Data) countFloat64(ctx context.Context, measure *stats.Float64Measure, value float64) {
data.modify(ctx, func(v int64) int64 { return v + 1 })
}
func (data *Int64Data) sum(ctx context.Context, measure *stats.Int64Measure, value int64) {
data.modify(ctx, func(v int64) int64 { return v + value })
}
func (data *Int64Data) latest(ctx context.Context, measure *stats.Int64Measure, value int64) {
data.modify(ctx, func(v int64) int64 { return value })
}
func (data *Float64Data) Handle() Handle { return Handle{data.Info.Name} }
func (data *Float64Data) Groups() []tag.List { return data.groups }
func (data *Float64Data) modify(ctx context.Context, f func(v float64) float64) {
worker.Do(func() {
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
old := data.Rows
if insert {
data.Rows = make([]float64, len(old)+1)
copy(data.Rows, old[:index])
copy(data.Rows[index+1:], old[index:])
} else {
data.Rows = make([]float64, len(old))
copy(data.Rows, old)
}
data.Rows[index] = f(data.Rows[index])
frozen := *data
export(&frozen)
})
}
func (data *Float64Data) sum(ctx context.Context, measure *stats.Float64Measure, value float64) {
data.modify(ctx, func(v float64) float64 { return v + value })
}
func (data *Float64Data) latest(ctx context.Context, measure *stats.Float64Measure, value float64) {
data.modify(ctx, func(v float64) float64 { return value })
}
func (data *HistogramInt64Data) Handle() Handle { return Handle{data.Info.Name} }
func (data *HistogramInt64Data) Groups() []tag.List { return data.groups }
func (data *HistogramInt64Data) modify(ctx context.Context, f func(v *HistogramInt64Row)) {
worker.Do(func() {
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
old := data.Rows
var v HistogramInt64Row
if insert {
data.Rows = make([]*HistogramInt64Row, len(old)+1)
copy(data.Rows, old[:index])
copy(data.Rows[index+1:], old[index:])
} else {
data.Rows = make([]*HistogramInt64Row, len(old))
copy(data.Rows, old)
v = *data.Rows[index]
}
oldValues := v.Values
v.Values = make([]int64, len(data.Info.Buckets))
copy(v.Values, oldValues)
f(&v)
data.Rows[index] = &v
frozen := *data
export(&frozen)
})
}
func (data *HistogramInt64Data) record(ctx context.Context, measure *stats.Int64Measure, value int64) {
data.modify(ctx, func(v *HistogramInt64Row) {
v.Sum += value
if v.Min > value || v.Count == 0 {
v.Min = value
}
if v.Max < value || v.Count == 0 {
v.Max = value
}
v.Count++
for i, b := range data.Info.Buckets {
if value <= b {
v.Values[i]++
}
}
})
}
func (data *HistogramFloat64Data) Handle() Handle { return Handle{data.Info.Name} }
func (data *HistogramFloat64Data) Groups() []tag.List { return data.groups }
func (data *HistogramFloat64Data) modify(ctx context.Context, f func(v *HistogramFloat64Row)) {
worker.Do(func() {
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
old := data.Rows
var v HistogramFloat64Row
if insert {
data.Rows = make([]*HistogramFloat64Row, len(old)+1)
copy(data.Rows, old[:index])
copy(data.Rows[index+1:], old[index:])
} else {
data.Rows = make([]*HistogramFloat64Row, len(old))
copy(data.Rows, old)
v = *data.Rows[index]
}
oldValues := v.Values
v.Values = make([]int64, len(data.Info.Buckets))
copy(v.Values, oldValues)
f(&v)
data.Rows[index] = &v
frozen := *data
export(&frozen)
})
}
func (data *HistogramFloat64Data) record(ctx context.Context, measure *stats.Float64Measure, value float64) {
data.modify(ctx, func(v *HistogramFloat64Row) {
v.Sum += value
if v.Min > value || v.Count == 0 {
v.Min = value
}
if v.Max < value || v.Count == 0 {
v.Max = value
}
v.Count++
for i, b := range data.Info.Buckets {
if value <= b {
v.Values[i]++
}
}
})
}

View File

@ -1,242 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ocagent adds the ability to export all telemetry to an ocagent.
// This keeps the compile-time dependencies at zero and allows the agent to
// have the exporters needed for telemetry aggregation and viewing systems.
package ocagent
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/metric"
"golang.org/x/tools/internal/lsp/telemetry/ocagent/wire"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)
const DefaultAddress = "http://localhost:55678"
const exportRate = 2 * time.Second
type exporter struct {
address string
node *wire.Node
spans []*wire.Span
metrics []*wire.Metric
}
func Export(service, address string) {
if address == "off" {
return
}
hostname, _ := os.Hostname()
exporter := &exporter{
address: address,
node: &wire.Node{
Identifier: &wire.ProcessIdentifier{
HostName: hostname,
Pid: uint32(os.Getpid()),
StartTimestamp: convertTimestamp(time.Now()),
},
LibraryInfo: &wire.LibraryInfo{
Language: wire.LanguageGo,
ExporterVersion: "0.0.1",
CoreLibraryVersion: "x/tools",
},
ServiceInfo: &wire.ServiceInfo{
Name: service,
},
},
}
if exporter.address == "" {
exporter.address = DefaultAddress
}
//TODO: add metrics once the ocagent json metric interface works
trace.RegisterObservers(exporter.observeTrace)
go func() {
for range time.Tick(exportRate) {
worker.Do(func() {
exporter.flush()
})
}
}()
}
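// Illustrative sketch, not part of the original file: a server would typically
// call Export once at startup; passing "off" as the address disables the exporter.
func enableTelemetryExport() {
	ocagent.Export("gopls", ocagent.DefaultAddress)
}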
func (e *exporter) observeTrace(span *trace.Span) {
// Only export completed spans; a zero Finish time means the span is still open.
if span.Finish.IsZero() {
return
}
e.spans = append(e.spans, convertSpan(span))
}
func (e *exporter) observeMetric(data metric.Data) {
e.metrics = append(e.metrics, convertMetric(data))
}
func (e *exporter) flush() {
spans := e.spans
e.spans = nil
metrics := e.metrics
e.metrics = nil
if len(spans) > 0 {
e.send("/v1/trace", &wire.ExportTraceServiceRequest{
Node: e.node,
Spans: spans,
//TODO: Resource?
})
}
if len(metrics) > 0 {
e.send("/v1/metrics", &wire.ExportMetricsServiceRequest{
Node: e.node,
Metrics: metrics,
//TODO: Resource?
})
}
}
func (e *exporter) send(endpoint string, message interface{}) {
blob, err := json.Marshal(message)
if err != nil {
errorInExport("ocagent failed to marshal message for %v: %v", endpoint, err)
return
}
uri := e.address + endpoint
req, err := http.NewRequest("POST", uri, bytes.NewReader(blob))
if err != nil {
errorInExport("ocagent failed to build request for %v: %v", uri, err)
return
}
req.Header.Set("Content-Type", "application/json")
res, err := http.DefaultClient.Do(req)
if err != nil {
errorInExport("ocagent failed to send message: %v\n", err)
return
}
res.Body.Close()
return
}
func errorInExport(message string, args ...interface{}) {
// This function is useful when debugging the exporter, but in general we
// want to just drop any export errors.
}
func convertTimestamp(t time.Time) wire.Timestamp {
return t.Format(time.RFC3339Nano)
}
func toTruncatableString(s string) *wire.TruncatableString {
return &wire.TruncatableString{Value: s}
}
func convertSpan(span *trace.Span) *wire.Span {
result := &wire.Span{
TraceId: span.TraceID[:],
SpanId: span.SpanID[:],
TraceState: nil, //TODO?
ParentSpanId: span.ParentID[:],
Name: toTruncatableString(span.Name),
Kind: wire.UnspecifiedSpanKind,
StartTime: convertTimestamp(span.Start),
EndTime: convertTimestamp(span.Finish),
Attributes: convertAttributes(span.Tags),
TimeEvents: convertEvents(span.Events),
SameProcessAsParentSpan: true,
//TODO: StackTrace?
//TODO: Links?
//TODO: Status?
//TODO: Resource?
}
return result
}
func convertMetric(data metric.Data) *wire.Metric {
return nil //TODO:
}
func convertAttributes(tags tag.List) *wire.Attributes {
if len(tags) == 0 {
return nil
}
attributes := make(map[string]wire.Attribute)
for _, tag := range tags {
attributes[fmt.Sprint(tag.Key)] = convertAttribute(tag.Value)
}
return &wire.Attributes{AttributeMap: attributes}
}
func convertAttribute(v interface{}) wire.Attribute {
switch v := v.(type) {
case int8:
return wire.IntAttribute{IntValue: int64(v)}
case int16:
return wire.IntAttribute{IntValue: int64(v)}
case int32:
return wire.IntAttribute{IntValue: int64(v)}
case int64:
return wire.IntAttribute{IntValue: v}
case uint8:
return wire.IntAttribute{IntValue: int64(v)}
case uint16:
return wire.IntAttribute{IntValue: int64(v)}
case uint32:
return wire.IntAttribute{IntValue: int64(v)}
case uint64:
return wire.IntAttribute{IntValue: int64(v)}
case uint:
return wire.IntAttribute{IntValue: int64(v)}
case float32:
return wire.DoubleAttribute{DoubleValue: float64(v)}
case float64:
return wire.DoubleAttribute{DoubleValue: v}
case bool:
return wire.BoolAttribute{BoolValue: v}
case string:
return wire.StringAttribute{StringValue: toTruncatableString(v)}
default:
return wire.StringAttribute{StringValue: toTruncatableString(fmt.Sprint(v))}
}
}
func convertEvents(events []trace.Event) *wire.TimeEvents {
//TODO: MessageEvents?
result := make([]wire.TimeEvent, len(events))
for i, event := range events {
result[i] = convertEvent(event)
}
return &wire.TimeEvents{TimeEvent: result}
}
func convertEvent(event trace.Event) wire.TimeEvent {
return wire.TimeEvent{
Time: convertTimestamp(event.Time),
Annotation: convertAnnotation(event.Tags),
}
}
func convertAnnotation(tags tag.List) *wire.Annotation {
entry := log.ToEntry(nil, time.Time{}, tags)
description := entry.Message
if description == "" && entry.Error != nil {
description = entry.Error.Error()
entry.Error = nil
}
tags = entry.Tags
if entry.Error != nil {
tags = append(tags, tag.Of("Error", entry.Error))
}
return &wire.Annotation{
Description: toTruncatableString(description),
Attributes: convertAttributes(tags),
}
}

View File

@ -1,101 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wire
// This file holds common ocagent types
type Node struct {
Identifier *ProcessIdentifier `json:"identifier,omitempty"`
LibraryInfo *LibraryInfo `json:"library_info,omitempty"`
ServiceInfo *ServiceInfo `json:"service_info,omitempty"`
Attributes map[string]string `json:"attributes,omitempty"`
}
type Resource struct {
Type string `json:"type,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
}
type TruncatableString struct {
Value string `json:"value,omitempty"`
TruncatedByteCount int32 `json:"truncated_byte_count,omitempty"`
}
type Attributes struct {
AttributeMap map[string]Attribute `json:"attributeMap,omitempty"`
DroppedAttributesCount int32 `json:"dropped_attributes_count,omitempty"`
}
type StringAttribute struct {
StringValue *TruncatableString `json:"stringValue,omitempty"`
}
type IntAttribute struct {
IntValue int64 `json:"intValue,omitempty"`
}
type BoolAttribute struct {
BoolValue bool `json:"boolValue,omitempty"`
}
type DoubleAttribute struct {
DoubleValue float64 `json:"doubleValue,omitempty"`
}
type Attribute interface {
tagAttribute()
}
func (StringAttribute) tagAttribute() {}
func (IntAttribute) tagAttribute() {}
func (BoolAttribute) tagAttribute() {}
func (DoubleAttribute) tagAttribute() {}
type StackTrace struct {
StackFrames *StackFrames `json:"stack_frames,omitempty"`
StackTraceHashId uint64 `json:"stack_trace_hash_id,omitempty"`
}
type StackFrames struct {
Frame []*StackFrame `json:"frame,omitempty"`
DroppedFramesCount int32 `json:"dropped_frames_count,omitempty"`
}
type StackFrame struct {
FunctionName *TruncatableString `json:"function_name,omitempty"`
OriginalFunctionName *TruncatableString `json:"original_function_name,omitempty"`
FileName *TruncatableString `json:"file_name,omitempty"`
LineNumber int64 `json:"line_number,omitempty"`
ColumnNumber int64 `json:"column_number,omitempty"`
LoadModule *Module `json:"load_module,omitempty"`
SourceVersion *TruncatableString `json:"source_version,omitempty"`
}
type Module struct {
Module *TruncatableString `json:"module,omitempty"`
BuildId *TruncatableString `json:"build_id,omitempty"`
}
type ProcessIdentifier struct {
HostName string `json:"host_name,omitempty"`
Pid uint32 `json:"pid,omitempty"`
StartTimestamp Timestamp `json:"start_timestamp,omitempty"`
}
type LibraryInfo struct {
Language Language `json:"language,omitempty"`
ExporterVersion string `json:"exporter_version,omitempty"`
CoreLibraryVersion string `json:"core_library_version,omitempty"`
}
type Language int32
const (
LanguageGo Language = 4
)
type ServiceInfo struct {
Name string `json:"name,omitempty"`
}
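// Illustrative sketch, not part of the original file: marshalling a minimal
// Node to show the JSON these wire types produce. Field values are arbitrary
// examples; assumes encoding/json and fmt imports.
func exampleNodeJSON() {
	node := &wire.Node{
		Identifier: &wire.ProcessIdentifier{
			HostName:       "dev-machine",
			Pid:            4242,
			StartTimestamp: "2019-08-01T10:00:00Z",
		},
		LibraryInfo: &wire.LibraryInfo{
			Language:           wire.LanguageGo,
			ExporterVersion:    "0.0.1",
			CoreLibraryVersion: "x/tools",
		},
		ServiceInfo: &wire.ServiceInfo{Name: "gopls"},
	}
	blob, _ := json.Marshal(node)
	// {"identifier":{"host_name":"dev-machine","pid":4242,...},"service_info":{"name":"gopls"}}
	fmt.Println(string(blob))
}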

View File

@ -1,17 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wire
// This file contains types that match the core proto types.
type Timestamp = string
type Int64Value struct {
Value int64 `json:"value,omitempty"`
}
type DoubleValue struct {
Value float64 `json:"value,omitempty"`
}

View File

@ -1,130 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wire
type ExportMetricsServiceRequest struct {
Node *Node `json:"node,omitempty"`
Metrics []*Metric `json:"metrics,omitempty"`
Resource *Resource `json:"resource,omitempty"`
}
type Metric struct {
MetricDescriptor *MetricDescriptor `json:"metric_descriptor,omitempty"`
Timeseries []*TimeSeries `json:"timeseries,omitempty"`
Resource *Resource `json:"resource,omitempty"`
}
type MetricDescriptor struct {
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
Unit string `json:"unit,omitempty"`
Type MetricDescriptor_Type `json:"type,omitempty"`
LabelKeys []*LabelKey `json:"label_keys,omitempty"`
}
type MetricDescriptor_Type int32
const (
MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
)
type LabelKey struct {
Key string `json:"key,omitempty"`
Description string `json:"description,omitempty"`
}
type TimeSeries struct {
StartTimestamp *Timestamp `json:"start_timestamp,omitempty"`
LabelValues []*LabelValue `json:"label_values,omitempty"`
Points []*Point `json:"points,omitempty"`
}
type LabelValue struct {
Value string `json:"value,omitempty"`
HasValue bool `json:"has_value,omitempty"`
}
type Point struct {
Timestamp *Timestamp `json:"timestamp,omitempty"`
Value PointValue `json:"value,omitempty"`
}
type PointInt64Value struct {
Int64Value int64 `json:"int64Value,omitempty"`
}
type PointDoubleValue struct {
DoubleValue float64 `json:"doubleValue,omitempty"`
}
type PointDistributionValue struct {
DistributionValue *DistributionValue `json:"distributionValue,omitempty"`
}
type PointSummaryValue struct {
SummaryValue *SummaryValue `json:"summaryValue,omitempty"`
}
type PointValue interface {
tagPointValue()
}
func (PointInt64Value) tagPointValue() {}
func (PointDoubleValue) tagPointValue() {}
func (PointDistributionValue) tagPointValue() {}
func (PointSummaryValue) tagPointValue() {}
type DistributionValue struct {
Count int64 `json:"count,omitempty"`
Sum float64 `json:"sum,omitempty"`
SumOfSquaredDeviation float64 `json:"sum_of_squared_deviation,omitempty"`
BucketOptions BucketOptions `json:"bucket_options,omitempty"`
Buckets []*Bucket `json:"buckets,omitempty"`
}
type BucketOptionsExplicit struct {
Bounds []float64 `json:"bounds,omitempty"`
}
type BucketOptions interface {
tagBucketOptions()
}
func (BucketOptionsExplicit) tagBucketOptions() {}
type Bucket struct {
Count int64 `json:"count,omitempty"`
Exemplar *Exemplar `json:"exemplar,omitempty"`
}
type Exemplar struct {
Value float64 `json:"value,omitempty"`
Timestamp *Timestamp `json:"timestamp,omitempty"`
Attachments map[string]string `json:"attachments,omitempty"`
}
type SummaryValue struct {
Count *Int64Value `json:"count,omitempty"`
Sum *DoubleValue `json:"sum,omitempty"`
Snapshot *Snapshot `json:"snapshot,omitempty"`
}
type Snapshot struct {
Count *Int64Value `json:"count,omitempty"`
Sum *DoubleValue `json:"sum,omitempty"`
PercentileValues []*SnapshotValueAtPercentile `json:"percentile_values,omitempty"`
}
type SnapshotValueAtPercentile struct {
Percentile float64 `json:"percentile,omitempty"`
Value float64 `json:"value,omitempty"`
}

View File

@ -1,112 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wire
type ExportTraceServiceRequest struct {
Node *Node `json:"node,omitempty"`
Spans []*Span `json:"spans,omitempty"`
Resource *Resource `json:"resource,omitempty"`
}
type Span struct {
TraceId []byte `json:"trace_id,omitempty"`
SpanId []byte `json:"span_id,omitempty"`
TraceState *TraceState `json:"tracestate,omitempty"`
ParentSpanId []byte `json:"parent_span_id,omitempty"`
Name *TruncatableString `json:"name,omitempty"`
Kind SpanKind `json:"kind,omitempty"`
StartTime Timestamp `json:"start_time,omitempty"`
EndTime Timestamp `json:"end_time,omitempty"`
Attributes *Attributes `json:"attributes,omitempty"`
StackTrace *StackTrace `json:"stack_trace,omitempty"`
TimeEvents *TimeEvents `json:"time_events,omitempty"`
Links *Links `json:"links,omitempty"`
Status *Status `json:"status,omitempty"`
Resource *Resource `json:"resource,omitempty"`
SameProcessAsParentSpan bool `json:"same_process_as_parent_span,omitempty"`
ChildSpanCount bool `json:"child_span_count,omitempty"`
}
type TraceState struct {
Entries []*TraceStateEntry `json:"entries,omitempty"`
}
type TraceStateEntry struct {
Key string `json:"key,omitempty"`
Value string `json:"value,omitempty"`
}
type SpanKind int32
const (
UnspecifiedSpanKind SpanKind = 0
ServerSpanKind SpanKind = 1
ClientSpanKind SpanKind = 2
)
type TimeEvents struct {
TimeEvent []TimeEvent `json:"timeEvent,omitempty"`
DroppedAnnotationsCount int32 `json:"dropped_annotations_count,omitempty"`
DroppedMessageEventsCount int32 `json:"dropped_message_events_count,omitempty"`
}
type TimeEvent struct {
Time Timestamp `json:"time,omitempty"`
MessageEvent *MessageEvent `json:"messageEvent,omitempty"`
Annotation *Annotation `json:"annotation,omitempty"`
}
type Annotation struct {
Description *TruncatableString `json:"description,omitempty"`
Attributes *Attributes `json:"attributes,omitempty"`
}
type MessageEvent struct {
Type MessageEventType `json:"type,omitempty"`
Id uint64 `json:"id,omitempty"`
UncompressedSize uint64 `json:"uncompressed_size,omitempty"`
CompressedSize uint64 `json:"compressed_size,omitempty"`
}
type MessageEventType int32
const (
UnspecifiedMessageEvent MessageEventType = iota
SentMessageEvent
ReceivedMessageEvent
)
type TimeEventValue interface {
tagTimeEventValue()
}
func (Annotation) tagTimeEventValue() {}
func (MessageEvent) tagTimeEventValue() {}
type Links struct {
Link []*Link `json:"link,omitempty"`
DroppedLinksCount int32 `json:"dropped_links_count,omitempty"`
}
type Link struct {
TraceId []byte `json:"trace_id,omitempty"`
SpanId []byte `json:"span_id,omitempty"`
Type LinkType `json:"type,omitempty"`
Attributes *Attributes `json:"attributes,omitempty"`
TraceState *TraceState `json:"tracestate,omitempty"`
}
type LinkType int32
const (
UnspecifiedLinkType LinkType = 0
ChildLinkType LinkType = 1
ParentLinkType LinkType = 2
)
type Status struct {
Code int32 `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}

View File

@ -17,24 +17,12 @@ import (
// those values in the context.
type Key string
// Of returns a Tag for a key and value.
// This is a trivial helper that makes common logging easier to read.
func Of(key interface{}, value interface{}) Tag {
return Tag{Key: key, Value: value}
}
// Of creates a new Tag with this key and the supplied value.
// You can use this when building a tag list.
func (k Key) Of(v interface{}) Tag {
return Tag{Key: k, Value: v}
}
// Tag can be used to get a tag for the key from a context.
// It makes Key conform to the Tagger interface.
func (k Key) Tag(ctx context.Context) Tag {
return Tag{Key: k, Value: ctx.Value(k)}
}
// With sets this key to the supplied value on the context and returns the
// new context generated.
// It uses the With package level function so that observers are also notified.
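// Illustrative sketch, not part of the original file: using the Key helpers
// above from a client package; the "method" key and its value are hypothetical.
var methodKey = tag.Key("method")

func exampleKey(ctx context.Context) {
	// Build a tag explicitly for a known value...
	t := methodKey.Of("textDocument/hover")
	// ...or capture whatever value the context currently carries for the key.
	t = methodKey.Tag(ctx)
	_ = t
}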

View File

@ -25,14 +25,6 @@ type Tag struct {
Value interface{}
}
// Tagger is the interface to something that returns a Tag given a context.
// Both Tag itself and Key satisfy this interface, allowing methods to accept
// either (as well as other implementations).
type Tagger interface {
// Tag returns a Tag potentially using information from the Context.
Tag(context.Context) Tag
}
// List is a way of passing around a collection of key value pairs.
// It is an alternative to the less efficient and unordered method of using
// maps.
@ -72,15 +64,6 @@ func Get(ctx context.Context, keys ...interface{}) List {
return tags
}
// Tags collects a list of tags for the taggers from the context.
func Tags(ctx context.Context, taggers ...Tagger) List {
tags := make(List, len(taggers))
for i, t := range taggers {
tags[i] = t.Tag(ctx)
}
return tags
}
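// Illustrative sketch, not part of the original file: a Key and a concrete Tag
// both satisfy Tagger, so they can be mixed when collecting a List; the key
// and values here are hypothetical, and fmt is an assumed import.
func exampleTags(ctx context.Context) {
	method := tag.Key("method")
	tags := tag.Tags(ctx, method, tag.Of("uri", "file:///tmp/main.go"))
	if v := tags.Get("uri"); v != nil {
		fmt.Println("uri:", v)
	}
}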
var observers = []Observer{}
// Observe adds a new tag observer to the registered set.
@ -97,12 +80,6 @@ func (t Tag) Format(f fmt.State, r rune) {
fmt.Fprintf(f, `%v="%v"`, t.Key, t.Value)
}
// Tag returns the tag unmodified.
// It makes Tag conform to the Tagger interface.
func (t Tag) Tag(ctx context.Context) Tag {
return t
}
// Get will get a single key's value from the list.
func (l List) Get(k interface{}) interface{} {
for _, t := range l {

View File

@ -1,72 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trace adds support for telemetry tracing.
package trace
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"math/rand"
"sync"
"sync/atomic"
)
type TraceID [16]byte
type SpanID [8]byte
func (t TraceID) String() string {
return fmt.Sprintf("%02x", t[:])
}
func (s SpanID) String() string {
return fmt.Sprintf("%02x", s[:])
}
func (s SpanID) IsValid() bool {
return s != SpanID{}
}
var (
generationMu sync.Mutex
nextSpanID uint64
spanIDInc uint64
traceIDAdd [2]uint64
traceIDRand *rand.Rand
)
func initGenerator() {
var rngSeed int64
for _, p := range []interface{}{
&rngSeed, &traceIDAdd, &nextSpanID, &spanIDInc,
} {
binary.Read(crand.Reader, binary.LittleEndian, p)
}
traceIDRand = rand.New(rand.NewSource(rngSeed))
spanIDInc |= 1
}
func newTraceID() TraceID {
generationMu.Lock()
defer generationMu.Unlock()
if traceIDRand == nil {
initGenerator()
}
var tid [16]byte
binary.LittleEndian.PutUint64(tid[0:8], traceIDRand.Uint64()+traceIDAdd[0])
binary.LittleEndian.PutUint64(tid[8:16], traceIDRand.Uint64()+traceIDAdd[1])
return tid
}
func newSpanID() SpanID {
var id uint64
for id == 0 {
id = atomic.AddUint64(&nextSpanID, spanIDInc)
}
var sid [8]byte
binary.LittleEndian.PutUint64(sid[:], id)
return sid
}

View File

@ -2,139 +2,59 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trace adds support for telemetry tracing.
// Package tag adds support for telemetry tracins.
package trace
import (
"context"
"fmt"
"time"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)
type Span struct {
Name string
TraceID TraceID
SpanID SpanID
ParentID SpanID
Start time.Time
Finish time.Time
Tags tag.List
Events []Event
ready bool
type Span interface {
AddAttributes(attributes ...Attribute)
AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
Annotate(attributes []Attribute, str string)
Annotatef(attributes []Attribute, format string, a ...interface{})
End()
IsRecordingEvents() bool
SetName(name string)
SetStatus(status Status)
}
type Event struct {
Time time.Time
Tags tag.List
type Attribute interface{}
type Status struct {
Code int32
Message string
}
type Observer func(*Span)
type nullSpan struct{}
func RegisterObservers(o ...Observer) {
worker.Do(func() {
if !registered {
registered = true
tag.Observe(tagObserver)
log.AddLogger(logger)
}
observers = append(observers, o...)
})
}
func StartSpan(ctx context.Context, name string, tags ...tag.Tag) (context.Context, func()) {
span := &Span{
Name: name,
Start: time.Now(),
}
if parent := fromContext(ctx); parent != nil {
span.TraceID = parent.TraceID
span.ParentID = parent.SpanID
} else {
span.TraceID = newTraceID()
}
span.SpanID = newSpanID()
ctx = context.WithValue(ctx, contextKey, span)
if len(tags) > 0 {
ctx = tag.With(ctx, tags...)
}
worker.Do(func() {
span.ready = true
for _, o := range observers {
o(span)
}
})
return ctx, span.close
}
func (s *Span) close() {
now := time.Now()
worker.Do(func() {
s.Finish = now
for _, o := range observers {
o(s)
}
})
}
func (s *Span) Format(f fmt.State, r rune) {
fmt.Fprintf(f, "%v %v:%v", s.Name, s.TraceID, s.SpanID)
if s.ParentID.IsValid() {
fmt.Fprintf(f, "[%v]", s.ParentID)
}
fmt.Fprintf(f, " %v->%v", s.Start, s.Finish)
}
type contextKeyType int
var contextKey contextKeyType
func fromContext(ctx context.Context) *Span {
v := ctx.Value(contextKey)
if v == nil {
return nil
}
return v.(*Span)
}
func (nullSpan) AddAttributes(attributes ...Attribute) {}
func (nullSpan) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {}
func (nullSpan) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {}
func (nullSpan) Annotate(attributes []Attribute, str string) {}
func (nullSpan) Annotatef(attributes []Attribute, format string, a ...interface{}) {}
func (nullSpan) End() {}
func (nullSpan) IsRecordingEvents() bool { return false }
func (nullSpan) SetName(name string) {}
func (nullSpan) SetStatus(status Status) {}
var (
observers []Observer
registered bool
FromContext = func(ctx context.Context) Span { return nullSpan{} }
NewContext = func(ctx context.Context, span Span) context.Context { return ctx }
StartSpan = func(ctx context.Context, name string, options ...interface{}) (context.Context, Span) {
return ctx, nullSpan{}
}
BoolAttribute = func(key string, value bool) Attribute { return nil }
Float64Attribute = func(key string, value float64) Attribute { return nil }
Int64Attribute = func(key string, value int64) Attribute { return nil }
StringAttribute = func(key string, value string) Attribute { return nil }
WithSpanKind = func(spanKind int) interface{} { return nil }
)
func tagObserver(ctx context.Context, at time.Time, tags tag.List) {
span := fromContext(ctx)
if span == nil {
return
}
if !span.ready {
span.Tags = append(span.Tags, tags...)
return
}
span.Events = append(span.Events, Event{
Time: at,
Tags: tags,
})
}
func logger(ctx context.Context, at time.Time, tags tag.List) bool {
span := fromContext(ctx)
if span == nil {
return false
}
span.Events = append(span.Events, Event{
Time: at,
Tags: tags,
})
return false
}
// Detach returns a context without an associated span.
// This allows the creation of spans that are not children of the current span.
func Detach(ctx context.Context) context.Context {
return context.WithValue(ctx, contextKey, nil)
}
const (
SpanKindUnspecified = iota
SpanKindServer
SpanKindClient
)
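// This hunk interleaves two versions of the API. On the removed (master) side,
// StartSpan returns the new context plus a completion func; an illustrative
// sketch of that usage, with a hypothetical span name and tag, not part of the
// original file:
func exampleSpan(ctx context.Context) {
	ctx, done := trace.StartSpan(ctx, "lsp.Hover", tag.Of("uri", "file:///tmp/main.go"))
	defer done()
	// ... work attributed to the span happens here, using ctx ...
	_ = ctx
}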

View File

@ -6,13 +6,8 @@
// to work cooperatively and efficiently.
package worker
import (
"fmt"
"os"
)
var (
workQueue = make(chan func(), 1000)
workQueue = make(chan func(), 100)
)
func init() {
@ -31,10 +26,5 @@ func init() {
// This function may block, but in general it will return very quickly and
// before the task has been run.
func Do(task func()) {
select {
case workQueue <- task:
default:
fmt.Fprint(os.Stderr, "work queue is full\n")
workQueue <- task
}
workQueue <- task
}
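// Illustrative sketch, not part of the original file: queueing work onto the
// telemetry worker from a client package.
func exampleDo() {
	worker.Do(func() {
		// Runs on the single worker goroutine, so shared telemetry state
		// can be read and updated here without extra locking.
	})
}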

View File

@ -66,7 +66,6 @@ foo/foo.go:1:9-12: defined here as
}
-- PackageFoo-hover --
myFoo "golang.org/x/tools/internal/lsp/foo" //@godef("foo", PackageFoo),godef("myFoo", PackageFoo)
-- S1-definition --
godef/b/b.go:8:6-8: defined here as S1 struct {

Some files were not shown because too many files have changed in this diff.