Compare commits
59 Commits: gopls/v0.1...master

SHA1:
1e85ed8060 5a5cfefe1f c001e47e7f dddb761723 09f9cfa882 a81e99d748 ed3277de27 7deaedd405
ff9f140924 fc6e2057e7 db2fa46ec3 1bd56024c6 2e34cfcb95 8aa4eac1a7 4e8ec5a316 8bb11ff117
e377ae9d63 5ec23663d0 72478f3938 87e92536fd 625c92e46d 38daa6564b 82a3ea8a50 c81b74871b
7caf8110c9 73497f0562 7ec096a112 f2838559cb 128ec6dfca e98af23098 fdb8f0bb4e b0712d6011
b667c4c58e 502543d2ed 0b5a7f81db 919acb9f1f fefcef05ab 9b2cb0e5f6 a0f5e6c5c2 9e48ab1d90
607ca053a1 ef8e083144 565492930f 1b7e409d2c c8ecc7589e 8b927904ee b32ec66a23 f4b4e63240
128c804424 8308f91286 63f37bb4d3 d5f455491e 9a621aea19 9c57229d8a 75aaabac35 6156d14a7a
01b81f4f93 5f9351755f 2868181328

@@ -1,18 +1,83 @@
// The digraph command performs queries over unlabelled directed graphs
// represented in text form. It is intended to integrate nicely with
// typical UNIX command pipelines.
//
// Since directed graphs (import graphs, reference graphs, call graphs,
// etc) often arise during software tool development and debugging, this
// command is included in the go.tools repository.
//
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
The digraph command performs queries over unlabelled directed graphs
represented in text form. It is intended to integrate nicely with
typical UNIX command pipelines.

Usage:

your-application | digraph [command]

The support commands are:

nodes
the set of all nodes
degree
the in-degree and out-degree of each node
preds <node> ...
the set of immediate predecessors of the specified nodes
succs <node> ...
the set of immediate successors of the specified nodes
forward <node> ...
the set of nodes transitively reachable from the specified nodes
reverse <node> ...
the set of nodes that transitively reach the specified nodes
somepath <node> <node>
the list of nodes on some arbitrary path from the first node to the second
allpaths <node> <node>
the set of nodes on all paths from the first node to the second
sccs
all strongly connected components (one per line)
scc <node>
the set of nodes nodes strongly connected to the specified one

Input format:

Each line contains zero or more words. Words are separated by unquoted
whitespace; words may contain Go-style double-quoted portions, allowing spaces
and other characters to be expressed.

Each word declares a node, and if there are more than one, an edge from the
first to each subsequent one. The graph is provided on the standard input.

For instance, the following (acyclic) graph specifies a partial order among the
subtasks of getting dressed:

$ cat clothes.txt
socks shoes
"boxer shorts" pants
pants belt shoes
shirt tie sweater
sweater jacket
hat

The line "shirt tie sweater" indicates the two edges shirt -> tie and
shirt -> sweater, not shirt -> tie -> sweater.

Example usage:

Using digraph with existing Go tools:

$ go mod graph | digraph nodes # Operate on the Go module graph.
$ go list -m all | digraph nodes # Operate on the Go package graph.

Show the transitive closure of imports of the digraph tool itself:
$ go list -f '{{.ImportPath}} {{join .Imports " "}}' ... | digraph forward golang.org/x/tools/cmd/digraph

Show which clothes (see above) must be donned before a jacket:
$ digraph reverse jacket

*/
package main // import "golang.org/x/tools/cmd/digraph"

// TODO(adonovan):
// - support input files other than stdin
// - support alternative formats (AT&T GraphViz, CSV, etc),
// a comment syntax, etc.
// - allow queries to nest, like Blaze query language.
//
package main // import "golang.org/x/tools/cmd/digraph"

import (
"bufio"
@@ -28,74 +93,41 @@ import (
"unicode/utf8"
)

const Usage = `digraph: queries over directed graphs in text form.

Graph format:

Each line contains zero or more words. Words are separated by
unquoted whitespace; words may contain Go-style double-quoted portions,
allowing spaces and other characters to be expressed.

Each field declares a node, and if there are more than one,
an edge from the first to each subsequent one.
The graph is provided on the standard input.

For instance, the following (acyclic) graph specifies a partial order
among the subtasks of getting dressed:

% cat clothes.txt
socks shoes
"boxer shorts" pants
pants belt shoes
shirt tie sweater
sweater jacket
hat

The line "shirt tie sweater" indicates the two edges shirt -> tie and
shirt -> sweater, not shirt -> tie -> sweater.

Supported queries:
func usage() {
fmt.Fprintf(os.Stderr, `Usage: your-application | digraph [command]

The support commands are:
nodes
the set of all nodes
degree
the in-degree and out-degree of each node.
preds <label> ...
the in-degree and out-degree of each node
preds <node> ...
the set of immediate predecessors of the specified nodes
succs <label> ...
succs <node> ...
the set of immediate successors of the specified nodes
forward <label> ...
forward <node> ...
the set of nodes transitively reachable from the specified nodes
reverse <label> ...
reverse <node> ...
the set of nodes that transitively reach the specified nodes
somepath <label> <label>
somepath <node> <node>
the list of nodes on some arbitrary path from the first node to the second
allpaths <label> <label>
allpaths <node> <node>
the set of nodes on all paths from the first node to the second
sccs
all strongly connected components (one per line)
scc <label>
scc <node>
the set of nodes nodes strongly connected to the specified one

Example usage:

Show the transitive closure of imports of the digraph tool itself:
% go list -f '{{.ImportPath}}{{.Imports}}' ... | tr '[]' ' ' |
digraph forward golang.org/x/tools/cmd/digraph

Show which clothes (see above) must be donned before a jacket:
% digraph reverse jacket <clothes.txt

`
`)
os.Exit(2)
}

func main() {
flag.Usage = func() { fmt.Fprintln(os.Stderr, Usage) }
flag.Usage = usage
flag.Parse()

args := flag.Args()
if len(args) == 0 {
fmt.Fprintln(os.Stderr, Usage)
return
usage()
}

if err := digraph(args[0], args[1:]); err != nil {
@@ -230,6 +262,47 @@ func (g graph) sccs() []nodeset {
return sccs
}

func (g graph) allpaths(from, to string) error {
// Mark all nodes to "to".
seen := make(nodeset) // value of seen[x] indicates whether x is on some path to "to"
var visit func(node string) bool
visit = func(node string) bool {
reachesTo, ok := seen[node]
if !ok {
reachesTo = node == to
seen[node] = reachesTo
for e := range g[node] {
if visit(e) {
reachesTo = true
}
}
if reachesTo && node != to {
seen[node] = true
}
}
return reachesTo
}
visit(from)

// For each marked node, collect its marked successors.
var edges []string
for n := range seen {
for succ := range g[n] {
if seen[succ] {
edges = append(edges, n+" "+succ)
}
}
}

// Sort (so that this method is deterministic) and print edges.
sort.Strings(edges)
for _, e := range edges {
fmt.Fprintln(stdout, e)
}

return nil
}
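The allpaths implementation above works in two phases: a depth-first walk that marks every node lying on some path from the start node to the target, then a pass that keeps only edges whose endpoints are both marked. A self-contained sketch of the same idea, using hypothetical stand-in types rather than the real cmd/digraph declarations:

```go
package main

import (
	"fmt"
	"sort"
)

// Stand-ins for the digraph types in the diff above (hypothetical).
type nodeset map[string]bool
type graph map[string]nodeset

// allEdgesOnPaths mirrors the new allpaths method: mark every node that can
// reach "to" when walking from "from", then keep only the edges whose
// endpoints are both marked.
func allEdgesOnPaths(g graph, from, to string) []string {
	seen := make(map[string]bool) // seen[x] reports whether x lies on some path to "to"
	var visit func(n string) bool
	visit = func(n string) bool {
		reaches, ok := seen[n]
		if !ok {
			reaches = n == to
			seen[n] = reaches // mark early so cycles terminate
			for succ := range g[n] {
				if visit(succ) {
					reaches = true
				}
			}
			seen[n] = reaches
		}
		return reaches
	}
	visit(from)

	var edges []string
	for n, marked := range seen {
		if !marked {
			continue
		}
		for succ := range g[n] {
			if seen[succ] {
				edges = append(edges, n+" "+succ)
			}
		}
	}
	sort.Strings(edges) // deterministic output, as in the diff
	return edges
}

func main() {
	g := graph{
		"a": {"b": true},
		"b": {"c": true, "d": true},
		"c": {"d": true},
	}
	fmt.Println(allEdgesOnPaths(g, "a", "d")) // [a b b c b d c d]
}
```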

func parse(rd io.Reader) (graph, error) {
g := make(graph)
@@ -252,6 +325,7 @@ func parse(rd io.Reader) (graph, error) {
return g, nil
}

// Overridable for testing purposes.
var stdin io.Reader = os.Stdin
var stdout io.Writer = os.Stdout
@@ -366,33 +440,7 @@ func digraph(cmd string, args []string) error {
if g[to] == nil {
return fmt.Errorf("no such 'to' node %q", to)
}

seen := make(nodeset) // value of seen[x] indicates whether x is on some path to 'to'
var visit func(label string) bool
visit = func(label string) bool {
reachesTo, ok := seen[label]
if !ok {
reachesTo = label == to

seen[label] = reachesTo
for e := range g[label] {
if visit(e) {
reachesTo = true
}
}
seen[label] = reachesTo
}
return reachesTo
}
if !visit(from) {
return fmt.Errorf("no path from %q to %q", from, to)
}
for label, reachesTo := range seen {
if !reachesTo {
delete(seen, label)
}
}
seen.sort().println("\n")
g.allpaths(from, to)

case "sccs":
if len(args) != 0 {

@@ -1,3 +1,6 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main

import (
@@ -26,35 +29,34 @@ d c
`

for _, test := range []struct {
name string
input string
cmd string
args []string
want string
}{
{g1, "nodes", nil, "belt\nhat\njacket\npants\nshirt\nshoes\nshorts\nsocks\nsweater\ntie\n"},
{g1, "reverse", []string{"jacket"}, "jacket\nshirt\nsweater\n"},
{g1, "forward", []string{"socks"}, "shoes\nsocks\n"},
{g1, "forward", []string{"socks", "sweater"}, "jacket\nshoes\nsocks\nsweater\n"},

{g2, "allpaths", []string{"a", "d"}, "a\nb\nc\nd\n"},

{g2, "sccs", nil, "a\nb\nc d\n"},
{g2, "scc", []string{"d"}, "c\nd\n"},
{g2, "succs", []string{"a"}, "b\nc\n"},
{g2, "preds", []string{"c"}, "a\nd\n"},
{g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"},
{"nodes", g1, "nodes", nil, "belt\nhat\njacket\npants\nshirt\nshoes\nshorts\nsocks\nsweater\ntie\n"},
{"reverse", g1, "reverse", []string{"jacket"}, "jacket\nshirt\nsweater\n"},
{"forward", g1, "forward", []string{"socks"}, "shoes\nsocks\n"},
{"forward multiple args", g1, "forward", []string{"socks", "sweater"}, "jacket\nshoes\nsocks\nsweater\n"},
{"scss", g2, "sccs", nil, "a\nb\nc d\n"},
{"scc", g2, "scc", []string{"d"}, "c\nd\n"},
{"succs", g2, "succs", []string{"a"}, "b\nc\n"},
{"preds", g2, "preds", []string{"c"}, "a\nd\n"},
{"preds multiple args", g2, "preds", []string{"c", "d"}, "a\nb\nc\nd\n"},
} {
t.Run(test.name, func(t *testing.T) {
stdin = strings.NewReader(test.input)
stdout = new(bytes.Buffer)
if err := digraph(test.cmd, test.args); err != nil {
t.Error(err)
continue
t.Fatal(err)
}

got := stdout.(fmt.Stringer).String()
if got != test.want {
t.Errorf("digraph(%s, %s) = %q, want %q", test.cmd, test.args, got, test.want)
t.Errorf("digraph(%s, %s) = got %q, want %q", test.cmd, test.args, got, test.want)
}
})
}

// TODO(adonovan):
@@ -62,6 +64,110 @@ d c
// - test errors
}

func TestAllpaths(t *testing.T) {
for _, test := range []struct {
name string
in string
to string // from is always "A"
want string
}{
{
name: "Basic",
in: "A B\nB C",
to: "B",
want: "A B\n",
},
{
name: "Long",
in: "A B\nB C\n",
to: "C",
want: "A B\nB C\n",
},
{
name: "Cycle Basic",
in: "A B\nB A",
to: "B",
want: "A B\nB A\n",
},
{
name: "Cycle Path Out",
// A <-> B -> C -> D
in: "A B\nB A\nB C\nC D",
to: "C",
want: "A B\nB A\nB C\n",
},
{
name: "Cycle Path Out Further Out",
// A -> B <-> C -> D -> E
in: "A B\nB C\nC D\nC B\nD E",
to: "D",
want: "A B\nB C\nC B\nC D\n",
},
{
name: "Two Paths Basic",
// /-> C --\
// A -> B -- -> E -> F
// \-> D --/
in: "A B\nB C\nC E\nB D\nD E\nE F",
to: "E",
want: "A B\nB C\nB D\nC E\nD E\n",
},
{
name: "Two Paths With One Immediately From Start",
// /-> B -+ -> D
// A -- |
// \-> C <+
in: "A B\nA C\nB C\nB D",
to: "C",
want: "A B\nA C\nB C\n",
},
{
name: "Two Paths Further Up",
// /-> B --\
// A -- -> D -> E -> F
// \-> C --/
in: "A B\nA C\nB D\nC D\nD E\nE F",
to: "E",
want: "A B\nA C\nB D\nC D\nD E\n",
},
{
// We should include A - C - D even though it's further up the
// second path than D (which would already be in the graph by
// the time we get around to integrating the second path).
name: "Two Splits",
// /-> B --\ /-> E --\
// A -- -> D -- -> G -> H
// \-> C --/ \-> F --/
in: "A B\nA C\nB D\nC D\nD E\nD F\nE G\nF G\nG H",
to: "G",
want: "A B\nA C\nB D\nC D\nD E\nD F\nE G\nF G\n",
},
{
// D - E should not be duplicated.
name: "Two Paths - Two Splits With Gap",
// /-> B --\ /-> F --\
// A -- -> D -> E -- -> H -> I
// \-> C --/ \-> G --/
in: "A B\nA C\nB D\nC D\nD E\nE F\nE G\nF H\nG H\nH I",
to: "H",
want: "A B\nA C\nB D\nC D\nD E\nE F\nE G\nF H\nG H\n",
},
} {
t.Run(test.name, func(t *testing.T) {
stdin = strings.NewReader(test.in)
stdout = new(bytes.Buffer)
if err := digraph("allpaths", []string{"A", test.to}); err != nil {
t.Fatal(err)
}

got := stdout.(fmt.Stringer).String()
if got != test.want {
t.Errorf("digraph(allpaths, A, %s) = got %q, want %q", test.to, got, test.want)
}
})
}
}

func TestSplit(t *testing.T) {
for _, test := range []struct {
line string

@@ -19,5 +19,5 @@ import (

func main() {
debug.Version += "-cmd.gopls"
tool.Main(context.Background(), cmd.New("", nil), os.Args[1:])
tool.Main(context.Background(), cmd.New("gopls-legacy", "", nil), os.Args[1:])
}

@@ -99,6 +99,16 @@ func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
s.mu.Unlock()
}

func (s *Set) AllObjectFacts() []analysis.ObjectFact {
var facts []analysis.ObjectFact
for k, v := range s.m {
if k.obj != nil {
facts = append(facts, analysis.ObjectFact{k.obj, v})
}
}
return facts
}

// ImportPackageFact implements analysis.Pass.ImportPackageFact.
func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
if pkg == nil {
@@ -122,6 +132,16 @@ func (s *Set) ExportPackageFact(fact analysis.Fact) {
s.mu.Unlock()
}

func (s *Set) AllPackageFacts() []analysis.PackageFact {
var facts []analysis.PackageFact
for k, v := range s.m {
if k.obj == nil {
facts = append(facts, analysis.PackageFact{k.pkg, v})
}
}
return facts
}

// gobFact is the Gob declaration of a serialized fact.
type gobFact struct {
PkgPath string // path of package

@@ -334,8 +334,10 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: facts.ImportObjectFact,
ExportObjectFact: facts.ExportObjectFact,
AllObjectFacts: facts.AllObjectFacts,
ImportPackageFact: facts.ImportPackageFact,
ExportPackageFact: facts.ExportPackageFact,
AllPackageFacts: facts.AllPackageFacts,
}

t0 := time.Now()
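With AllObjectFacts and AllPackageFacts now exposed on the fact set and wired into the analysis.Pass above, an analyzer can enumerate every fact visible to it. A minimal sketch (an illustrative analyzer, not part of this change; a real analyzer that exchanges facts must also declare FactTypes):

```go
package factdump

import (
	"log"

	"golang.org/x/tools/go/analysis"
)

// Analyzer logs the facts visible to each pass. Purely illustrative.
var Analyzer = &analysis.Analyzer{
	Name: "factdump",
	Doc:  "log every object and package fact visible to the pass",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		for _, of := range pass.AllObjectFacts() {
			log.Printf("%s: object fact %T on %s", pass.Pkg.Path(), of.Fact, of.Object.Name())
		}
		for _, pf := range pass.AllPackageFacts() {
			log.Printf("%s: package fact %T on package %s", pass.Pkg.Path(), pf.Fact, pf.Package.Path())
		}
		return nil, nil
	},
}
```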
@@ -149,7 +149,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) {
}

case token.FALLTHROUGH:
for t := b.targets; t != nil; t = t.tail {
for t := b.targets; t != nil && block == nil; t = t.tail {
block = t._fallthrough
}

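The one-line change above makes the control-flow builder stop at the innermost enclosing target that has a fallthrough block instead of walking the whole target stack. The nesting it has to handle looks like the following ordinary Go program, shown only to illustrate where fallthrough must bind:

```go
package main

import "fmt"

// A fallthrough transfers control to the next case of the innermost switch;
// the added "&& block == nil" condition stops the builder's search at the
// first (innermost) target that actually carries a fallthrough block.
func classify(x, y int) string {
	switch {
	case x > 0:
		switch {
		case y > 0:
			fallthrough // falls into the y == 0 case of the inner switch only
		case y == 0:
			return "inner fallthrough taken"
		}
		return "inner switch done"
	default:
		return "outer default"
	}
}

func main() {
	fmt.Println(classify(1, 1))  // inner fallthrough taken
	fmt.Println(classify(1, -1)) // inner switch done
	fmt.Println(classify(-1, 0)) // outer default
}
```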
@@ -811,7 +811,15 @@ func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, err
// Import of incomplete package: this indicates a cycle.
fromPath := from.Pkg.Path()
if cycle := imp.findPath(path, fromPath); cycle != nil {
cycle = append([]string{fromPath}, cycle...)
// Normalize cycle: start from alphabetically largest node.
pos, start := -1, ""
for i, s := range cycle {
if pos < 0 || s > start {
pos, start = i, s
}
}
cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest
cycle = append(cycle, cycle[0]) // add start node to end to show cycliness
return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
}

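The new error path normalizes the reported cycle so the same cycle always prints the same way: rotate it to begin at its alphabetically largest element, then repeat the start node at the end to make the loop visible. A standalone sketch of that normalization:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeCycle mirrors the rotation in the diff above.
func normalizeCycle(cycle []string) string {
	pos, start := -1, ""
	for i, s := range cycle {
		if pos < 0 || s > start {
			pos, start = i, s
		}
	}
	cycle = append(cycle, cycle[:pos]...)[pos:] // rotate to start from the largest node
	cycle = append(cycle, cycle[0])             // close the loop
	return strings.Join(cycle, " -> ")
}

func main() {
	fmt.Println(normalizeCycle([]string{"b", "c", "a"}))
	// Output: c -> a -> b -> c
}
```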
@@ -316,9 +316,7 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer

startWalk := time.Now()
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
if debug {
log.Printf("%v for walk", time.Since(startWalk))
}
cfg.Logf("%v for walk", time.Since(startWalk))

// Weird special case: the top-level package in a module will be in
// whatever directory the user checked the repository out into. It's
@@ -759,11 +757,9 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
cmd.Dir = cfg.Dir
cmd.Stdout = stdout
cmd.Stderr = stderr
if debug {
defer func(start time.Time) {
log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
cfg.Logf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
}(time.Now())
}

if err := cmd.Run(); err != nil {
// Check for 'go' executable not being found.

@@ -103,6 +103,12 @@ type Config struct {
// If Context is nil, the load cannot be cancelled.
Context context.Context

// Logf is the logger for the config.
// If the user provides a logger, debug logging is enabled.
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the logger is nil, default to log.Printf.
Logf func(format string, args ...interface{})

// Dir is the directory in which to run the build system's query tool
// that provides information about the packages.
// If Dir is empty, the tool is run in the current directory.
@@ -429,6 +435,17 @@ func newLoader(cfg *Config) *loader {
}
if cfg != nil {
ld.Config = *cfg
// If the user has provided a logger, use it.
ld.Config.Logf = cfg.Logf
}
if ld.Config.Logf == nil {
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the user has not provided a logger, default to log.Printf.
if debug {
ld.Config.Logf = log.Printf
} else {
ld.Config.Logf = func(format string, args ...interface{}) {}
}
}
if ld.Config.Mode == 0 {
ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
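The new Config.Logf field lets a caller of go/packages capture the driver's debug output directly, without setting GOPACKAGESDEBUG. A minimal sketch of how it might be used (the load pattern is made up for illustration):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles,
		// Supplying Logf enables the same debug output that GOPACKAGESDEBUG
		// would, but routes it to a logger of our choice.
		Logf: log.New(os.Stderr, "packages: ", log.LstdFlags).Printf,
	}
	pkgs, err := packages.Load(cfg, "golang.org/x/tools/...")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d packages", len(pkgs))
}
```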
@@ -930,6 +930,12 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) {
"b/b.go": `package b; import "golang.org/fake/c"; const B = "b" + c.C`,
"c/c.go": `package c; const C = "c"`,
"d/d.go": `package d; const D = "d"`,

// TODO: Remove these temporary files when golang.org/issue/33157 is resolved.
filepath.Join("e/e_temp.go"): ``,
filepath.Join("f/f_temp.go"): ``,
filepath.Join("g/g_temp.go"): ``,
filepath.Join("h/h_temp.go"): ``,
}}})
defer exported.Cleanup()

@@ -986,7 +992,11 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) {
} {
exported.Config.Overlay = test.overlay
exported.Config.Mode = packages.LoadAllSyntax
initial, err := packages.Load(exported.Config, "golang.org/fake/e")
exported.Config.Logf = t.Logf

// With an overlay, we don't know the expected import path,
// so load with the absolute path of the directory.
initial, err := packages.Load(exported.Config, filepath.Join(dir, "e"))
if err != nil {
t.Error(err)
continue

@@ -2,6 +2,6 @@ module golang.org/x/tools/gopls

go 1.11

require golang.org/x/tools v0.0.0-20190710153321-831012c29e42
require golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca

replace golang.org/x/tools => ../

@@ -1,8 +1,8 @@
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca h1:SqwJrz6xPBlCUltcEHz2/p01HRPR+VGD+aYLikk8uas=
golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=

@@ -17,5 +17,5 @@ import (
)

func main() {
tool.Main(context.Background(), cmd.New("", nil), os.Args[1:])
tool.Main(context.Background(), cmd.New("gopls", "", nil), os.Args[1:])
}

@@ -181,8 +181,8 @@ func collectReferences(f *ast.File) references {
return refs
}

// collectImports returns all the imports in f, keyed by their package name as
// determined by pathToName. Unnamed imports (., _) and "C" are ignored.
// collectImports returns all the imports in f.
// Unnamed imports (., _) and "C" are ignored.
func collectImports(f *ast.File) []*importInfo {
var imports []*importInfo
for _, imp := range f.Imports {
@@ -272,7 +272,7 @@ func (p *pass) loadPackageNames(imports []*importInfo) error {
unknown = append(unknown, imp.importPath)
}

names, err := p.env.getResolver().loadPackageNames(unknown, p.srcDir)
names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir)
if err != nil {
return err
}
@@ -444,7 +444,7 @@ func apply(fset *token.FileSet, f *ast.File, fixes []*importFix) bool {
case setImportName:
// Find the matching import path and change the name.
for _, spec := range f.Imports {
path := strings.Trim(spec.Path.Value, `""`)
path := strings.Trim(spec.Path.Value, `"`)
if path == fix.info.importPath {
spec.Name = &ast.Ident{
Name: fix.info.name,
@@ -514,7 +514,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
return err
}

// getFixes gets the getFixes that need to be made to f in order to fix the imports.
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*importFix, error) {
abs, err := filepath.Abs(filename)
@@ -595,7 +595,7 @@ type ProcessEnv struct {
// Logf is the default logger for the ProcessEnv.
Logf func(format string, args ...interface{})

resolver resolver
resolver Resolver
}

func (e *ProcessEnv) env() []string {
@@ -617,7 +617,7 @@ func (e *ProcessEnv) env() []string {
return env
}

func (e *ProcessEnv) getResolver() resolver {
func (e *ProcessEnv) GetResolver() Resolver {
if e.resolver != nil {
return e.resolver
}
@@ -631,7 +631,7 @@ func (e *ProcessEnv) getResolver() resolver {
e.resolver = &gopathResolver{env: e}
return e.resolver
}
e.resolver = &moduleResolver{env: e}
e.resolver = &ModuleResolver{env: e}
return e.resolver
}

@@ -700,20 +700,23 @@ func addStdlibCandidates(pass *pass, refs references) {
}
}

// A resolver does the build-system-specific parts of goimports.
type resolver interface {
// A Resolver does the build-system-specific parts of goimports.
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan finds (at least) the packages satisfying refs. The returned slice is unordered.
scan(refs references) ([]*pkg, error)
}

// gopathResolver implements resolver for GOPATH and module workspaces using go/packages.
// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
type goPackagesResolver struct {
env *ProcessEnv
}

func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if len(importPaths) == 0 {
return nil, nil
}
cfg := r.env.newPackagesConfig(packages.LoadFiles)
pkgs, err := packages.Load(cfg, importPaths...)
if err != nil {
@@ -758,7 +761,7 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
}

func addExternalCandidates(pass *pass, refs references, filename string) error {
dirScan, err := pass.env.getResolver().scan(refs)
dirScan, err := pass.env.GetResolver().scan(refs)
if err != nil {
return err
}
@@ -867,7 +870,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}

// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
// importPathToName finds out the actual package name, as declared in its .go files.
// If there's a problem, it returns "".
func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) {
// Fast path for standard library without going to disk.
@@ -887,8 +890,8 @@ func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName s
}

// packageDirToName is a faster version of build.Import if
// the only thing desired is the package name. It uses build.FindOnly
// to find the directory and then only parses one file in the package,
// the only thing desired is the package name. Given a directory,
// packageDirToName then only parses one file in the package,
// trusting that the files in the directory are consistent.
func packageDirToName(dir string) (packageName string, err error) {
d, err := os.Open(dir)

@@ -1855,7 +1855,7 @@ func TestImportPathToNameGoPathParse(t *testing.T) {
if strings.Contains(t.Name(), "GoPackages") {
t.Skip("go/packages does not ignore package main")
}
r := t.env.getResolver()
r := t.env.GetResolver()
srcDir := filepath.Dir(t.exported.File("example.net/pkg", "z.go"))
names, err := r.loadPackageNames([]string{"example.net/pkg"}, srcDir)
if err != nil {

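GetResolver and the Resolver interface are now exported, but Resolver's methods are still unexported, so code like the following has to live inside the imports package itself, much as the updated test above does. A hedged sketch (the import path and source directory are invented, and it assumes env.Logf is non-nil):

```go
// resolverExample is a hypothetical in-package helper showing the exported
// entry point: GetResolver picks a module-aware or GOPATH resolver for the
// environment, and loadPackageNames maps import paths to package names.
func resolverExample(env *ProcessEnv) error {
	r := env.GetResolver()
	names, err := r.loadPackageNames([]string{"golang.org/x/tools/go/packages"}, "/tmp/src")
	if err != nil {
		return err
	}
	for path, name := range names {
		env.Logf("%s -> package %s", path, name)
	}
	return nil
}
```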
@@ -18,37 +18,39 @@ import (
"golang.org/x/tools/internal/module"
)

// moduleResolver implements resolver for modules using the go command as little
// ModuleResolver implements resolver for modules using the go command as little
// as feasible.
type moduleResolver struct {
type ModuleResolver struct {
env *ProcessEnv

initialized bool
main *moduleJSON
modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*moduleJSON // ...or Dir.
Initialized bool
Main *ModuleJSON
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
ModsByDir []*ModuleJSON // ...or Dir.

ModCachePkgs map[string]*pkg // Packages in the mod cache, keyed by absolute directory.
}

type moduleJSON struct {
type ModuleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *moduleJSON // replaced by this module
Replace *ModuleJSON // replaced by this module
Time *time.Time // time version was created
Update *moduleJSON // available update, if any (with -u)
Update *ModuleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
Error *moduleErrorJSON // error loading module
Error *ModuleErrorJSON // error loading module
}

type moduleErrorJSON struct {
type ModuleErrorJSON struct {
Err string // the error itself
}

func (r *moduleResolver) init() error {
if r.initialized {
func (r *ModuleResolver) init() error {
if r.Initialized {
return nil
}
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
@@ -56,7 +58,7 @@ func (r *moduleResolver) init() error {
return err
}
for dec := json.NewDecoder(stdout); dec.More(); {
mod := &moduleJSON{}
mod := &ModuleJSON{}
if err := dec.Decode(mod); err != nil {
return err
}
@@ -67,34 +69,36 @@ func (r *moduleResolver) init() error {
// Can't do anything with a module that's not downloaded.
continue
}
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
r.ModsByModPath = append(r.ModsByModPath, mod)
r.ModsByDir = append(r.ModsByDir, mod)
if mod.Main {
r.main = mod
r.Main = mod
}
}

sort.Slice(r.modsByModPath, func(i, j int) bool {
sort.Slice(r.ModsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByModPath[x].Path, "/")
return strings.Count(r.ModsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.modsByDir, func(i, j int) bool {
sort.Slice(r.ModsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByDir[x].Dir, "/")
return strings.Count(r.ModsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})

r.initialized = true
r.ModCachePkgs = make(map[string]*pkg)

r.Initialized = true
return nil
}

// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
for _, m := range r.modsByModPath {
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
for _, m := range r.ModsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
@@ -123,7 +127,7 @@ func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {

// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
// This is quite tricky and may not be correct. dir could be:
// - a package in the main module.
// - a replace target underneath the main module's directory.
@@ -134,7 +138,7 @@ func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
for _, m := range r.modsByDir {
for _, m := range r.ModsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
@@ -150,7 +154,7 @@ func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {

// dirIsNestedModule reports if dir is contained in a nested module underneath
// mod, not actually in mod.
func dirIsNestedModule(dir string, mod *moduleJSON) bool {
func dirIsNestedModule(dir string, mod *ModuleJSON) bool {
if !strings.HasPrefix(dir, mod.Dir) {
return false
}
@@ -176,7 +180,7 @@ func findModFile(dir string) string {
}
}

func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if err := r.init(); err != nil {
return nil, err
}
@@ -195,7 +199,7 @@ func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}

func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
if err := r.init(); err != nil {
return nil, err
}
@@ -204,15 +208,15 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
roots := []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
if r.main != nil {
roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
if r.Main != nil {
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
}
for _, p := range filepath.SplitList(r.env.GOPATH) {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
}

// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.modsByModPath {
for _, mod := range r.ModsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
@@ -232,6 +236,15 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) {

dupCheck[dir] = true

absDir := dir
// Packages in the module cache are immutable. If we have
// already seen this package on a previous scan of the module
// cache, return that result.
if p, ok := r.ModCachePkgs[absDir]; ok {
result = append(result, p)
return
}

subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
@@ -247,7 +260,7 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
@@ -298,10 +311,18 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
dir = canonicalDir
}

result = append(result, &pkg{
res := &pkg{
importPathShort: VendorlessPath(importPath),
dir: dir,
})
}

switch root.Type {
case gopathwalk.RootModuleCache:
// Save the results of processing this directory.
r.ModCachePkgs[absDir] = res
}

result = append(result, res)
}, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
return result, nil
}

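The scan change above memoizes per-directory results for the module cache, which is safe because module-cache contents are immutable once downloaded. The same caching pattern in miniature (types and paths are hypothetical):

```go
package main

import "fmt"

// pkgInfo stands in for the *pkg value built during a scan.
type pkgInfo struct{ importPath string }

// scanner memoizes results for directories that can never change, in the
// spirit of ModuleResolver.ModCachePkgs in the diff above.
type scanner struct {
	modCache map[string]*pkgInfo
}

func (s *scanner) scanDir(dir string, inModCache bool) *pkgInfo {
	if p, ok := s.modCache[dir]; ok {
		return p // an earlier scan already did the work
	}
	p := &pkgInfo{importPath: "example.com/" + dir} // pretend this is expensive
	if inModCache {
		// Module-cache contents are immutable, so the result can be reused
		// by every later scan.
		s.modCache[dir] = p
	}
	return p
}

func main() {
	s := &scanner{modCache: map[string]*pkgInfo{}}
	a := s.scanDir("m@v1.0.0/pkg", true)
	b := s.scanDir("m@v1.0.0/pkg", true)
	fmt.Println(a == b) // true: the second scan reused the cached result
}
```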
@@ -118,6 +118,25 @@ import _ "example.com"
mt.assertFound("example.com", "x")
}

// Tests that scanning the module cache > 1 time is able to find the same module.
func TestModMultipleScans(t *testing.T) {
mt := setup(t, `
-- go.mod --
module x

require example.com v1.0.0

-- x.go --
package x
import _ "example.com"
`, "")
defer mt.cleanup()

mt.assertScanFinds("example.com", "x")
mt.assertScanFinds("example.com", "x")

}

// Tests that -mod=vendor sort of works. Adapted from mod_getmode_vendor.txt.
func TestModeGetmodeVendor(t *testing.T) {
mt := setup(t, `
@@ -140,7 +159,7 @@ import _ "rsc.io/quote"

mt.env.GOFLAGS = ""
// Clear out the resolver's cache, since we've changed the environment.
mt.resolver = &moduleResolver{env: mt.env}
mt.resolver = &ModuleResolver{env: mt.env}
mt.assertModuleFoundInDir("rsc.io/quote", "quote", `pkg.*mod.*/quote@.*$`)
}

@@ -486,7 +505,7 @@ var proxyDir string
type modTest struct {
*testing.T
env *ProcessEnv
resolver *moduleResolver
resolver *ModuleResolver
cleanup func()
}

@@ -538,7 +557,7 @@ func setup(t *testing.T, main, wd string) *modTest {
return &modTest{
T: t,
env: env,
resolver: &moduleResolver{env: env},
resolver: &ModuleResolver{env: env},
cleanup: func() {
_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {

@@ -0,0 +1,124 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonrpc2

import (
"context"
)

// Handler is the interface used to hook into the mesage handling of an rpc
// connection.
type Handler interface {
// Deliver is invoked to handle incoming requests.
// If the request returns false from IsNotify then the Handler must eventually
// call Reply on the Conn with the supplied request.
// Handlers are called synchronously, they should pass the work off to a go
// routine if they are going to take a long time.
// If Deliver returns true all subsequent handlers will be invoked with
// delivered set to true, and should not attempt to deliver the message.
Deliver(ctx context.Context, r *Request, delivered bool) bool

// Cancel is invoked for cancelled outgoing requests.
// It is okay to use the connection to send notifications, but the context will
// be in the cancelled state, so you must do it with the background context
// instead.
// If Cancel returns true all subsequent handlers will be invoked with
// cancelled set to true, and should not attempt to cancel the message.
Cancel(ctx context.Context, conn *Conn, id ID, cancelled bool) bool

// Log is invoked for all messages flowing through a Conn.
// direction indicates if the message being received or sent
// id is the message id, if not set it was a notification
// elapsed is the time between a call being seen and the response, and is
// negative for anything that is not a response.
// method is the method name specified in the message
// payload is the parameters for a call or notification, and the result for a
// response

// Request is called near the start of processing any request.
Request(ctx context.Context, direction Direction, r *WireRequest) context.Context
// Response is called near the start of processing any response.
Response(ctx context.Context, direction Direction, r *WireResponse) context.Context
// Done is called when any request is fully processed.
// For calls, this means the response has also been processed, for notifies
// this is as soon as the message has been written to the stream.
// If err is set, it implies the request failed.
Done(ctx context.Context, err error)
// Read is called with a count each time some data is read from the stream.
// The read calls are delayed until after the data has been interpreted so
// that it can be attributed to a request/response.
Read(ctx context.Context, bytes int64) context.Context
// Wrote is called each time some data is written to the stream.
Wrote(ctx context.Context, bytes int64) context.Context
// Error is called with errors that cannot be delivered through the normal
// mechanisms, for instance a failure to process a notify cannot be delivered
// back to the other party.
Error(ctx context.Context, err error)
}

// Direction is used to indicate to a logger whether the logged message was being
// sent or received.
type Direction bool

const (
// Send indicates the message is outgoing.
Send = Direction(true)
// Receive indicates the message is incoming.
Receive = Direction(false)
)

func (d Direction) String() string {
switch d {
case Send:
return "send"
case Receive:
return "receive"
default:
panic("unreachable")
}
}

type EmptyHandler struct{}

func (EmptyHandler) Deliver(ctx context.Context, r *Request, delivered bool) bool {
return false
}

func (EmptyHandler) Cancel(ctx context.Context, conn *Conn, id ID, cancelled bool) bool {
return false
}

func (EmptyHandler) Request(ctx context.Context, direction Direction, r *WireRequest) context.Context {
return ctx
}

func (EmptyHandler) Response(ctx context.Context, direction Direction, r *WireResponse) context.Context {
return ctx
}

func (EmptyHandler) Done(ctx context.Context, err error) {
}

func (EmptyHandler) Read(ctx context.Context, bytes int64) context.Context {
return ctx
}

func (EmptyHandler) Wrote(ctx context.Context, bytes int64) context.Context {
return ctx
}

func (EmptyHandler) Error(ctx context.Context, err error) {}

type defaultHandler struct{ EmptyHandler }

func (defaultHandler) Deliver(ctx context.Context, r *Request, delivered bool) bool {
if delivered {
return false
}
if !r.IsNotify() {
r.Reply(ctx, nil, NewErrorf(CodeMethodNotFound, "method %q not found", r.Method))
}
return true
}
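With the Handler interface and EmptyHandler above, a custom handler only has to override the hooks it cares about; everything else falls through to the no-op implementations. A hedged sketch of a logging handler, written as if it lived in this package (wiring is shown in a comment, since Conn.AddHandler appears in the jsonrpc2.go changes below):

```go
package jsonrpc2

import (
	"context"
	"log"
)

// methodLogger is a hypothetical custom Handler: embedding EmptyHandler
// provides no-op implementations of every hook, so only Deliver needs to be
// written. Returning false leaves delivery to later handlers (ultimately the
// defaultHandler, which replies "method not found" for unhandled calls).
type methodLogger struct{ EmptyHandler }

func (methodLogger) Deliver(ctx context.Context, r *Request, delivered bool) bool {
	log.Printf("<- %s (already delivered by an earlier handler: %v)", r.Method, delivered)
	return false
}

// Wiring it up (AddHandler prepends, so the most recently added handler runs
// first):
//
//	conn := NewConn(stream)
//	conn.AddHandler(methodLogger{})
//	err := conn.Run(ctx)
```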
@ -13,26 +13,17 @@ import (
|
|||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/telemetry"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
)
|
||||
|
||||
// Conn is a JSON RPC 2 client server connection.
|
||||
// Conn is bidirectional; it does not have a designated server or client end.
|
||||
type Conn struct {
|
||||
seq int64 // must only be accessed using atomic operations
|
||||
Handler Handler
|
||||
Canceler Canceler
|
||||
Logger Logger
|
||||
Capacity int
|
||||
RejectIfOverloaded bool
|
||||
handlers []Handler
|
||||
stream Stream
|
||||
err error
|
||||
pendingMu sync.Mutex // protects the pending map
|
||||
pending map[ID]chan *wireResponse
|
||||
pending map[ID]chan *WireResponse
|
||||
handlingMu sync.Mutex // protects the handling map
|
||||
handling map[ID]*Request
|
||||
}
|
||||
|
@ -51,73 +42,11 @@ const (
|
|||
type Request struct {
|
||||
conn *Conn
|
||||
cancel context.CancelFunc
|
||||
start time.Time
|
||||
state requestState
|
||||
nextRequest chan struct{}
|
||||
|
||||
// Method is a string containing the method name to invoke.
|
||||
Method string
|
||||
// Params is either a struct or an array with the parameters of the method.
|
||||
Params *json.RawMessage
|
||||
// The id of this request, used to tie the response back to the request.
|
||||
// Will be either a string or a number. If not set, the request is a notify,
|
||||
// and no response is possible.
|
||||
ID *ID
|
||||
}
|
||||
|
||||
// Handler is an option you can pass to NewConn to handle incoming requests.
|
||||
// If the request returns false from IsNotify then the Handler must eventually
|
||||
// call Reply on the Conn with the supplied request.
|
||||
// Handlers are called synchronously, they should pass the work off to a go
|
||||
// routine if they are going to take a long time.
|
||||
type Handler func(context.Context, *Request)
|
||||
|
||||
// Canceler is an option you can pass to NewConn which is invoked for
|
||||
// cancelled outgoing requests.
|
||||
// It is okay to use the connection to send notifications, but the context will
|
||||
// be in the cancelled state, so you must do it with the background context
|
||||
// instead.
|
||||
type Canceler func(context.Context, *Conn, ID)
|
||||
|
||||
type rpcStats struct {
|
||||
server bool
|
||||
method string
|
||||
span trace.Span
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func start(ctx context.Context, server bool, method string, id *ID) (context.Context, *rpcStats) {
|
||||
if method == "" {
|
||||
panic("no method in rpc stats")
|
||||
}
|
||||
s := &rpcStats{
|
||||
server: server,
|
||||
method: method,
|
||||
start: time.Now(),
|
||||
}
|
||||
mode := telemetry.Outbound
|
||||
if server {
|
||||
mode = telemetry.Inbound
|
||||
}
|
||||
ctx, s.span = trace.StartSpan(ctx, method,
|
||||
tag.Tag{Key: telemetry.Method, Value: method},
|
||||
tag.Tag{Key: telemetry.RPCDirection, Value: mode},
|
||||
tag.Tag{Key: telemetry.RPCID, Value: id},
|
||||
)
|
||||
telemetry.Started.Record(ctx, 1)
|
||||
return ctx, s
|
||||
}
|
||||
|
||||
func (s *rpcStats) end(ctx context.Context, err *error) {
|
||||
if err != nil && *err != nil {
|
||||
ctx = telemetry.StatusCode.With(ctx, "ERROR")
|
||||
} else {
|
||||
ctx = telemetry.StatusCode.With(ctx, "OK")
|
||||
}
|
||||
elapsedTime := time.Since(s.start)
|
||||
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
|
||||
telemetry.Latency.Record(ctx, latencyMillis)
|
||||
s.span.End()
|
||||
// The Wire values of the request.
|
||||
WireRequest
|
||||
}
|
||||
|
||||
// NewErrorf builds a Error struct for the suppied message and code.
|
||||
|
@ -133,23 +62,23 @@ func NewErrorf(code int64, format string, args ...interface{}) *Error {
|
|||
// You must call Run for the connection to be active.
|
||||
func NewConn(s Stream) *Conn {
|
||||
conn := &Conn{
|
||||
handlers: []Handler{defaultHandler{}},
|
||||
stream: s,
|
||||
pending: make(map[ID]chan *wireResponse),
|
||||
pending: make(map[ID]chan *WireResponse),
|
||||
handling: make(map[ID]*Request),
|
||||
}
|
||||
// the default handler reports a method error
|
||||
conn.Handler = func(ctx context.Context, r *Request) {
|
||||
if !r.IsNotify() {
|
||||
r.Reply(ctx, nil, NewErrorf(CodeMethodNotFound, "method %q not found", r.Method))
|
||||
}
|
||||
}
|
||||
// the default canceler does nothing
|
||||
conn.Canceler = func(context.Context, *Conn, ID) {}
|
||||
// the default logger does nothing
|
||||
conn.Logger = func(Direction, *ID, time.Duration, string, *json.RawMessage, *Error) {}
|
||||
return conn
|
||||
}
|
||||
|
||||
// AddHandler adds a new handler to the set the connection will invoke.
|
||||
// Handlers are invoked in the reverse order of how they were added, this
|
||||
// allows the most recent addition to be the first one to attempt to handle a
|
||||
// message.
|
||||
func (c *Conn) AddHandler(handler Handler) {
|
||||
// prepend the new handlers so we use them first
|
||||
c.handlers = append([]Handler{handler}, c.handlers...)
|
||||
}
|
||||
|
||||
// Cancel cancels a pending Call on the server side.
|
||||
// The call is identified by its id.
|
||||
// JSON RPC 2 does not specify a cancel message, so cancellation support is not
|
||||
|
@ -168,14 +97,11 @@ func (c *Conn) Cancel(id ID) {
|
|||
// It will return as soon as the notification has been sent, as no response is
|
||||
// possible.
|
||||
func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
|
||||
ctx, rpcStats := start(ctx, false, method, nil)
|
||||
defer rpcStats.end(ctx, &err)
|
||||
|
||||
jsonParams, err := marshalToRaw(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshalling notify parameters: %v", err)
|
||||
}
|
||||
request := &wireRequest{
|
||||
request := &WireRequest{
|
||||
Method: method,
|
||||
Params: jsonParams,
|
||||
}
|
||||
|
@ -183,9 +109,18 @@ func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (e
|
|||
if err != nil {
|
||||
return fmt.Errorf("marshalling notify request: %v", err)
|
||||
}
|
||||
c.Logger(Send, nil, -1, request.Method, request.Params, nil)
|
||||
for _, h := range c.handlers {
|
||||
ctx = h.Request(ctx, Send, request)
|
||||
}
|
||||
defer func() {
|
||||
for _, h := range c.handlers {
|
||||
h.Done(ctx, err)
|
||||
}
|
||||
}()
|
||||
n, err := c.stream.Write(ctx, data)
|
||||
telemetry.SentBytes.Record(ctx, n)
|
||||
for _, h := range c.handlers {
|
||||
ctx = h.Wrote(ctx, n)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -195,13 +130,11 @@ func (c *Conn) Notify(ctx context.Context, method string, params interface{}) (e
|
|||
func (c *Conn) Call(ctx context.Context, method string, params, result interface{}) (err error) {
|
||||
// generate a new request identifier
|
||||
id := ID{Number: atomic.AddInt64(&c.seq, 1)}
|
||||
ctx, rpcStats := start(ctx, false, method, &id)
|
||||
defer rpcStats.end(ctx, &err)
|
||||
jsonParams, err := marshalToRaw(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshalling call parameters: %v", err)
|
||||
}
|
||||
request := &wireRequest{
|
||||
request := &WireRequest{
|
||||
ID: &id,
|
||||
Method: method,
|
||||
Params: jsonParams,
|
||||
|
@ -211,9 +144,12 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
|
|||
if err != nil {
|
||||
return fmt.Errorf("marshalling call request: %v", err)
|
||||
}
|
||||
for _, h := range c.handlers {
|
||||
ctx = h.Request(ctx, Send, request)
|
||||
}
|
||||
// we have to add ourselves to the pending map before we send, otherwise we
|
||||
// are racing the response
|
||||
rchan := make(chan *wireResponse)
|
||||
rchan := make(chan *WireResponse)
|
||||
c.pendingMu.Lock()
|
||||
c.pending[id] = rchan
|
||||
c.pendingMu.Unlock()
|
||||
|
@@ -222,12 +158,15 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
c.pendingMu.Lock()
delete(c.pending, id)
c.pendingMu.Unlock()
for _, h := range c.handlers {
h.Done(ctx, err)
}
}()
// now we are ready to send
before := time.Now()
c.Logger(Send, request.ID, -1, request.Method, request.Params, nil)
n, err := c.stream.Write(ctx, data)
telemetry.SentBytes.Record(ctx, n)
for _, h := range c.handlers {
ctx = h.Wrote(ctx, n)
}
if err != nil {
// sending failed, we will never get a response, so don't leave it pending
return err

@@ -235,8 +174,9 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
// now wait for the response
select {
case response := <-rchan:
elapsed := time.Since(before)
c.Logger(Receive, response.ID, elapsed, request.Method, response.Result, response.Error)
for _, h := range c.handlers {
ctx = h.Response(ctx, Receive, response)
}
// is it an error response?
if response.Error != nil {
return response.Error

@@ -250,7 +190,12 @@ func (c *Conn) Call(ctx context.Context, method string, params, result interface
return nil
case <-ctx.Done():
// allow the handler to propagate the cancel
c.Canceler(ctx, c, id)
cancelled := false
for _, h := range c.handlers {
if h.Cancel(ctx, c, id, cancelled) {
cancelled = true
}
}
return ctx.Err()
}
}

@@ -290,9 +235,6 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
if r.IsNotify() {
return fmt.Errorf("reply not invoked with a valid call")
}
ctx, st := trace.StartSpan(ctx, r.Method+":reply")
defer st.End()

// reply ends the handling phase of a call, so if we are not yet
// parallel we should be now. The go routine is allowed to continue
// to do work after replying, which is why it is important to unlock

@@ -300,12 +242,11 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
r.Parallel()
r.state = requestReplied

elapsed := time.Since(r.start)
var raw *json.RawMessage
if err == nil {
raw, err = marshalToRaw(result)
}
response := &wireResponse{
response := &WireResponse{
Result: raw,
ID: r.ID,
}

@@ -320,9 +261,13 @@ func (r *Request) Reply(ctx context.Context, result interface{}, err error) erro
if err != nil {
return err
}
r.conn.Logger(Send, response.ID, elapsed, r.Method, response.Result, response.Error)
for _, h := range r.conn.handlers {
ctx = h.Response(ctx, Send, response)
}
n, err := r.conn.stream.Write(ctx, data)
telemetry.SentBytes.Record(ctx, n)
for _, h := range r.conn.handlers {
ctx = h.Wrote(ctx, n)
}

if err != nil {
// TODO(iancottrell): if a stream write fails, we really need to shut down

@@ -360,7 +305,7 @@ type combined struct {
// caused the termination.
// It must be called exactly once for each Conn.
// It returns only when the reader is closed or there is an error in the stream.
func (c *Conn) Run(ctx context.Context) error {
func (c *Conn) Run(runCtx context.Context) error {
// we need to make the next request "lock" in an unlocked state to allow
// the first incoming request to proceed. All later requests are unlocked
// by the preceding request going to parallel mode.

@@ -368,7 +313,7 @@ func (c *Conn) Run(ctx context.Context) error {
close(nextRequest)
for {
// get the data for a message
data, n, err := c.stream.Read(ctx)
data, n, err := c.stream.Read(runCtx)
if err != nil {
// the stream failed, we cannot continue
return err

@@ -378,26 +323,32 @@ func (c *Conn) Run(ctx context.Context) error {
if err := json.Unmarshal(data, msg); err != nil {
// a badly formed message arrived, log it and continue
// we trust the stream to have isolated the error to just this message
c.Logger(Receive, nil, -1, "", nil, NewErrorf(0, "unmarshal failed: %v", err))
for _, h := range c.handlers {
h.Error(runCtx, fmt.Errorf("unmarshal failed: %v", err))
}
continue
}
// work out which kind of message we have
switch {
case msg.Method != "":
// if method is set it must be a request
reqCtx, cancelReq := context.WithCancel(ctx)
reqCtx, rpcStats := start(reqCtx, true, msg.Method, msg.ID)
telemetry.ReceivedBytes.Record(ctx, n)
reqCtx, cancelReq := context.WithCancel(runCtx)
thisRequest := nextRequest
nextRequest = make(chan struct{})
req := &Request{
conn: c,
cancel: cancelReq,
nextRequest: nextRequest,
start: time.Now(),
WireRequest: WireRequest{
VersionTag: msg.VersionTag,
Method: msg.Method,
Params: msg.Params,
ID: msg.ID,
},
}
for _, h := range c.handlers {
reqCtx = h.Request(reqCtx, Receive, &req.WireRequest)
reqCtx = h.Read(reqCtx, n)
}
c.setHandling(req, true)
go func() {

@@ -409,11 +360,17 @@ func (c *Conn) Run(ctx context.Context) error {
req.Reply(reqCtx, nil, NewErrorf(CodeInternalError, "method %q did not reply", req.Method))
}
req.Parallel()
rpcStats.end(reqCtx, nil)
for _, h := range c.handlers {
h.Done(reqCtx, err)
}
cancelReq()
}()
c.Logger(Receive, req.ID, -1, req.Method, req.Params, nil)
c.Handler(reqCtx, req)
delivered := false
for _, h := range c.handlers {
if h.Deliver(reqCtx, req, delivered) {
delivered = true
}
}
}()
case msg.ID != nil:
// we have a response, get the pending entry from the map

@@ -424,7 +381,7 @@ func (c *Conn) Run(ctx context.Context) error {
}
c.pendingMu.Unlock()
// and send the reply to the channel
response := &wireResponse{
response := &WireResponse{
Result: msg.Result,
Error: msg.Error,
ID: msg.ID,

@@ -432,7 +389,9 @@ func (c *Conn) Run(ctx context.Context) error {
rchan <- response
close(rchan)
default:
c.Logger(Receive, nil, -1, "", nil, NewErrorf(0, "message not a call, notify or response, ignoring"))
for _, h := range c.handlers {
h.Error(runCtx, fmt.Errorf("message not a call, notify or response, ignoring"))
}
}
}
}

@@ -10,9 +10,11 @@ import (
"flag"
"fmt"
"io"
"log"
"path"
"reflect"
"testing"
"time"

"golang.org/x/tools/internal/jsonrpc2"
)

@@ -106,10 +108,7 @@ func run(ctx context.Context, t *testing.T, withHeaders bool, r io.ReadCloser, w
stream = jsonrpc2.NewStream(r, w)
}
conn := jsonrpc2.NewConn(stream)
conn.Handler = handle
if *logRPC {
conn.Logger = jsonrpc2.Log
}
conn.AddHandler(&handle{log: *logRPC})
go func() {
defer func() {
r.Close()

@@ -122,36 +121,82 @@ func run(ctx context.Context, t *testing.T, withHeaders bool, r io.ReadCloser, w
return conn
}

func handle(ctx context.Context, r *jsonrpc2.Request) {
type handle struct {
log bool
}

func (h *handle) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
switch r.Method {
case "no_args":
if r.Params != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
return
return true
}
r.Reply(ctx, true, nil)
case "one_string":
var v string
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return
return true
}
r.Reply(ctx, "got:"+v, nil)
case "one_number":
var v int
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return
return true
}
r.Reply(ctx, fmt.Sprintf("got:%d", v), nil)
case "join":
var v []string
if err := json.Unmarshal(*r.Params, &v); err != nil {
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err.Error()))
return
return true
}
r.Reply(ctx, path.Join(v...), nil)
default:
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
}
return true
}

func (h *handle) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
return false
}

func (h *handle) Request(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireRequest) context.Context {
if h.log {
if r.ID != nil {
log.Printf("%v call [%v] %s %v", direction, r.ID, r.Method, r.Params)
} else {
log.Printf("%v notification %s %v", direction, r.Method, r.Params)
}
ctx = context.WithValue(ctx, "method", r.Method)
ctx = context.WithValue(ctx, "start", time.Now())
}
return ctx
}

func (h *handle) Response(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireResponse) context.Context {
if h.log {
method := ctx.Value("method")
elapsed := time.Since(ctx.Value("start").(time.Time))
log.Printf("%v response in %v [%v] %s %v", direction, elapsed, r.ID, method, r.Result)
}
return ctx
}

func (h *handle) Done(ctx context.Context, err error) {
}

func (h *handle) Read(ctx context.Context, bytes int64) context.Context {
return ctx
}

func (h *handle) Wrote(ctx context.Context, bytes int64) context.Context {
return ctx
}

func (h *handle) Error(ctx context.Context, err error) {
log.Printf("%v", err)
}

@@ -1,59 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package jsonrpc2

import (
"encoding/json"
"log"
"time"
)

// Logger is an option you can pass to NewConn which is invoked for
// all messages flowing through a Conn.
// direction indicates if the message being recieved or sent
// id is the message id, if not set it was a notification
// elapsed is the time between a call being seen and the response, and is
// negative for anything that is not a response.
// method is the method name specified in the message
// payload is the parameters for a call or notification, and the result for a
// response
type Logger = func(direction Direction, id *ID, elapsed time.Duration, method string, payload *json.RawMessage, err *Error)

// Direction is used to indicate to a logger whether the logged message was being
// sent or received.
type Direction bool

const (
// Send indicates the message is outgoing.
Send = Direction(true)
// Receive indicates the message is incoming.
Receive = Direction(false)
)

func (d Direction) String() string {
switch d {
case Send:
return "send"
case Receive:
return "receive"
default:
panic("unreachable")
}
}

// Log is an implementation of Logger that outputs using log.Print
// It is not used by default, but is provided for easy logging in users code.
func Log(direction Direction, id *ID, elapsed time.Duration, method string, payload *json.RawMessage, err *Error) {
switch {
case err != nil:
log.Printf("%v failure [%v] %s %v", direction, id, method, err)
case id == nil:
log.Printf("%v notification %s %s", direction, method, *payload)
case elapsed >= 0:
log.Printf("%v response in %v [%v] %s %s", direction, elapsed, id, method, *payload)
default:
log.Printf("%v call [%v] %s %s", direction, id, method, *payload)
}
}

@@ -34,8 +34,8 @@ const (
CodeServerOverloaded = -32000
)

// wireRequest is sent to a server to represent a Call or Notify operaton.
type wireRequest struct {
// WireRequest is sent to a server to represent a Call or Notify operaton.
type WireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag VersionTag `json:"jsonrpc"`
// Method is a string containing the method name to invoke.

@@ -48,11 +48,11 @@ type wireRequest struct {
ID *ID `json:"id,omitempty"`
}

// wireResponse is a reply to a Request.
// WireResponse is a reply to a Request.
// It will always have the ID field set to tie it back to a request, and will
// have either the Result or Error fields set depending on whether it is a
// success or failure response.
type wireResponse struct {
type WireResponse struct {
// VersionTag is always encoded as the string "2.0"
VersionTag VersionTag `json:"jsonrpc"`
// Result is the response value, and is required on success.

@@ -14,7 +14,6 @@ import (

"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
)

@@ -72,12 +71,11 @@ func (c *cache) GetFile(uri span.URI) source.FileHandle {
}
}

func (c *cache) NewSession(log xlog.Logger) source.Session {
func (c *cache) NewSession(ctx context.Context) source.Session {
index := atomic.AddInt64(&sessionIndex, 1)
s := &session{
cache: c,
id: strconv.FormatInt(index, 10),
log: log,
overlays: make(map[span.URI]*overlay),
filesWatchMap: NewWatchMap(),
}

@@ -16,6 +16,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)

@@ -68,18 +70,22 @@ func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) {
// This goroutine becomes responsible for populating
// the entry and broadcasting its readiness.
e.pkg, e.err = imp.typeCheck(ctx, id)
if e.err != nil {
// Don't cache failed packages. If we didn't successfully cache the package
// in each file, then this pcache entry won't get invalidated as those files
// change.
imp.view.pcache.mu.Lock()
if imp.view.pcache.packages[id] == e {
delete(imp.view.pcache.packages, id)
}
imp.view.pcache.mu.Unlock()
}
close(e.ready)
}

if e.err != nil {
// If the import had been previously canceled, and that error cached, try again.
if e.err == context.Canceled && ctx.Err() == nil {
imp.view.pcache.mu.Lock()
// Clear out canceled cache entry if it is still there.
if imp.view.pcache.packages[id] == e {
delete(imp.view.pcache.packages, id)
}
imp.view.pcache.mu.Unlock()
return imp.getPkg(ctx, id)
}
return nil, e.err

@@ -89,8 +95,8 @@ func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) {
}

func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) {
ctx, ts := trace.StartSpan(ctx, "cache.importer.typeCheck")
defer ts.End()
ctx, done := trace.StartSpan(ctx, "cache.importer.typeCheck", telemetry.Package.Of(id))
defer done()
meta, ok := imp.view.mcache.packages[id]
if !ok {
return nil, fmt.Errorf("no metadata for %v", id)

@@ -117,42 +123,42 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
mode = source.ParseExported
}
var (
files []*astFile
phs []source.ParseGoHandle
files = make([]*ast.File, len(meta.files))
errors = make([]error, len(meta.files))
wg sync.WaitGroup
)
for _, filename := range meta.files {
uri := span.FileURI(filename)
f, err := imp.view.getFile(ctx, uri)
if err != nil {
log.Error(ctx, "unable to get file", err, telemetry.File.Of(f.URI()))
continue
}
ph := imp.view.session.cache.ParseGoHandle(f.Handle(ctx), mode)
phs = append(phs, ph)
files = append(files, &astFile{
uri: ph.File().Identity().URI,
isTrimmed: mode == source.ParseExported,
ph: ph,
})
pkg.files = append(pkg.files, imp.view.session.cache.ParseGoHandle(f.Handle(ctx), mode))
}
for i, ph := range phs {
for i, ph := range pkg.files {
wg.Add(1)
go func(i int, ph source.ParseGoHandle) {
defer wg.Done()

files[i].file, files[i].err = ph.Parse(ctx)
files[i], errors[i] = ph.Parse(ctx)
}(i, ph)
}
wg.Wait()

var i int
for _, f := range files {
pkg.files = append(pkg.files, f)

if f.err != nil {
if f.err == context.Canceled {
return nil, f.err
if f != nil {
files[i] = f
i++
}
imp.view.session.cache.appendPkgError(pkg, f.err)
}
for _, err := range errors {
if err == context.Canceled {
return nil, err
}
if err != nil {
imp.view.session.cache.appendPkgError(pkg, err)
}
}

@@ -188,7 +194,7 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
check := types.NewChecker(cfg, imp.fset, pkg.types, pkg.typesInfo)

// Ignore type-checking errors.
check.Files(pkg.GetSyntax())
check.Files(files)

// Add every file in this package to our cache.
if err := imp.cachePackage(ctx, pkg, meta, mode); err != nil {

@@ -199,16 +205,17 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error)
}

func (imp *importer) cachePackage(ctx context.Context, pkg *pkg, meta *metadata, mode source.ParseMode) error {
for _, file := range pkg.files {
f, err := imp.view.getFile(ctx, file.uri)
for _, ph := range pkg.files {
uri := ph.File().Identity().URI
f, err := imp.view.getFile(ctx, uri)
if err != nil {
return fmt.Errorf("no such file %s: %v", file.uri, err)
return fmt.Errorf("no such file %s: %v", uri, err)
}
gof, ok := f.(*goFile)
if !ok {
return fmt.Errorf("non Go file %s", file.uri)
return fmt.Errorf("non Go file %s", uri)
}
if err := imp.cachePerFile(gof, file, pkg); err != nil {
if err := imp.cachePerFile(gof, ph, pkg); err != nil {
return fmt.Errorf("failed to cache file %s: %v", gof.URI(), err)
}
}

@@ -227,7 +234,7 @@ func (imp *importer) cachePackage(ctx context.Context, pkg *pkg, meta *metadata,
return nil
}

func (imp *importer) cachePerFile(gof *goFile, file *astFile, p *pkg) error {
func (imp *importer) cachePerFile(gof *goFile, ph source.ParseGoHandle, p *pkg) error {
gof.mu.Lock()
defer gof.mu.Unlock()

@@ -237,25 +244,11 @@ func (imp *importer) cachePerFile(gof *goFile, file *astFile, p *pkg) error {
}
gof.pkgs[p.id] = p

// Get the AST for the file.
gof.ast = file
if gof.ast == nil {
return fmt.Errorf("no AST information for %s", file.uri)
file, err := ph.Parse(imp.ctx)
if file == nil {
return fmt.Errorf("no AST for %s: %v", ph.File().Identity().URI, err)
}
if gof.ast.file == nil {
return fmt.Errorf("no AST for %s", file.uri)
}
// Get the *token.File directly from the AST.
pos := gof.ast.file.Pos()
if !pos.IsValid() {
return fmt.Errorf("AST for %s has an invalid position", file.uri)
}
tok := imp.view.session.cache.FileSet().File(pos)
if tok == nil {
return fmt.Errorf("no *token.File for %s", file.uri)
}
gof.token = tok
gof.imports = gof.ast.file.Imports
gof.imports = file.Imports
return nil
}

@@ -10,6 +10,7 @@ import (
"os"

"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)

@@ -51,8 +52,8 @@ func (h *nativeFileHandle) Kind() source.FileKind {
}

func (h *nativeFileHandle) Read(ctx context.Context) ([]byte, string, error) {
ctx, ts := trace.StartSpan(ctx, "cache.nativeFileHandle.Read")
defer ts.End()
ctx, done := trace.StartSpan(ctx, "cache.nativeFileHandle.Read", telemetry.File.Of(h.identity.URI.Filename()))
defer done()
//TODO: this should fail if the version is not the same as the handle
data, err := ioutil.ReadFile(h.identity.URI.Filename())
if err != nil {

@@ -34,8 +34,6 @@ type fileBase struct {

handleMu sync.Mutex
handle source.FileHandle

token *token.File
}

func basename(filename string) string {

@@ -6,11 +6,14 @@ package cache

import (
"context"
"fmt"
"go/ast"
"go/token"
"sync"

"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)

@@ -33,7 +36,6 @@ type goFile struct {

imports []*ast.ImportSpec

ast *astFile
pkgs map[packageID]*pkg
meta map[packageID]*metadata
}

@@ -46,71 +48,53 @@ type astFile struct {
isTrimmed bool
}

func (f *goFile) GetToken(ctx context.Context) *token.File {
f.view.mu.Lock()
defer f.view.mu.Unlock()

if f.isDirty() || f.astIsTrimmed() {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
func (f *goFile) GetToken(ctx context.Context) (*token.File, error) {
file, err := f.GetAST(ctx, source.ParseFull)
if file == nil {
return nil, err
}
}
f.mu.Lock()
defer f.mu.Unlock()

if unexpectedAST(ctx, f) {
return nil
}
return f.token
return f.view.session.cache.fset.File(file.Pos()), nil
}

func (f *goFile) GetAnyAST(ctx context.Context) *ast.File {
func (f *goFile) GetAST(ctx context.Context, mode source.ParseMode) (*ast.File, error) {
f.view.mu.Lock()
defer f.view.mu.Unlock()
ctx = telemetry.File.With(ctx, f.URI())

if f.isDirty() {
if f.isDirty(ctx) || f.wrongParseMode(ctx, mode) {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
return nil, fmt.Errorf("GetAST: unable to check package for %s: %v", f.URI(), err)
}
}

f.mu.Lock()
defer f.mu.Unlock()

if f.ast == nil {
return nil
fh := f.Handle(ctx)
// Check for a cached AST first, in case getting a trimmed version would actually cause a re-parse.
for _, m := range []source.ParseMode{
source.ParseHeader,
source.ParseExported,
source.ParseFull,
} {
if m < mode {
continue
}
return f.ast.file
}

func (f *goFile) GetAST(ctx context.Context) *ast.File {
f.view.mu.Lock()
defer f.view.mu.Unlock()

if f.isDirty() || f.astIsTrimmed() {
if _, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
return nil
if v, ok := f.view.session.cache.store.Cached(parseKey{
file: fh.Identity(),
mode: m,
}).(*parseGoData); ok {
return v.ast, v.err
}
}
f.mu.Lock()
defer f.mu.Unlock()

if unexpectedAST(ctx, f) {
return nil
}
return f.ast.file
ph := f.view.session.cache.ParseGoHandle(fh, mode)
return ph.Parse(ctx)
}

func (f *goFile) GetPackages(ctx context.Context) []source.Package {
f.view.mu.Lock()
defer f.view.mu.Unlock()
ctx = telemetry.File.With(ctx, f.URI())

if f.isDirty() || f.astIsTrimmed() {
if f.isDirty(ctx) || f.wrongParseMode(ctx, source.ParseFull) {
if errs, err := f.view.loadParseTypecheck(ctx, f); err != nil {
f.View().Session().Logger().Errorf(ctx, "unable to check package for %s: %v", f.URI(), err)
log.Error(ctx, "unable to check package", err, telemetry.File)

// Create diagnostics for errors if we are able to.
if len(errs) > 0 {

@@ -123,9 +107,6 @@ func (f *goFile) GetPackages(ctx context.Context) []source.Package {
f.mu.Lock()
defer f.mu.Unlock()

if unexpectedAST(ctx, f) {
return nil
}
var pkgs []source.Package
for _, pkg := range f.pkgs {
pkgs = append(pkgs, pkg)

@@ -148,23 +129,24 @@ func (f *goFile) GetPackage(ctx context.Context) source.Package {
return result
}

func unexpectedAST(ctx context.Context, f *goFile) bool {
// If the AST comes back nil, something has gone wrong.
if f.ast == nil {
f.View().Session().Logger().Errorf(ctx, "expected full AST for %s, returned nil", f.URI())
return true
func (f *goFile) wrongParseMode(ctx context.Context, mode source.ParseMode) bool {
f.mu.Lock()
defer f.mu.Unlock()

fh := f.Handle(ctx)
for _, pkg := range f.pkgs {
for _, ph := range pkg.files {
if fh.Identity() == ph.File().Identity() {
return ph.Mode() < mode
}
}
// If the AST comes back trimmed, something has gone wrong.
if f.ast.isTrimmed {
f.View().Session().Logger().Errorf(ctx, "expected full AST for %s, returned trimmed", f.URI())
return true
}
return false
}

// isDirty is true if the file needs to be type-checked.
// It assumes that the file's view's mutex is held by the caller.
func (f *goFile) isDirty() bool {
func (f *goFile) isDirty(ctx context.Context) bool {
f.mu.Lock()
defer f.mu.Unlock()

@@ -184,14 +166,16 @@ func (f *goFile) isDirty() bool {
if len(f.missingImports) > 0 {
return true
}
return f.token == nil || f.ast == nil
}

func (f *goFile) astIsTrimmed() bool {
f.mu.Lock()
defer f.mu.Unlock()

return f.ast != nil && f.ast.isTrimmed
fh := f.Handle(ctx)
for _, pkg := range f.pkgs {
for _, file := range pkg.files {
// There is a type-checked package for the current file handle.
if file.File().Identity() == fh.Identity() {
return false
}
}
}
return true
}

func (f *goFile) GetActiveReverseDeps(ctx context.Context) []source.GoFile {

@@ -10,6 +10,10 @@ import (

"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
)

@@ -19,7 +23,7 @@ func (v *view) loadParseTypecheck(ctx context.Context, f *goFile) ([]packages.Er

// If the AST for this file is trimmed, and we are explicitly type-checking it,
// don't ignore function bodies.
if f.astIsTrimmed() {
if f.wrongParseMode(ctx, source.ParseFull) {
v.pcache.mu.Lock()
f.invalidateAST(ctx)
v.pcache.mu.Unlock()

@@ -84,7 +88,9 @@ func (v *view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met
return nil, nil, ctx.Err()
}

pkgs, err := packages.Load(v.Config(), fmt.Sprintf("file=%s", f.filename()))
ctx, done := trace.StartSpan(ctx, "packages.Load", telemetry.File.Of(f.filename()))
defer done()
pkgs, err := packages.Load(v.Config(ctx), fmt.Sprintf("file=%s", f.filename()))
if len(pkgs) == 0 {
if err == nil {
err = fmt.Errorf("go/packages.Load: no packages found for %s", f.filename())

@@ -99,7 +105,10 @@ func (v *view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met
}
// Track missing imports as we look at the package's errors.
missingImports := make(map[packagePath]struct{})

log.Print(ctx, "go/packages.Load", tag.Of("packages", len(pkgs)))
for _, pkg := range pkgs {
log.Print(ctx, "go/packages.Load", tag.Of("package", pkg.PkgPath), tag.Of("files", pkg.CompiledGoFiles))
// If the package comes back with errors from `go list`,
// don't bother type-checking it.
if len(pkg.Errors) > 0 {

@@ -224,11 +233,13 @@ func (v *view) link(ctx context.Context, pkgPath packagePath, pkg *packages.Pack
for _, filename := range m.files {
f, err := v.getFile(ctx, span.FileURI(filename))
if err != nil {
v.session.log.Errorf(ctx, "no file %s: %v", filename, err)
log.Error(ctx, "no file", err, telemetry.File.Of(filename))
continue
}
gof, ok := f.(*goFile)
if !ok {
v.session.log.Errorf(ctx, "not a Go file: %s", f.URI())
log.Error(ctx, "not a Go file", nil, telemetry.File.Of(filename))
continue
}
if gof.meta == nil {
gof.meta = make(map[packageID]*metadata)

@@ -252,7 +263,7 @@ func (v *view) link(ctx context.Context, pkgPath packagePath, pkg *packages.Pack
}
if _, ok := m.children[packageID(importPkg.ID)]; !ok {
if err := v.link(ctx, importPkgPath, importPkg, m, missingImports); err != nil {
v.session.log.Errorf(ctx, "error in dependency %s: %v", importPkgPath, err)
log.Error(ctx, "error in dependency", err, telemetry.Package.Of(importPkgPath))
}
}
}

@@ -6,6 +6,7 @@ package cache

import (
"context"
"fmt"
"go/token"
)

@@ -14,7 +15,10 @@ type modFile struct {
fileBase
}

func (*modFile) GetToken(context.Context) *token.File { return nil }
func (*modFile) GetToken(context.Context) (*token.File, error) {
return nil, fmt.Errorf("GetToken: not implemented")
}

func (*modFile) setContent(content []byte) {}
func (*modFile) filename() string { return "" }
func (*modFile) isActive() bool { return false }

@@ -13,12 +13,13 @@ import (
"go/token"

"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/memoize"
)

// Limits the number of parallel parser calls per process.
var parseLimit = make(chan bool, 20)
var parseLimit = make(chan struct{}, 20)

// parseKey uniquely identifies a parsed Go file.
type parseKey struct {

@@ -74,13 +75,13 @@ func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, error) {
}

func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (*ast.File, error) {
ctx, ts := trace.StartSpan(ctx, "cache.parseGo")
defer ts.End()
ctx, done := trace.StartSpan(ctx, "cache.parseGo", telemetry.File.Of(fh.Identity().URI.Filename()))
defer done()
buf, _, err := fh.Read(ctx)
if err != nil {
return nil, err
}
parseLimit <- true
parseLimit <- struct{}{}
defer func() { <-parseLimit }()
parserMode := parser.AllErrors | parser.ParseComments
if mode == source.ParseHeader {

@@ -140,8 +141,8 @@ func isEllipsisArray(n ast.Expr) bool {
return ok
}

// fix inspects and potentially modifies any *ast.BadStmts or *ast.BadExprs in the AST.
// We attempt to modify the AST such that we can type-check it more effectively.
// fix inspects the AST and potentially modifies any *ast.BadStmts so that it can be
// type-checked more effectively.
func fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) error {
var parent ast.Node
var err error

@@ -207,7 +208,7 @@ func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src
var to, curr token.Pos
FindTo:
for {
curr, tkn, lit = s.Scan()
curr, tkn, _ = s.Scan()
// TODO(rstambler): This still needs more handling to work correctly.
// We encounter a specific issue with code that looks like this:
//

@@ -22,7 +22,7 @@ type pkg struct {
id packageID
pkgPath packagePath

files []*astFile
files []source.ParseGoHandle
errors []packages.Error
imports map[packagePath]*pkg
types *types.Package

@@ -149,17 +149,18 @@ func (pkg *pkg) PkgPath() string {

func (pkg *pkg) GetFilenames() []string {
filenames := make([]string, 0, len(pkg.files))
for _, f := range pkg.files {
filenames = append(filenames, f.uri.Filename())
for _, ph := range pkg.files {
filenames = append(filenames, ph.File().Identity().URI.Filename())
}
return filenames
}

func (pkg *pkg) GetSyntax() []*ast.File {
func (pkg *pkg) GetSyntax(ctx context.Context) []*ast.File {
var syntax []*ast.File
for _, f := range pkg.files {
if f.file != nil {
syntax = append(syntax, f.file)
for _, ph := range pkg.files {
file, _ := ph.Parse(ctx)
if file != nil {
syntax = append(syntax, file)
}
}
return syntax

@@ -16,15 +16,16 @@ import (

"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/xlog"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/trace"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
)

type session struct {
cache *cache
id string
// the logger to use to communicate back with the client
log xlog.Logger

viewMu sync.Mutex
views []*view

@@ -64,16 +65,18 @@ func (s *session) Cache() source.Cache {
return s.cache
}

func (s *session) NewView(name string, folder span.URI) source.View {
func (s *session) NewView(ctx context.Context, name string, folder span.URI) source.View {
index := atomic.AddInt64(&viewIndex, 1)
s.viewMu.Lock()
defer s.viewMu.Unlock()
ctx := context.Background()
backgroundCtx, cancel := context.WithCancel(ctx)
// We want a true background context and not a detatched context here
// the spans need to be unrelated and no tag values should pollute it.
baseCtx := trace.Detach(xcontext.Detach(ctx))
backgroundCtx, cancel := context.WithCancel(baseCtx)
v := &view{
session: s,
id: strconv.FormatInt(index, 10),
baseCtx: ctx,
baseCtx: baseCtx,
backgroundCtx: backgroundCtx,
cancel: cancel,
name: name,

@@ -92,7 +95,7 @@ func (s *session) NewView(name string, folder span.URI) source.View {
}
// Preemptively build the builtin package,
// so we immediately add builtin.go to the list of ignored files.
v.buildBuiltinPkg()
v.buildBuiltinPkg(ctx)

s.views = append(s.views, v)
// we always need to drop the view map

@@ -178,11 +181,9 @@ func (s *session) removeView(ctx context.Context, view *view) error {
return fmt.Errorf("view %s for %v not found", view.Name(), view.Folder())
}

func (s *session) Logger() xlog.Logger {
return s.log
}

func (s *session) DidOpen(ctx context.Context, uri span.URI, text []byte) {
// TODO: Propagate the language ID through to the view.
func (s *session) DidOpen(ctx context.Context, uri span.URI, _ source.FileKind, text []byte) {
ctx = telemetry.File.With(ctx, uri)
// Mark the file as open.
s.openFiles.Store(uri, true)

@@ -197,12 +198,12 @@ func (s *session) DidOpen(ctx context.Context, uri span.URI, text []byte) {
if strings.HasPrefix(string(uri), string(view.Folder())) {
f, err := view.GetFile(ctx, uri)
if err != nil {
s.log.Errorf(ctx, "error getting file for %s", uri)
log.Error(ctx, "error getting file", nil, telemetry.File)
return
}
gof, ok := f.(*goFile)
if !ok {
s.log.Errorf(ctx, "%s is not a Go file", uri)
log.Error(ctx, "not a Go file", nil, telemetry.File)
return
}
// Mark file as open.

@@ -274,7 +275,7 @@ func (s *session) openOverlay(ctx context.Context, uri span.URI, data []byte) {
}
_, hash, err := s.cache.GetFile(uri).Read(ctx)
if err != nil {
s.log.Errorf(ctx, "failed to read %s: %v", uri, err)
log.Error(ctx, "failed to read", err, telemetry.File)
return
}
if hash == s.overlays[uri].hash {

@@ -6,6 +6,7 @@ package cache

import (
"context"
"fmt"
"go/token"
)

@@ -14,7 +15,10 @@ type sumFile struct {
fileBase
}

func (*sumFile) GetToken(context.Context) *token.File { return nil }
func (*sumFile) GetToken(context.Context) (*token.File, error) {
return nil, fmt.Errorf("GetToken: not implemented")
}

func (*sumFile) setContent(content []byte) {}
func (*sumFile) filename() string { return "" }
func (*sumFile) isActive() bool { return false }

@@ -6,17 +6,22 @@ package cache

import (
"context"
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"os"
"path/filepath"
"strings"
"sync"

"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/debug"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)

@@ -48,6 +53,19 @@ type view struct {
// env is the environment to use when invoking underlying tools.
env []string

// process is the process env for this view.
// Note: this contains cached module and filesystem state.
//
// TODO(suzmue): the state cached in the process env is specific to each view,
// however, there is state that can be shared between views that is not currently
// cached, like the module cache.
processEnv *imports.ProcessEnv

// modFileVersions stores the last seen versions of the module files that are used
// by processEnvs resolver.
// TODO(suzmue): These versions may not actually be on disk.
modFileVersions map[string]string

// buildFlags is the build flags to use when invoking underlying tools.
buildFlags []string

@@ -111,7 +129,7 @@ func (v *view) Folder() span.URI {

// Config returns the configuration used for the view's interaction with the
// go/packages API. It is shared across all views.
func (v *view) Config() *packages.Config {
func (v *view) Config(ctx context.Context) *packages.Config {
// TODO: Should we cache the config and/or overlay somewhere?
return &packages.Config{
Dir: v.folder.Filename(),

@@ -128,10 +146,112 @@ func (v *view) Config() *packages.Config {
ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
panic("go/packages must not be used to parse files")
},
Logf: func(format string, args ...interface{}) {
log.Print(ctx, fmt.Sprintf(format, args...))
},
Tests: true,
}
}

func (v *view) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error {
v.mu.Lock()
defer v.mu.Unlock()
if v.processEnv == nil {
v.processEnv = v.buildProcessEnv(ctx)
}

// Before running the user provided function, clear caches in the resolver.
if v.modFilesChanged() {
if r, ok := v.processEnv.GetResolver().(*imports.ModuleResolver); ok {
// Clear the resolver cache and set Initialized to false.
r.Initialized = false
r.Main = nil
r.ModsByModPath = nil
r.ModsByDir = nil
// Reset the modFileVersions.
v.modFileVersions = nil
}
}

// Run the user function.
opts.Env = v.processEnv
if err := fn(opts); err != nil {
return err
}

// If applicable, store the file versions of the 'go.mod' files that are
// looked at by the resolver.
v.storeModFileVersions()

return nil
}

func (v *view) buildProcessEnv(ctx context.Context) *imports.ProcessEnv {
cfg := v.Config(ctx)
env := &imports.ProcessEnv{
WorkingDir: cfg.Dir,
Logf: func(format string, args ...interface{}) {
log.Print(ctx, fmt.Sprintf(format, args...))
},
}
for _, kv := range cfg.Env {
split := strings.Split(kv, "=")
if len(split) < 2 {
continue
}
switch split[0] {
case "GOPATH":
env.GOPATH = split[1]
case "GOROOT":
env.GOROOT = split[1]
case "GO111MODULE":
env.GO111MODULE = split[1]
case "GOPROXY":
env.GOROOT = split[1]
case "GOFLAGS":
env.GOFLAGS = split[1]
case "GOSUMDB":
env.GOSUMDB = split[1]
}
}
return env
}

func (v *view) modFilesChanged() bool {
// Check the versions of the 'go.mod' files of the main module
// and modules included by a replace directive. Return true if
// any of these file versions do not match.
for filename, version := range v.modFileVersions {
if version != v.fileVersion(filename) {
return true
}
}
return false
}

func (v *view) storeModFileVersions() {
// Store the mod files versions, if we are using a ModuleResolver.
r, moduleMode := v.processEnv.GetResolver().(*imports.ModuleResolver)
if !moduleMode || !r.Initialized {
return
}
v.modFileVersions = make(map[string]string)

// Get the file versions of the 'go.mod' files of the main module
// and modules included by a replace directive in the resolver.
for _, mod := range r.ModsByModPath {
if (mod.Main || mod.Replace != nil) && mod.GoMod != "" {
v.modFileVersions[mod.GoMod] = v.fileVersion(mod.GoMod)
}
}
}

func (v *view) fileVersion(filename string) string {
uri := span.FileURI(filename)
f := v.session.GetFile(uri)
return f.Identity().Version
}

func (v *view) Env() []string {
v.mu.Lock()
defer v.mu.Unlock()

@@ -143,6 +263,7 @@ func (v *view) SetEnv(env []string) {
defer v.mu.Unlock()
//TODO: this should invalidate the entire view
v.env = env
v.processEnv = nil // recompute process env
}

func (v *view) SetBuildFlags(buildFlags []string) {

@@ -186,9 +307,12 @@ func (v *view) BuiltinPackage() *ast.Package {
// buildBuiltinPkg builds the view's builtin package.
// It assumes that the view is not active yet,
// i.e. it has not been added to the session's list of views.
func (v *view) buildBuiltinPkg() {
cfg := *v.Config()
pkgs, _ := packages.Load(&cfg, "builtin")
func (v *view) buildBuiltinPkg(ctx context.Context) {
cfg := *v.Config(ctx)
pkgs, err := packages.Load(&cfg, "builtin")
if err != nil {
log.Error(ctx, "error getting package metadata for \"builtin\" package", err)
}
if len(pkgs) != 1 {
v.builtinPkg, _ = ast.NewPackage(cfg.Fset, nil, nil, nil)
return

@@ -244,8 +368,6 @@ func (f *goFile) invalidateContent(ctx context.Context) {
// including any position and type information that depends on it.
func (f *goFile) invalidateAST(ctx context.Context) {
f.mu.Lock()
f.ast = nil
f.token = nil
pkgs := f.pkgs
f.mu.Unlock()

@@ -277,15 +399,25 @@ func (v *view) remove(ctx context.Context, id packageID, seen map[packageID]stru
for _, filename := range m.files {
f, err := v.findFile(span.FileURI(filename))
if err != nil {
v.session.log.Errorf(ctx, "cannot find file %s: %v", f.URI(), err)
log.Error(ctx, "cannot find file", err, telemetry.File.Of(f.URI()))
continue
}
gof, ok := f.(*goFile)
if !ok {
v.session.log.Errorf(ctx, "non-Go file %v", f.URI())
log.Error(ctx, "non-Go file", nil, telemetry.File.Of(f.URI()))
continue
}
gof.mu.Lock()
if pkg, ok := gof.pkgs[id]; ok {
// TODO: Ultimately, we shouldn't need this.
// Preemptively delete all of the cached keys if we are invalidating a package.
for _, ph := range pkg.files {
v.session.cache.store.Delete(parseKey{
file: ph.File().Identity(),
mode: ph.Mode(),
})
}
}
delete(gof.pkgs, id)
gof.mu.Unlock()
}

@@ -5,7 +5,6 @@
package cmd_test

import (
"context"
"fmt"
"strings"
"testing"

@@ -23,7 +22,7 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
fname := uri.Filename()
args := []string{"-remote=internal", "check", fname}
out := captureStdOut(t, func() {
tool.Main(context.Background(), r.app, args)
tool.Main(r.ctx, r.app, args)
})
// parse got into a collection of reports
got := map[string]struct{}{}

@@ -24,8 +24,10 @@ import (
"golang.org/x/tools/internal/lsp/cache"
"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/ocagent"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/tool"
"golang.org/x/tools/internal/xcontext"
)

// Application is the main application as passed to tool.Main

@@ -44,6 +46,9 @@ type Application struct {
// The base cache to use for sessions from this application.
cache source.Cache

// The name of the binary, used in help and telemetry.
name string

// The working directory to run commands in.
wd string

@@ -55,23 +60,28 @@ type Application struct {

// Enable verbose logging
Verbose bool `flag:"v" help:"Verbose output"`

// Control ocagent export of telemetry
OCAgent string `flag:"ocagent" help:"The address of the ocagent, or off"`
}

// Returns a new Application ready to run.
func New(wd string, env []string) *Application {
func New(name, wd string, env []string) *Application {
if wd == "" {
wd, _ = os.Getwd()
}
app := &Application{
cache: cache.New(),
name: name,
wd: wd,
env: env,
OCAgent: "off", //TODO: Remove this line to default the exporter to on
}
return app
}

// Name implements tool.Application returning the binary name.
func (app *Application) Name() string { return "gopls" }
func (app *Application) Name() string { return app.name }

// Usage implements tool.Application returning empty extra argument usage.
func (app *Application) Usage() string { return "<command> [command-flags] [command-args]" }

@@ -101,6 +111,7 @@ gopls flags are:
// If no arguments are passed it will invoke the server sub command, as a
// temporary measure for compatibility.
func (app *Application) Run(ctx context.Context, args ...string) error {
ocagent.Export(app.name, app.OCAgent)
app.Serve.app = app
if len(args) == 0 {
tool.Main(ctx, &app.Serve, args)

@@ -139,7 +150,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
switch app.Remote {
case "":
connection := newConnection(app)
connection.Server = lsp.NewClientServer(app.cache, connection.Client)
ctx, connection.Server = lsp.NewClientServer(ctx, app.cache, connection.Client)
return connection, connection.initialize(ctx)
case "internal":
internalMu.Lock()

@@ -148,13 +159,16 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
return c, nil
}
connection := newConnection(app)
ctx := context.Background() //TODO:a way of shutting down the internal server
ctx := xcontext.Detach(ctx) //TODO:a way of shutting down the internal server
cr, sw, _ := os.Pipe()
sr, cw, _ := os.Pipe()
var jc *jsonrpc2.Conn
jc, connection.Server, _ = protocol.NewClient(jsonrpc2.NewHeaderStream(cr, cw), connection.Client)
ctx, jc, connection.Server = protocol.NewClient(ctx, jsonrpc2.NewHeaderStream(cr, cw), connection.Client)
go jc.Run(ctx)
go lsp.NewServer(app.cache, jsonrpc2.NewHeaderStream(sr, sw)).Run(ctx)
go func() {
ctx, srv := lsp.NewServer(ctx, app.cache, jsonrpc2.NewHeaderStream(sr, sw))
srv.Run(ctx)
}()
if err := connection.initialize(ctx); err != nil {
return nil, err
}

@@ -168,7 +182,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) {
}
stream := jsonrpc2.NewHeaderStream(conn, conn)
var jc *jsonrpc2.Conn
jc, connection.Server, _ = protocol.NewClient(stream, connection.Client)
ctx, jc, connection.Server = protocol.NewClient(ctx, stream, connection.Client)
go jc.Run(ctx)
return connection, connection.initialize(ctx)
}

@@ -334,12 +348,14 @@ func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile {
func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile {
c.Client.filesMu.Lock()
defer c.Client.filesMu.Unlock()

file := c.Client.getFile(ctx, uri)
if !file.added {
file.added = true
p := &protocol.DidOpenTextDocumentParams{}
p.TextDocument.URI = string(uri)
p.TextDocument.Text = string(file.mapper.Content)
p.TextDocument.LanguageID = source.DetectLanguage("", file.uri.Filename()).String()
if err := c.Server.DidOpen(ctx, p); err != nil {
file.err = fmt.Errorf("%v: %v", uri, err)
}

@@ -6,6 +6,7 @@ package cmd_test

import (
"bytes"
"context"
"io/ioutil"
"os"
"path/filepath"

@@ -24,6 +25,7 @@ type runner struct {
exporter packagestest.Exporter
data *tests.Data
app *cmd.Application
ctx context.Context
}

func TestCommandLine(t *testing.T) {

@@ -37,7 +39,8 @@ func testCommandLine(t *testing.T, exporter packagestest.Exporter) {
r := &runner{
exporter: exporter,
data: data,
app: cmd.New(data.Config.Dir, data.Exported.Config.Env),
app: cmd.New("gopls-test", data.Config.Dir, data.Exported.Config.Env),
ctx: tests.Context(t),
}
tests.Run(t, r, data)
}

@@ -5,7 +5,6 @@
package cmd_test

import (
"context"
"fmt"
"os"
"path/filepath"

@@ -56,7 +55,7 @@ func TestDefinitionHelpExample(t *testing.T) {
fmt.Sprintf("%v:#%v", thisFile, cmd.ExampleOffset)} {
args := append(baseArgs, query)
got := captureStdOut(t, func() {
tool.Main(context.Background(), cmd.New("", nil), args)
tool.Main(tests.Context(t), cmd.New("gopls-test", "", nil), args)
})
if !expect.MatchString(got) {
t.Errorf("test with %v\nexpected:\n%s\ngot:\n%s", args, expect, got)

@@ -84,7 +83,7 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
uri := d.Src.URI()
args = append(args, fmt.Sprint(d.Src))
got := captureStdOut(t, func() {
tool.Main(context.Background(), r.app, args)
tool.Main(r.ctx, r.app, args)
})
got = normalizePaths(r.data, got)
if mode&jsonGoDef != 0 && runtime.GOOS == "windows" {

@@ -5,7 +5,6 @@
package cmd_test

import (
"context"
"os/exec"
"regexp"
"strings"

@@ -38,9 +37,9 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
//TODO: our error handling differs, for now just skip unformattable files
continue
}
app := cmd.New(r.data.Config.Dir, r.data.Config.Env)
app := cmd.New("gopls-test", r.data.Config.Dir, r.data.Config.Env)
got := captureStdOut(t, func() {
tool.Main(context.Background(), app, append([]string{"-remote=internal", "format"}, args...))
tool.Main(r.ctx, app, append([]string{"-remote=internal", "format"}, args...))
})
got = normalizePaths(r.data, got)
// check the first two lines are the expected file header

@ -20,6 +20,9 @@ import (
|
|||
"golang.org/x/tools/internal/jsonrpc2"
|
||||
"golang.org/x/tools/internal/lsp"
|
||||
"golang.org/x/tools/internal/lsp/debug"
|
||||
"golang.org/x/tools/internal/lsp/telemetry"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
"golang.org/x/tools/internal/tool"
|
||||
)
|
||||
|
||||
|
@ -79,8 +82,8 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
|
|||
}
|
||||
|
||||
// For debugging purposes only.
|
||||
run := func(srv *lsp.Server) {
|
||||
srv.Conn.Logger = logger(s.Trace, out)
|
||||
run := func(ctx context.Context, srv *lsp.Server) {
|
||||
srv.Conn.AddHandler(&handler{loggingRPCs: s.Trace, out: out})
|
||||
go srv.Run(ctx)
|
||||
}
|
||||
if s.Address != "" {
|
||||
|
@ -90,8 +93,8 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
|
|||
return lsp.RunServerOnPort(ctx, s.app.cache, s.Port, run)
|
||||
}
|
||||
stream := jsonrpc2.NewHeaderStream(os.Stdin, os.Stdout)
|
||||
srv := lsp.NewServer(s.app.cache, stream)
|
srv.Conn.Logger = logger(s.Trace, out)
ctx, srv := lsp.NewServer(ctx, s.app.cache, stream)
srv.Conn.AddHandler(&handler{loggingRPCs: s.Trace, out: out})
return srv.Run(ctx)
}

@@ -115,14 +118,121 @@ func (s *Serve) forward() error {
return <-errc
}

func logger(trace bool, out io.Writer) jsonrpc2.Logger {
return func(direction jsonrpc2.Direction, id *jsonrpc2.ID, elapsed time.Duration, method string, payload *json.RawMessage, err *jsonrpc2.Error) {
if !trace {
type handler struct {
loggingRPCs bool
out io.Writer
}

type rpcStats struct {
method string
direction jsonrpc2.Direction
id *jsonrpc2.ID
payload *json.RawMessage
start time.Time
delivering func()
close func()
}

type statsKeyType int

const statsKey = statsKeyType(0)

func (h *handler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
stats := h.getStats(ctx)
if stats != nil {
stats.delivering()
}
return false
}

func (h *handler) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
return false
}

func (h *handler) Request(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireRequest) context.Context {
if r.Method == "" {
panic("no method in rpc stats")
}
stats := &rpcStats{
method: r.Method,
start: time.Now(),
direction: direction,
payload: r.Params,
}
ctx = context.WithValue(ctx, statsKey, stats)
mode := telemetry.Outbound
if direction == jsonrpc2.Receive {
mode = telemetry.Inbound
}
ctx, stats.close = trace.StartSpan(ctx, r.Method,
tag.Tag{Key: telemetry.Method, Value: r.Method},
tag.Tag{Key: telemetry.RPCDirection, Value: mode},
tag.Tag{Key: telemetry.RPCID, Value: r.ID},
)
telemetry.Started.Record(ctx, 1)
_, stats.delivering = trace.StartSpan(ctx, "queued")
return ctx
}

func (h *handler) Response(ctx context.Context, direction jsonrpc2.Direction, r *jsonrpc2.WireResponse) context.Context {
stats := h.getStats(ctx)
h.logRPC(direction, r.ID, 0, stats.method, r.Result, nil)
return ctx
}

func (h *handler) Done(ctx context.Context, err error) {
stats := h.getStats(ctx)
h.logRPC(stats.direction, stats.id, time.Since(stats.start), stats.method, stats.payload, err)
if err != nil {
ctx = telemetry.StatusCode.With(ctx, "ERROR")
} else {
ctx = telemetry.StatusCode.With(ctx, "OK")
}
elapsedTime := time.Since(stats.start)
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
telemetry.Latency.Record(ctx, latencyMillis)
stats.close()
}

func (h *handler) Read(ctx context.Context, bytes int64) context.Context {
telemetry.SentBytes.Record(ctx, bytes)
return ctx
}

func (h *handler) Wrote(ctx context.Context, bytes int64) context.Context {
telemetry.ReceivedBytes.Record(ctx, bytes)
return ctx
}

const eol = "\r\n\r\n\r\n"

func (h *handler) Error(ctx context.Context, err error) {
stats := h.getStats(ctx)
h.logRPC(stats.direction, stats.id, 0, stats.method, nil, err)
}

func (h *handler) getStats(ctx context.Context) *rpcStats {
stats, ok := ctx.Value(statsKey).(*rpcStats)
if !ok || stats == nil {
method, ok := ctx.Value(telemetry.Method).(string)
if !ok {
method = "???"
}
stats = &rpcStats{
method: method,
close: func() {},
}
}
return stats
}

func (h *handler) logRPC(direction jsonrpc2.Direction, id *jsonrpc2.ID, elapsed time.Duration, method string, payload *json.RawMessage, err error) {
if !h.loggingRPCs {
return
}
const eol = "\r\n\r\n\r\n"
if err != nil {
fmt.Fprintf(out, "[Error - %v] %s %s%s %v%s", time.Now().Format("3:04:05 PM"),
fmt.Fprintf(h.out, "[Error - %v] %s %s%s %v%s", time.Now().Format("3:04:05 PM"),
direction, method, id, err, eol)
return
}

@@ -164,6 +274,5 @@ func logger(trace bool, out io.Writer) jsonrpc2.Logger {
params = "{}"
}
fmt.Fprintf(outx, ".\r\nParams: %s%s", params, eol)
fmt.Fprintf(out, "%s", outx.String())
}
fmt.Fprintf(h.out, "%s", outx.String())
}

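The new handler above threads per-RPC bookkeeping through the request context: Request stores an *rpcStats under an unexported key, and the later callbacks (Deliver, Response, Done, Error) recover it with getStats. A minimal standalone sketch of that pattern, with illustrative names rather than the gopls types:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    type statsKeyType int

    const statsKey = statsKeyType(0)

    type rpcStats struct {
        method string
        start  time.Time
    }

    func withStats(ctx context.Context, method string) context.Context {
        // Attach per-RPC stats to the context at the start of a request.
        return context.WithValue(ctx, statsKey, &rpcStats{method: method, start: time.Now()})
    }

    func getStats(ctx context.Context) *rpcStats {
        // Later callbacks recover the same value, falling back to a placeholder
        // when the context never carried stats.
        if s, ok := ctx.Value(statsKey).(*rpcStats); ok && s != nil {
            return s
        }
        return &rpcStats{method: "???", start: time.Now()}
    }

    func main() {
        ctx := withStats(context.Background(), "textDocument/hover")
        s := getStats(ctx)
        fmt.Println(s.method, time.Since(s.start))
    }
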
@@ -11,6 +11,8 @@ import (

"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/span"
)

@@ -57,7 +59,7 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara
if s.wantSuggestedFixes {
qf, err := quickFixes(ctx, view, gof)
if err != nil {
view.Session().Logger().Errorf(ctx, "quick fixes failed for %s: %v", uri, err)
log.Error(ctx, "quick fixes failed", err, telemetry.File.Of(uri))
}
codeActions = append(codeActions, qf...)
}

@@ -121,7 +123,8 @@ func findImportErrors(diagnostics []protocol.Diagnostic) bool {
return true
}
// "X imported but not used" is an unused import.
if strings.HasSuffix(diagnostic.Message, " imported but not used") {
// "X imported but not used as Y" is an unused import.
if strings.Contains(diagnostic.Message, " imported but not used") {
return true
}
}

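The switch from strings.HasSuffix to strings.Contains above is so that compiler messages of the form `X imported but not used as Y` are also recognised as unused imports. A quick standalone check of the difference (the message text here is just an example):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        msg := `"fmt" imported but not used as fmtpkg`
        // The old suffix check misses the "as Y" form; the new check catches both.
        fmt.Println(strings.HasSuffix(msg, " imported but not used")) // false
        fmt.Println(strings.Contains(msg, " imported but not used"))  // true
    }
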
@@ -12,6 +12,8 @@ import (

"golang.org/x/tools/internal/lsp/protocol"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/span"
)

@@ -35,7 +37,7 @@ func (s *Server) completion(ctx context.Context, params *protocol.CompletionPara
WantDocumentaton: s.wantCompletionDocumentation,
})
if err != nil {
s.session.Logger().Infof(ctx, "no completions found for %s:%v:%v: %v", uri, int(params.Position.Line), int(params.Position.Character), err)
log.Print(ctx, "no completions found", tag.Of("At", rng), tag.Of("Failure", err))
}
return &protocol.CompletionList{
IsIncomplete: false,

@@ -62,11 +64,11 @@ func (s *Server) toProtocolCompletionItems(ctx context.Context, view source.View
prefix = strings.ToLower(surrounding.Prefix())
spn, err := surrounding.Range.Span()
if err != nil {
s.session.Logger().Infof(ctx, "failed to get span for surrounding position: %s:%v:%v: %v", m.URI, int(pos.Line), int(pos.Character), err)
log.Print(ctx, "failed to get span for surrounding position: %s:%v:%v: %v", tag.Of("Position", pos), tag.Of("Failure", err))
} else {
rng, err := m.Range(spn)
if err != nil {
s.session.Logger().Infof(ctx, "failed to convert surrounding position: %s:%v:%v: %v", m.URI, int(pos.Line), int(pos.Character), err)
log.Print(ctx, "failed to convert surrounding position", tag.Of("Position", pos), tag.Of("Failure", err))
} else {
insertionRange = rng
}

@@ -0,0 +1,49 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package debug

import (
"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/metric"
)

var (
// the distributions we use for histograms
bytesDistribution = []int64{1 << 10, 1 << 11, 1 << 12, 1 << 14, 1 << 16, 1 << 20}
millisecondsDistribution = []float64{0.1, 0.5, 1, 2, 5, 10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000}

receivedBytes = metric.HistogramInt64{
Name: "received_bytes",
Description: "Distribution of received bytes, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: bytesDistribution,
}.Record(telemetry.ReceivedBytes)

sentBytes = metric.HistogramInt64{
Name: "sent_bytes",
Description: "Distribution of sent bytes, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: bytesDistribution,
}.Record(telemetry.SentBytes)

latency = metric.HistogramFloat64{
Name: "latency",
Description: "Distribution of latency in milliseconds, by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
Buckets: millisecondsDistribution,
}.Record(telemetry.Latency)

started = metric.Scalar{
Name: "started",
Description: "Count of RPCs started by method.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method},
}.CountInt64(telemetry.Started)

completed = metric.Scalar{
Name: "completed",
Description: "Count of RPCs completed by method and status.",
Keys: []interface{}{telemetry.RPCDirection, telemetry.Method, telemetry.StatusCode},
}.CountFloat64(telemetry.Latency)
)

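The byte buckets above are powers of two, so they correspond to 1 KiB, 2 KiB, 4 KiB, 16 KiB, 64 KiB, and 1 MiB; a tiny standalone check of that arithmetic (nothing gopls-specific):

    package main

    import "fmt"

    func main() {
        // The byte histogram buckets from above.
        buckets := []int64{1 << 10, 1 << 11, 1 << 12, 1 << 14, 1 << 16, 1 << 20}
        for _, b := range buckets {
            fmt.Printf("%8d bytes = %7.1f KiB\n", b, float64(b)/1024)
        }
    }
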
@@ -0,0 +1,111 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package debug

import (
"bytes"
"fmt"
"net/http"
"sort"

"golang.org/x/tools/internal/lsp/telemetry/metric"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/worker"
)

type prometheus struct {
metrics []metric.Data
}

func (p *prometheus) observeMetric(data metric.Data) {
name := data.Handle().Name()
index := sort.Search(len(p.metrics), func(i int) bool {
return p.metrics[i].Handle().Name() >= name
})
if index >= len(p.metrics) || p.metrics[index].Handle().Name() != name {
old := p.metrics
p.metrics = make([]metric.Data, len(old)+1)
copy(p.metrics, old[:index])
copy(p.metrics[index+1:], old[index:])
}
p.metrics[index] = data
}

func (p *prometheus) header(w http.ResponseWriter, name, description string, isGauge, isHistogram bool) {
kind := "counter"
if isGauge {
kind = "gauge"
}
if isHistogram {
kind = "histogram"
}
fmt.Fprintf(w, "# HELP %s %s\n", name, description)
fmt.Fprintf(w, "# TYPE %s %s\n", name, kind)
}

func (p *prometheus) row(w http.ResponseWriter, name string, group tag.List, extra string, value interface{}) {
fmt.Fprint(w, name)
buf := &bytes.Buffer{}
fmt.Fprint(buf, group)
if extra != "" {
if buf.Len() > 0 {
fmt.Fprint(buf, ",")
}
fmt.Fprint(buf, extra)
}
if buf.Len() > 0 {
fmt.Fprint(w, "{")
buf.WriteTo(w)
fmt.Fprint(w, "}")
}
fmt.Fprintf(w, " %v\n", value)
}

func (p *prometheus) serve(w http.ResponseWriter, r *http.Request) {
done := make(chan struct{})
worker.Do(func() {
defer close(done)
for _, data := range p.metrics {
switch data := data.(type) {
case *metric.Int64Data:
p.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
for i, group := range data.Groups() {
p.row(w, data.Info.Name, group, "", data.Rows[i])
}

case *metric.Float64Data:
p.header(w, data.Info.Name, data.Info.Description, data.IsGauge, false)
for i, group := range data.Groups() {
p.row(w, data.Info.Name, group, "", data.Rows[i])
}

case *metric.HistogramInt64Data:
p.header(w, data.Info.Name, data.Info.Description, false, true)
for i, group := range data.Groups() {
row := data.Rows[i]
for j, b := range data.Info.Buckets {
p.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
}
p.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
p.row(w, data.Info.Name+"_count", group, "", row.Count)
p.row(w, data.Info.Name+"_sum", group, "", row.Sum)
}

case *metric.HistogramFloat64Data:
p.header(w, data.Info.Name, data.Info.Description, false, true)
for i, group := range data.Groups() {
row := data.Rows[i]
for j, b := range data.Info.Buckets {
p.row(w, data.Info.Name+"_bucket", group, fmt.Sprintf(`le="%v"`, b), row.Values[j])
}
p.row(w, data.Info.Name+"_bucket", group, `le="+Inf"`, row.Count)
p.row(w, data.Info.Name+"_count", group, "", row.Count)
p.row(w, data.Info.Name+"_sum", group, "", row.Sum)
}
}
}
})
<-done
}

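The header and row helpers above write the Prometheus text exposition format, so a scrape of the /metrics/ endpoint registered in debug.Serve should produce output shaped roughly like the lines below. The numbers are made up and the exact label text depends on how tag.List renders itself, so treat this only as an illustration of the shape, not real output:

    # HELP started Count of RPCs started by method.
    # TYPE started counter
    started{<rendered tags>} 12
    # HELP latency Distribution of latency in milliseconds, by method.
    # TYPE latency histogram
    latency_bucket{<rendered tags>,le="0.1"} 0
    latency_bucket{<rendered tags>,le="0.5"} 3
    latency_bucket{<rendered tags>,le="+Inf"} 12
    latency_count{<rendered tags>} 12
    latency_sum{<rendered tags>} 87.4
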
@@ -0,0 +1,209 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package debug

import (
"fmt"
"html/template"
"log"
"net/http"
"sort"

"golang.org/x/tools/internal/lsp/telemetry"
"golang.org/x/tools/internal/lsp/telemetry/metric"
)

var rpcTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{define "title"}}RPC Information{{end}}
{{define "body"}}
<H2>Inbound</H2>
{{template "rpcSection" .Inbound}}
<H2>Outbound</H2>
{{template "rpcSection" .Outbound}}
{{end}}
{{define "rpcSection"}}
{{range .}}<P>
<b>{{.Method}}</b> {{.Started}} <a href="/trace/{{.Method}}">traces</a> ({{.InProgress}} in progress)
<br>
<i>Latency</i> {{with .Latency}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<i>By bucket</i> 0s {{range .Latency.Values}}<b>{{.Count}}</b> {{.Limit}} {{end}}
<br>
<i>Received</i> {{with .Received}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<i>Sent</i> {{with .Sent}}{{.Mean}} ({{.Min}}<{{.Max}}){{end}}
<br>
<i>Result codes</i> {{range .Codes}}{{.Key}}={{.Count}} {{end}}
</P>
{{end}}
{{end}}
`))

type rpcs struct {
Inbound []*rpcStats
Outbound []*rpcStats
}

type rpcStats struct {
Method string
Started int64
Completed int64
InProgress int64
Latency rpcTimeHistogram
Received rpcBytesHistogram
Sent rpcBytesHistogram
Codes []*rpcCodeBucket
}

type rpcTimeHistogram struct {
Sum timeUnits
Count int64
Mean timeUnits
Min timeUnits
Max timeUnits
Values []rpcTimeBucket
}

type rpcTimeBucket struct {
Limit timeUnits
Count int64
}

type rpcBytesHistogram struct {
Sum byteUnits
Count int64
Mean byteUnits
Min byteUnits
Max byteUnits
Values []rpcBytesBucket
}

type rpcBytesBucket struct {
Limit byteUnits
Count int64
}

type rpcCodeBucket struct {
Key string
Count int64
}

func (r *rpcs) observeMetric(data metric.Data) {
for i, group := range data.Groups() {
set := &r.Inbound
if group.Get(telemetry.RPCDirection) == telemetry.Outbound {
set = &r.Outbound
}
method, ok := group.Get(telemetry.Method).(string)
if !ok {
log.Printf("Not a method... %v", group)
continue
}
index := sort.Search(len(*set), func(i int) bool {
return (*set)[i].Method >= method
})
if index >= len(*set) || (*set)[index].Method != method {
old := *set
*set = make([]*rpcStats, len(old)+1)
copy(*set, old[:index])
copy((*set)[index+1:], old[index:])
(*set)[index] = &rpcStats{Method: method}
}
stats := (*set)[index]
switch data.Handle() {
case started:
stats.Started = data.(*metric.Int64Data).Rows[i]
case completed:
status, ok := group.Get(telemetry.StatusCode).(string)
if !ok {
log.Printf("Not status... %v", group)
continue
}
var b *rpcCodeBucket
for c, entry := range stats.Codes {
if entry.Key == status {
b = stats.Codes[c]
break
}
}
if b == nil {
b = &rpcCodeBucket{Key: status}
stats.Codes = append(stats.Codes, b)
sort.Slice(stats.Codes, func(i int, j int) bool {
return stats.Codes[i].Key < stats.Codes[i].Key
})
}
b.Count = data.(*metric.Int64Data).Rows[i]
case latency:
data := data.(*metric.HistogramFloat64Data)
row := data.Rows[i]
stats.Latency.Count = row.Count
stats.Latency.Sum = timeUnits(row.Sum)
stats.Latency.Min = timeUnits(row.Min)
stats.Latency.Max = timeUnits(row.Max)
stats.Latency.Mean = timeUnits(row.Sum) / timeUnits(row.Count)
stats.Latency.Values = make([]rpcTimeBucket, len(data.Info.Buckets))
last := int64(0)
for i, b := range data.Info.Buckets {
stats.Latency.Values[i].Limit = timeUnits(b)
stats.Latency.Values[i].Count = row.Values[i] - last
last = row.Values[i]
}
case sentBytes:
data := data.(*metric.HistogramInt64Data)
row := data.Rows[i]
stats.Sent.Count = row.Count
stats.Sent.Sum = byteUnits(row.Sum)
stats.Sent.Min = byteUnits(row.Min)
stats.Sent.Max = byteUnits(row.Max)
stats.Sent.Mean = byteUnits(row.Sum) / byteUnits(row.Count)
case receivedBytes:
data := data.(*metric.HistogramInt64Data)
row := data.Rows[i]
stats.Received.Count = row.Count
stats.Received.Sum = byteUnits(row.Sum)
stats.Sent.Min = byteUnits(row.Min)
stats.Sent.Max = byteUnits(row.Max)
stats.Received.Mean = byteUnits(row.Sum) / byteUnits(row.Count)
}
}

for _, set := range [][]*rpcStats{r.Inbound, r.Outbound} {
for _, stats := range set {
stats.Completed = 0
for _, b := range stats.Codes {
stats.Completed += b.Count
}
stats.InProgress = stats.Started - stats.Completed
}
}
}

func (r *rpcs) getData(req *http.Request) interface{} {
return r
}

func units(v float64, suffixes []string) string {
s := ""
for _, s = range suffixes {
n := v / 1000
if n < 1 {
break
}
v = n
}
return fmt.Sprintf("%.2f%s", v, s)
}

type timeUnits float64

func (v timeUnits) String() string {
v = v * 1000 * 1000
return units(float64(v), []string{"ns", "μs", "ms", "s"})
}

type byteUnits float64

func (v byteUnits) String() string {
return units(float64(v), []string{"B", "KB", "MB", "GB", "TB"})
}

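The units helper above keeps dividing by 1000 until the next step would drop below 1, carrying the matching suffix along. A self-contained restatement of that logic, with a couple of worked values (timeUnits values are milliseconds, so String first scales to nanoseconds; byte units use decimal thousands, not 1024):

    package main

    import "fmt"

    // Same logic as the units helper above, restated so the example runs on its own.
    func units(v float64, suffixes []string) string {
        s := ""
        for _, s = range suffixes {
            n := v / 1000
            if n < 1 {
                break
            }
            v = n
        }
        return fmt.Sprintf("%.2f%s", v, s)
    }

    func main() {
        fmt.Println(units(1.5*1000*1000, []string{"ns", "μs", "ms", "s"})) // 1.50ms (1.5 ms of latency)
        fmt.Println(units(2048, []string{"B", "KB", "MB", "GB", "TB"}))    // 2.05KB (decimal thousands, not 1024)
    }
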
@ -9,7 +9,6 @@ import (
|
|||
"context"
|
||||
"go/token"
|
||||
"html/template"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
|
@ -19,6 +18,11 @@ import (
|
|||
"strconv"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/metric"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/worker"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -211,7 +215,13 @@ func Serve(ctx context.Context, addr string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("Debug serving on port: %d", listener.Addr().(*net.TCPAddr).Port)
|
||||
log.Print(ctx, "Debug serving", tag.Of("Port", listener.Addr().(*net.TCPAddr).Port))
|
||||
prometheus := prometheus{}
|
||||
metric.RegisterObservers(prometheus.observeMetric)
|
||||
rpcs := rpcs{}
|
||||
metric.RegisterObservers(rpcs.observeMetric)
|
||||
traces := traces{}
|
||||
trace.RegisterObservers(traces.export)
|
||||
go func() {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", Render(mainTmpl, func(*http.Request) interface{} { return data }))
|
||||
|
@ -221,6 +231,9 @@ func Serve(ctx context.Context, addr string) error {
|
|||
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
|
||||
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
|
||||
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
|
||||
mux.HandleFunc("/metrics/", prometheus.serve)
|
||||
mux.HandleFunc("/rpc/", Render(rpcTmpl, rpcs.getData))
|
||||
mux.HandleFunc("/trace/", Render(traceTmpl, traces.getData))
|
||||
mux.HandleFunc("/cache/", Render(cacheTmpl, getCache))
|
||||
mux.HandleFunc("/session/", Render(sessionTmpl, getSession))
|
||||
mux.HandleFunc("/view/", Render(viewTmpl, getView))
|
||||
|
@ -228,23 +241,28 @@ func Serve(ctx context.Context, addr string) error {
|
|||
mux.HandleFunc("/info", Render(infoTmpl, getInfo))
|
||||
mux.HandleFunc("/memory", Render(memoryTmpl, getMemory))
|
||||
if err := http.Serve(listener, mux); err != nil {
|
||||
log.Printf("Debug server failed with %v", err)
|
||||
log.Error(ctx, "Debug server failed", err)
|
||||
return
|
||||
}
|
||||
log.Printf("Debug server finished")
|
||||
log.Print(ctx, "Debug server finished")
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func Render(tmpl *template.Template, fun func(*http.Request) interface{}) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
done := make(chan struct{})
|
||||
worker.Do(func() {
|
||||
defer close(done)
|
||||
var data interface{}
|
||||
if fun != nil {
|
||||
data = fun(r)
|
||||
}
|
||||
if err := tmpl.Execute(w, data); err != nil {
|
||||
log.Print(err)
|
||||
log.Error(context.Background(), "", err)
|
||||
}
|
||||
})
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -276,6 +294,10 @@ var BaseTemplate = template.Must(template.New("").Parse(`
|
|||
td.value {
|
||||
text-align: right;
|
||||
}
|
||||
ul.events {
|
||||
list-style-type: none;
|
||||
}
|
||||
|
||||
</style>
|
||||
{{block "head" .}}{{end}}
|
||||
</head>
|
||||
|
@ -283,7 +305,9 @@ td.value {
|
|||
<a href="/">Main</a>
|
||||
<a href="/info">Info</a>
|
||||
<a href="/memory">Memory</a>
|
||||
<a href="/debug/">Debug</a>
|
||||
<a href="/metrics">Metrics</a>
|
||||
<a href="/rpc">RPC</a>
|
||||
<a href="/trace">Trace</a>
|
||||
<hr>
|
||||
<h1>{{template "title" .}}</h1>
|
||||
{{block "body" .}}
|
||||
|
@ -354,8 +378,6 @@ var debugTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
|
|||
{{define "title"}}GoPls Debug pages{{end}}
|
||||
{{define "body"}}
|
||||
<a href="/debug/pprof">Profiling</a>
|
||||
<a href="/debug/rpcz">RPCz</a>
|
||||
<a href="/debug/tracez">Tracez</a>
|
||||
{{end}}
|
||||
`))
|
||||
|
||||
|
|
|
@@ -0,0 +1,172 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package debug

import (
"bytes"
"fmt"
"html/template"
"net/http"
"sort"
"strings"
"time"

"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/lsp/telemetry/trace"
)

var traceTmpl = template.Must(template.Must(BaseTemplate.Clone()).Parse(`
{{define "title"}}Trace Information{{end}}
{{define "body"}}
{{range .Traces}}<a href="/trace/{{.Name}}">{{.Name}}</a> last: {{.Last.Duration}}, longest: {{.Longest.Duration}}<br>{{end}}
{{if .Selected}}
<H2>{{.Selected.Name}}</H2>
{{if .Selected.Last}}<H3>Last</H3><ul>{{template "details" .Selected.Last}}</ul>{{end}}
{{if .Selected.Longest}}<H3>Longest</H3><ul>{{template "details" .Selected.Longest}}</ul>{{end}}
{{end}}
{{end}}
{{define "details"}}
<li>{{.Offset}} {{.Name}} {{.Duration}} {{.Tags}}</li>
{{if .Events}}<ul class=events>{{range .Events}}<li>{{.Offset}} {{.Tags}}</li>{{end}}</ul>{{end}}
{{if .Children}}<ul>{{range .Children}}{{template "details" .}}{{end}}</ul>{{end}}
{{end}}
`))

type traces struct {
sets map[string]*traceSet
unfinished map[trace.SpanID]*traceData
}

type traceResults struct {
Traces []*traceSet
Selected *traceSet
}

type traceSet struct {
Name string
Last *traceData
Longest *traceData
}

type traceData struct {
ID trace.SpanID
ParentID trace.SpanID
Name string
Start time.Time
Finish time.Time
Offset time.Duration
Duration time.Duration
Tags string
Events []traceEvent
Children []*traceData
}

type traceEvent struct {
Time time.Time
Offset time.Duration
Tags string
}

func (t *traces) export(span *trace.Span) {
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[trace.SpanID]*traceData)
}
// is this a completed span?
if span.Finish.IsZero() {
t.start(span)
} else {
t.finish(span)
}
}

func (t *traces) start(span *trace.Span) {
// just starting, add it to the unfinished map
td := &traceData{
ID: span.SpanID,
ParentID: span.ParentID,
Name: span.Name,
Start: span.Start,
Tags: renderTags(span.Tags),
}
t.unfinished[span.SpanID] = td
// and wire up parents if we have them
if !span.ParentID.IsValid() {
return
}
parent, found := t.unfinished[span.ParentID]
if !found {
// trace had an invalid parent, so it cannot itself be valid
return
}
parent.Children = append(parent.Children, td)

}

func (t *traces) finish(span *trace.Span) {
// finishing, must be already in the map
td, found := t.unfinished[span.SpanID]
if !found {
return // if this happens we are in a bad place
}
delete(t.unfinished, span.SpanID)

td.Finish = span.Finish
td.Duration = span.Finish.Sub(span.Start)
td.Events = make([]traceEvent, len(span.Events))
for i, event := range span.Events {
td.Events[i] = traceEvent{
Time: event.Time,
Tags: renderTags(event.Tags),
}
}

set, ok := t.sets[span.Name]
if !ok {
set = &traceSet{Name: span.Name}
t.sets[span.Name] = set
}
set.Last = td
if set.Longest == nil || set.Last.Duration > set.Longest.Duration {
set.Longest = set.Last
}
if !td.ParentID.IsValid() {
fillOffsets(td, td.Start)
}
}

func (t *traces) getData(req *http.Request) interface{} {
if len(t.sets) == 0 {
return nil
}
data := traceResults{}
data.Traces = make([]*traceSet, 0, len(t.sets))
for _, set := range t.sets {
data.Traces = append(data.Traces, set)
}
sort.Slice(data.Traces, func(i, j int) bool { return data.Traces[i].Name < data.Traces[j].Name })
if bits := strings.SplitN(req.URL.Path, "/trace/", 2); len(bits) > 1 {
data.Selected = t.sets[bits[1]]
}
return data
}

func fillOffsets(td *traceData, start time.Time) {
td.Offset = td.Start.Sub(start)
for i := range td.Events {
td.Events[i].Offset = td.Events[i].Time.Sub(start)
}
for _, child := range td.Children {
fillOffsets(child, start)
}
}

func renderTags(tags tag.List) string {
buf := &bytes.Buffer{}
for _, tag := range tags {
fmt.Fprintf(buf, "%v=%q ", tag.Key, tag.Value)
}
return buf.String()
}

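The exporter above is invoked twice per span: once while the span is still open (Finish is the zero time) and once when it completes, and the zero check routes between start and finish handling. A tiny standalone illustration of that routing, using a simplified span type rather than the gopls one:

    package main

    import (
        "fmt"
        "time"
    )

    type span struct {
        Name   string
        Finish time.Time
    }

    func export(s span) {
        // Mirrors traces.export above: a zero Finish time means "just started".
        if s.Finish.IsZero() {
            fmt.Println("start:", s.Name)
            return
        }
        fmt.Println("finish:", s.Name)
    }

    func main() {
        export(span{Name: "textDocument/definition"})                     // start
        export(span{Name: "textDocument/definition", Finish: time.Now()}) // finish
    }
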
@ -10,13 +10,16 @@ import (
|
|||
|
||||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/telemetry"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI) {
|
||||
ctx = telemetry.File.With(ctx, uri)
|
||||
f, err := view.GetFile(ctx, uri)
|
||||
if err != nil {
|
||||
s.session.Logger().Errorf(ctx, "no file for %s: %v", uri, err)
|
||||
log.Error(ctx, "no file", err, telemetry.File)
|
||||
return
|
||||
}
|
||||
// For non-Go files, don't return any diagnostics.
|
||||
|
@ -26,7 +29,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
|
|||
}
|
||||
reports, err := source.Diagnostics(ctx, view, gof, s.disabledAnalyses)
|
||||
if err != nil {
|
||||
s.session.Logger().Errorf(ctx, "failed to compute diagnostics for %s: %v", gof.URI(), err)
|
||||
log.Error(ctx, "failed to compute diagnostics", err, telemetry.File)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -38,7 +41,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
|
|||
if s.undelivered == nil {
|
||||
s.undelivered = make(map[span.URI][]source.Diagnostic)
|
||||
}
|
||||
s.session.Logger().Errorf(ctx, "failed to deliver diagnostic for %s (will retry): %v", uri, err)
|
||||
log.Error(ctx, "failed to deliver diagnostic (will retry)", err, telemetry.File)
|
||||
s.undelivered[uri] = diagnostics
|
||||
continue
|
||||
}
|
||||
|
@ -49,7 +52,7 @@ func (s *Server) Diagnostics(ctx context.Context, view source.View, uri span.URI
|
|||
// undelivered ones (only for remaining URIs).
|
||||
for uri, diagnostics := range s.undelivered {
|
||||
if err := s.publishDiagnostics(ctx, view, uri, diagnostics); err != nil {
|
||||
s.session.Logger().Errorf(ctx, "failed to deliver diagnostic for %s (will not retry): %v", uri, err)
|
||||
log.Error(ctx, "failed to deliver diagnostic for (will not retry)", err, telemetry.File)
|
||||
}
|
||||
// If we fail to deliver the same diagnostics twice, just give up.
|
||||
delete(s.undelivered, uri)
|
||||
|
|
|
@ -6,7 +6,6 @@ package lsp
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
|
@ -39,9 +38,9 @@ func spanToRange(ctx context.Context, view source.View, s span.Span) (source.GoF
|
|||
}
|
||||
if rng.Start == rng.End {
|
||||
// If we have a single point, assume we want the whole file.
|
||||
tok := f.GetToken(ctx)
|
||||
if tok == nil {
|
||||
return nil, nil, span.Range{}, fmt.Errorf("no file information for %s", f.URI())
|
||||
tok, err := f.GetToken(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, span.Range{}, err
|
||||
}
|
||||
rng.End = tok.Pos(tok.Size())
|
||||
}
|
||||
|
|
|
@ -15,16 +15,21 @@ import (
|
|||
"golang.org/x/tools/internal/lsp/debug"
|
||||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
func (s *Server) initialize(ctx context.Context, params *protocol.InitializeParams) (*protocol.InitializeResult, error) {
|
||||
s.initializedMu.Lock()
|
||||
defer s.initializedMu.Unlock()
|
||||
if s.isInitialized {
|
||||
s.stateMu.Lock()
|
||||
state := s.state
|
||||
s.stateMu.Unlock()
|
||||
if state >= serverInitializing {
|
||||
return nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server already initialized")
|
||||
}
|
||||
s.isInitialized = true // mark server as initialized now
|
||||
s.stateMu.Lock()
|
||||
s.state = serverInitializing
|
||||
s.stateMu.Unlock()
|
||||
|
||||
// TODO: Remove the option once we are certain there are no issues here.
|
||||
s.textDocumentSyncKind = protocol.Incremental
|
||||
|
@ -125,6 +130,10 @@ func (s *Server) setClientCapabilities(caps protocol.ClientCapabilities) {
|
|||
}
|
||||
|
||||
func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error {
|
||||
s.stateMu.Lock()
|
||||
s.state = serverInitialized
|
||||
s.stateMu.Unlock()
|
||||
|
||||
if s.configurationSupported {
|
||||
if s.dynamicConfigurationSupported {
|
||||
s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
|
||||
|
@ -138,23 +147,36 @@ func (s *Server) initialized(ctx context.Context, params *protocol.InitializedPa
|
|||
})
|
||||
}
|
||||
for _, view := range s.session.Views() {
|
||||
config, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{
|
||||
Items: []protocol.ConfigurationItem{{
|
||||
ScopeURI: protocol.NewURI(view.Folder()),
|
||||
Section: "gopls",
|
||||
}},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.processConfig(ctx, view, config[0]); err != nil {
|
||||
if err := s.fetchConfig(ctx, view); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
debug.PrintVersionInfo(buf, true, debug.PlainText)
|
||||
s.session.Logger().Infof(ctx, "%s", buf)
|
||||
log.Print(ctx, buf.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) fetchConfig(ctx context.Context, view source.View) error {
|
||||
configs, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{
|
||||
Items: []protocol.ConfigurationItem{{
|
||||
ScopeURI: protocol.NewURI(view.Folder()),
|
||||
Section: "gopls",
|
||||
}, {
|
||||
ScopeURI: protocol.NewURI(view.Folder()),
|
||||
Section: view.Name(),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, config := range configs {
|
||||
if err := s.processConfig(ctx, view, config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -209,7 +231,7 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int
|
|||
case "FullDocumentation":
|
||||
s.hoverKind = source.FullDocumentation
|
||||
default:
|
||||
view.Session().Logger().Errorf(ctx, "unsupported hover kind %s", hoverKind)
|
||||
log.Error(ctx, "unsupported hover kind", nil, tag.Of("HoverKind", hoverKind))
|
||||
// The default value is already be set to synopsis.
|
||||
}
|
||||
}
|
||||
|
@ -234,19 +256,21 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int
|
|||
}
|
||||
|
||||
func (s *Server) shutdown(ctx context.Context) error {
|
||||
s.initializedMu.Lock()
|
||||
defer s.initializedMu.Unlock()
|
||||
if !s.isInitialized {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
if s.state < serverInitialized {
|
||||
return jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server not initialized")
|
||||
}
|
||||
// drop all the active views
|
||||
s.session.Shutdown(ctx)
|
||||
s.isInitialized = false
|
||||
s.state = serverShutDown
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) exit(ctx context.Context) error {
|
||||
if s.isInitialized {
|
||||
s.stateMu.Lock()
|
||||
defer s.stateMu.Unlock()
|
||||
if s.state != serverShutDown {
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
|
|
|
@ -9,6 +9,8 @@ import (
|
|||
|
||||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -29,7 +31,7 @@ func (s *Server) documentHighlight(ctx context.Context, params *protocol.TextDoc
|
|||
}
|
||||
spans, err := source.Highlight(ctx, f, rng.Start)
|
||||
if err != nil {
|
||||
view.Session().Logger().Errorf(ctx, "no highlight for %s: %v", spn, err)
|
||||
log.Error(ctx, "no highlight", err, tag.Of("Span", spn))
|
||||
}
|
||||
return toProtocolHighlight(m, spans), nil
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ func (s *Server) hover(ctx context.Context, params *protocol.TextDocumentPositio
|
|||
}
|
||||
ident, err := source.Identifier(ctx, view, f, identRange.Start)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil
|
||||
}
|
||||
hover, err := ident.Hover(ctx, s.preferredContentFormat == protocol.Markdown, s.hoverKind)
|
||||
if err != nil {
|
||||
|
|
|
@ -7,9 +7,16 @@ package lsp
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -20,30 +27,105 @@ func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLink
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file := f.GetAST(ctx)
|
||||
file, err := f.GetAST(ctx, source.ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %v", uri)
|
||||
}
|
||||
// Add a Godoc link for each imported package.
|
||||
var result []protocol.DocumentLink
|
||||
for _, imp := range file.Imports {
|
||||
spn, err := span.NewRange(view.Session().Cache().FileSet(), imp.Pos(), imp.End()).Span()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rng, err := m.Range(spn)
|
||||
|
||||
var links []protocol.DocumentLink
|
||||
ast.Inspect(file, func(node ast.Node) bool {
|
||||
switch n := node.(type) {
|
||||
case *ast.ImportSpec:
|
||||
target, err := strconv.Unquote(n.Path.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
target, err := strconv.Unquote(imp.Path.Value)
|
||||
if err != nil {
|
||||
continue
|
||||
log.Error(ctx, "cannot unquote import path", err, tag.Of("Path", n.Path.Value))
|
||||
return false
|
||||
}
|
||||
target = "https://godoc.org/" + target
|
||||
result = append(result, protocol.DocumentLink{
|
||||
l, err := toProtocolLink(view, m, target, n.Pos(), n.End())
|
||||
if err != nil {
|
||||
log.Error(ctx, "cannot initialize DocumentLink", err, tag.Of("Path", n.Path.Value))
|
||||
return false
|
||||
}
|
||||
links = append(links, l)
|
||||
return false
|
||||
case *ast.BasicLit:
|
||||
if n.Kind != token.STRING {
|
||||
return false
|
||||
}
|
||||
l, err := findLinksInString(n.Value, n.Pos(), view, m)
|
||||
if err != nil {
|
||||
log.Error(ctx, "cannot find links in string", err)
|
||||
return false
|
||||
}
|
||||
links = append(links, l...)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
for _, commentGroup := range file.Comments {
|
||||
for _, comment := range commentGroup.List {
|
||||
l, err := findLinksInString(comment.Text, comment.Pos(), view, m)
|
||||
if err != nil {
|
||||
log.Error(ctx, "cannot find links in comment", err)
|
||||
continue
|
||||
}
|
||||
links = append(links, l...)
|
||||
}
|
||||
}
|
||||
|
||||
return links, nil
|
||||
}
|
||||
|
||||
func findLinksInString(src string, pos token.Pos, view source.View, mapper *protocol.ColumnMapper) ([]protocol.DocumentLink, error) {
|
||||
var links []protocol.DocumentLink
|
||||
re, err := getURLRegexp()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create regexp for links: %s", err.Error())
|
||||
}
|
||||
for _, urlIndex := range re.FindAllIndex([]byte(src), -1) {
|
||||
start := urlIndex[0]
|
||||
end := urlIndex[1]
|
||||
startPos := token.Pos(int(pos) + start)
|
||||
endPos := token.Pos(int(pos) + end)
|
||||
target := src[start:end]
|
||||
l, err := toProtocolLink(view, mapper, target, startPos, endPos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links = append(links, l)
|
||||
}
|
||||
return links, nil
|
||||
}
|
||||
|
||||
const urlRegexpString = "(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?"
|
||||
|
||||
var (
|
||||
urlRegexp *regexp.Regexp
|
||||
regexpOnce sync.Once
|
||||
regexpErr error
|
||||
)
|
||||
|
||||
func getURLRegexp() (*regexp.Regexp, error) {
|
||||
regexpOnce.Do(func() {
|
||||
urlRegexp, regexpErr = regexp.Compile(urlRegexpString)
|
||||
})
|
||||
return urlRegexp, regexpErr
|
||||
}
|
||||
|
||||
func toProtocolLink(view source.View, mapper *protocol.ColumnMapper, target string, start, end token.Pos) (protocol.DocumentLink, error) {
|
||||
spn, err := span.NewRange(view.Session().Cache().FileSet(), start, end).Span()
|
||||
if err != nil {
|
||||
return protocol.DocumentLink{}, err
|
||||
}
|
||||
rng, err := mapper.Range(spn)
|
||||
if err != nil {
|
||||
return protocol.DocumentLink{}, err
|
||||
}
|
||||
l := protocol.DocumentLink{
|
||||
Range: rng,
|
||||
Target: target,
|
||||
})
|
||||
}
|
||||
return result, nil
|
||||
return l, nil
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ import (
|
|||
"golang.org/x/tools/internal/lsp/protocol"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/tests"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -32,18 +31,19 @@ func TestLSP(t *testing.T) {
|
|||
type runner struct {
|
||||
server *Server
|
||||
data *tests.Data
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
const viewName = "lsp_test"
|
||||
|
||||
func testLSP(t *testing.T, exporter packagestest.Exporter) {
|
||||
ctx := tests.Context(t)
|
||||
data := tests.Load(t, exporter, "testdata")
|
||||
defer data.Exported.Cleanup()
|
||||
|
||||
log := xlog.New(xlog.StdSink{})
|
||||
cache := cache.New()
|
||||
session := cache.NewSession(log)
|
||||
view := session.NewView(viewName, span.FileURI(data.Config.Dir))
|
||||
session := cache.NewSession(ctx)
|
||||
view := session.NewView(ctx, viewName, span.FileURI(data.Config.Dir))
|
||||
view.SetEnv(data.Config.Env)
|
||||
for filename, content := range data.Config.Overlay {
|
||||
session.SetOverlay(span.FileURI(filename), content)
|
||||
|
@ -59,6 +59,7 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) {
|
|||
hoverKind: source.SynopsisDocumentation,
|
||||
},
|
||||
data: data,
|
||||
ctx: ctx,
|
||||
}
|
||||
tests.Run(t, r, data)
|
||||
}
|
||||
|
@ -67,7 +68,7 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) {
|
|||
func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
|
||||
v := r.server.session.View(viewName)
|
||||
for uri, want := range data {
|
||||
f, err := v.GetFile(context.Background(), uri)
|
||||
f, err := v.GetFile(r.ctx, uri)
|
||||
if err != nil {
|
||||
t.Fatalf("no file for %s: %v", f, err)
|
||||
}
|
||||
|
@ -75,7 +76,7 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
|
|||
if !ok {
|
||||
t.Fatalf("%s is not a Go file: %v", uri, err)
|
||||
}
|
||||
results, err := source.Diagnostics(context.Background(), v, gof, nil)
|
||||
results, err := source.Diagnostics(r.ctx, v, gof, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -218,7 +219,7 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
|
|||
|
||||
func (r *runner) runCompletion(t *testing.T, src span.Span) *protocol.CompletionList {
|
||||
t.Helper()
|
||||
list, err := r.server.Completion(context.Background(), &protocol.CompletionParams{
|
||||
list, err := r.server.Completion(r.ctx, &protocol.CompletionParams{
|
||||
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(src.URI()),
|
||||
|
@ -295,7 +296,6 @@ func summarizeCompletionItems(i int, want []source.CompletionItem, got []protoco
|
|||
}
|
||||
|
||||
func (r *runner) Format(t *testing.T, data tests.Formats) {
|
||||
ctx := context.Background()
|
||||
for _, spn := range data {
|
||||
uri := spn.URI()
|
||||
filename := uri.Filename()
|
||||
|
@ -305,7 +305,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
|
|||
return out, nil
|
||||
}))
|
||||
|
||||
edits, err := r.server.Formatting(context.Background(), &protocol.DocumentFormattingParams{
|
||||
edits, err := r.server.Formatting(r.ctx, &protocol.DocumentFormattingParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(uri),
|
||||
},
|
||||
|
@ -316,7 +316,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
|
|||
}
|
||||
continue
|
||||
}
|
||||
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(uri), uri)
|
||||
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(uri), uri)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@ -333,7 +333,6 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
|
|||
}
|
||||
|
||||
func (r *runner) Import(t *testing.T, data tests.Imports) {
|
||||
ctx := context.Background()
|
||||
for _, spn := range data {
|
||||
uri := spn.URI()
|
||||
filename := uri.Filename()
|
||||
|
@ -343,7 +342,7 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
|
|||
return out, nil
|
||||
}))
|
||||
|
||||
actions, err := r.server.CodeAction(context.Background(), &protocol.CodeActionParams{
|
||||
actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(uri),
|
||||
},
|
||||
|
@ -354,7 +353,7 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
|
|||
}
|
||||
continue
|
||||
}
|
||||
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(uri), uri)
|
||||
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(uri), uri)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@ -393,13 +392,13 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
|
|||
var locs []protocol.Location
|
||||
var hover *protocol.Hover
|
||||
if d.IsType {
|
||||
locs, err = r.server.TypeDefinition(context.Background(), params)
|
||||
locs, err = r.server.TypeDefinition(r.ctx, params)
|
||||
} else {
|
||||
locs, err = r.server.Definition(context.Background(), params)
|
||||
locs, err = r.server.Definition(r.ctx, params)
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", d.Src, err)
|
||||
}
|
||||
hover, err = r.server.Hover(context.Background(), params)
|
||||
hover, err = r.server.Hover(r.ctx, params)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", d.Src, err)
|
||||
|
@ -446,7 +445,7 @@ func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
|
|||
TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI},
|
||||
Position: loc.Range.Start,
|
||||
}
|
||||
highlights, err := r.server.DocumentHighlight(context.Background(), params)
|
||||
highlights, err := r.server.DocumentHighlight(r.ctx, params)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -492,7 +491,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
|
|||
Position: loc.Range.Start,
|
||||
},
|
||||
}
|
||||
got, err := r.server.References(context.Background(), params)
|
||||
got, err := r.server.References(r.ctx, params)
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", src, err)
|
||||
}
|
||||
|
@ -509,7 +508,6 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
|
|||
}
|
||||
|
||||
func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
||||
ctx := context.Background()
|
||||
for spn, newText := range data {
|
||||
tag := fmt.Sprintf("%s-rename", newText)
|
||||
|
||||
|
@ -524,7 +522,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
|||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
|
||||
workspaceEdits, err := r.server.Rename(ctx, &protocol.RenameParams{
|
||||
workspaceEdits, err := r.server.Rename(r.ctx, &protocol.RenameParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(uri),
|
||||
},
|
||||
|
@ -544,7 +542,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
|||
var res []string
|
||||
for uri, edits := range *workspaceEdits.Changes {
|
||||
spnURI := span.URI(uri)
|
||||
_, m, err := getSourceFile(ctx, r.server.session.ViewOf(span.URI(spnURI)), spnURI)
|
||||
_, m, err := getSourceFile(r.ctx, r.server.session.ViewOf(span.URI(spnURI)), spnURI)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@ -582,7 +580,6 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
|||
|
||||
func applyEdits(contents string, edits []source.TextEdit) string {
|
||||
res := contents
|
||||
sortSourceTextEdits(edits)
|
||||
|
||||
// Apply the edits from the end of the file forward
|
||||
// to preserve the offsets
|
||||
|
@ -596,15 +593,6 @@ func applyEdits(contents string, edits []source.TextEdit) string {
|
|||
return res
|
||||
}
|
||||
|
||||
func sortSourceTextEdits(d []source.TextEdit) {
|
||||
sort.Slice(d, func(i int, j int) bool {
|
||||
if r := span.Compare(d[i].Span, d[j].Span); r != 0 {
|
||||
return r < 0
|
||||
}
|
||||
return d[i].NewText < d[j].NewText
|
||||
})
|
||||
}
|
||||
|
||||
func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
|
||||
for uri, expectedSymbols := range data {
|
||||
params := &protocol.DocumentSymbolParams{
|
||||
|
@ -612,7 +600,7 @@ func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
|
|||
URI: string(uri),
|
||||
},
|
||||
}
|
||||
symbols, err := r.server.DocumentSymbol(context.Background(), params)
|
||||
symbols, err := r.server.DocumentSymbol(r.ctx, params)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -688,7 +676,7 @@ func (r *runner) SignatureHelp(t *testing.T, data tests.Signatures) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", loc, err)
|
||||
}
|
||||
gotSignatures, err := r.server.SignatureHelp(context.Background(), &protocol.TextDocumentPositionParams{
|
||||
gotSignatures, err := r.server.SignatureHelp(r.ctx, &protocol.TextDocumentPositionParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(spn.URI()),
|
||||
},
|
||||
|
@ -754,7 +742,7 @@ func (r *runner) Link(t *testing.T, data tests.Links) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotLinks, err := r.server.DocumentLink(context.Background(), &protocol.DocumentLinkParams{
|
||||
gotLinks, err := r.server.DocumentLink(r.ctx, &protocol.DocumentLinkParams{
|
||||
TextDocument: protocol.TextDocumentIdentifier{
|
||||
URI: protocol.NewURI(uri),
|
||||
},
|
||||
|
@ -762,15 +750,30 @@ func (r *runner) Link(t *testing.T, data tests.Links) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var notePositions []token.Position
|
||||
links := make(map[span.Span]string, len(wantLinks))
|
||||
for _, link := range wantLinks {
|
||||
links[link.Src] = link.Target
|
||||
notePositions = append(notePositions, link.NotePosition)
|
||||
}
|
||||
|
||||
for _, link := range gotLinks {
|
||||
spn, err := m.RangeSpan(link.Range)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
linkInNote := false
|
||||
for _, notePosition := range notePositions {
|
||||
// Drop the links found inside expectation notes arguments as this links are not collected by expect package
|
||||
if notePosition.Line == spn.Start().Line() &&
|
||||
notePosition.Column <= spn.Start().Column() {
|
||||
delete(links, spn)
|
||||
linkInNote = true
|
||||
}
|
||||
}
|
||||
if linkInNote {
|
||||
continue
|
||||
}
|
||||
if target, ok := links[spn]; ok {
|
||||
delete(links, spn)
|
||||
if target != link.Target {
|
||||
|
|
|
@@ -0,0 +1,40 @@
package protocol

import (
"context"
"fmt"
"time"

"golang.org/x/tools/internal/lsp/telemetry/log"
"golang.org/x/tools/internal/lsp/telemetry/tag"
"golang.org/x/tools/internal/xcontext"
)

func init() {
log.AddLogger(logger)
}

type contextKey int

const (
clientKey = contextKey(iota)
)

func WithClient(ctx context.Context, client Client) context.Context {
return context.WithValue(ctx, clientKey, client)
}

// logger implements log.Logger in terms of the LogMessage call to a client.
func logger(ctx context.Context, at time.Time, tags tag.List) bool {
client, ok := ctx.Value(clientKey).(Client)
if !ok {
return false
}
entry := log.ToEntry(ctx, time.Time{}, tags)
msg := &LogMessageParams{Type: Info, Message: fmt.Sprint(entry)}
if entry.Error != nil {
msg.Type = Error
}
go client.LogMessage(xcontext.Detach(ctx), msg)
return true
}

@@ -1,21 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package protocol

import (
"context"
"time"
)

// detatch returns a context that keeps all the values of its parent context
// but detatches from the cancellation and error handling.
func detatchContext(ctx context.Context) context.Context { return detatchedContext{ctx} }

type detatchedContext struct{ parent context.Context }

func (v detatchedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (v detatchedContext) Done() <-chan struct{} { return nil }
func (v detatchedContext) Err() error { return nil }
func (v detatchedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }

@@ -13,7 +13,7 @@ var (
namesInitializeError [int(UnknownProtocolVersion) + 1]string
namesMessageType [int(Log) + 1]string
namesFileChangeType [int(Deleted) + 1]string
namesWatchKind [int(Change) + 1]string
namesWatchKind [int(WatchDelete) + 1]string
namesCompletionTriggerKind [int(TriggerForIncompleteCompletions) + 1]string
namesDiagnosticSeverity [int(SeverityHint) + 1]string
namesDiagnosticTag [int(Unnecessary) + 1]string

@@ -40,7 +40,9 @@ func init() {
namesFileChangeType[int(Changed)] = "Changed"
namesFileChangeType[int(Deleted)] = "Deleted"

namesWatchKind[int(Change)] = "Change"
namesWatchKind[int(WatchCreate)] = "WatchCreate"
namesWatchKind[int(WatchChange)] = "WatchChange"
namesWatchKind[int(WatchDelete)] = "WatchDelete"

namesCompletionTriggerKind[int(Invoked)] = "Invoked"
namesCompletionTriggerKind[int(TriggerCharacter)] = "TriggerCharacter"

@@ -1,32 +0,0 @@
package protocol

import (
"context"

"golang.org/x/tools/internal/lsp/xlog"
)

// logSink implements xlog.Sink in terms of the LogMessage call to a client.
type logSink struct {
client Client
}

// NewLogger returns an xlog.Sink that sends its messages using client.LogMessage.
// It maps Debug to the Log level, Info and Error to their matching levels, and
// does not support warnings.
func NewLogger(client Client) xlog.Sink {
return logSink{client: client}
}

func (s logSink) Log(ctx context.Context, level xlog.Level, message string) {
typ := Log
switch level {
case xlog.ErrorLevel:
typ = Error
case xlog.InfoLevel:
typ = Info
case xlog.DebugLevel:
typ = Log
}
s.client.LogMessage(ctx, &LogMessageParams{Type: typ, Message: message})
}

@ -8,46 +8,56 @@ import (
|
|||
"context"
|
||||
|
||||
"golang.org/x/tools/internal/jsonrpc2"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/xcontext"
|
||||
)
|
||||
|
||||
const defaultMessageBufferSize = 20
|
||||
const defaultRejectIfOverloaded = false
|
||||
type DocumentUri = string
|
||||
|
||||
func canceller(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID) {
|
||||
ctx = detatchContext(ctx)
|
||||
ctx, span := trace.StartSpan(ctx, "protocol.canceller")
|
||||
defer span.End()
|
||||
type canceller struct{ jsonrpc2.EmptyHandler }
|
||||
|
||||
type clientHandler struct {
|
||||
canceller
|
||||
client Client
|
||||
}
|
||||
|
||||
type serverHandler struct {
|
||||
canceller
|
||||
server Server
|
||||
}
|
||||
|
||||
func (canceller) Cancel(ctx context.Context, conn *jsonrpc2.Conn, id jsonrpc2.ID, cancelled bool) bool {
|
||||
if cancelled {
|
||||
return false
|
||||
}
|
||||
ctx = xcontext.Detach(ctx)
|
||||
ctx, done := trace.StartSpan(ctx, "protocol.canceller")
|
||||
defer done()
|
||||
conn.Notify(ctx, "$/cancelRequest", &CancelParams{ID: id})
|
||||
return true
|
||||
}
|
||||
|
||||
func NewClient(stream jsonrpc2.Stream, client Client) (*jsonrpc2.Conn, Server, xlog.Logger) {
|
||||
log := xlog.New(NewLogger(client))
|
||||
func NewClient(ctx context.Context, stream jsonrpc2.Stream, client Client) (context.Context, *jsonrpc2.Conn, Server) {
|
||||
ctx = WithClient(ctx, client)
|
||||
conn := jsonrpc2.NewConn(stream)
|
||||
conn.Capacity = defaultMessageBufferSize
|
||||
conn.RejectIfOverloaded = defaultRejectIfOverloaded
|
||||
conn.Handler = clientHandler(log, client)
|
||||
conn.Canceler = jsonrpc2.Canceler(canceller)
|
||||
return conn, &serverDispatcher{Conn: conn}, log
|
||||
conn.AddHandler(&clientHandler{client: client})
|
||||
return ctx, conn, &serverDispatcher{Conn: conn}
|
||||
}
|
||||
|
||||
func NewServer(stream jsonrpc2.Stream, server Server) (*jsonrpc2.Conn, Client, xlog.Logger) {
|
||||
func NewServer(ctx context.Context, stream jsonrpc2.Stream, server Server) (context.Context, *jsonrpc2.Conn, Client) {
|
||||
conn := jsonrpc2.NewConn(stream)
|
||||
client := &clientDispatcher{Conn: conn}
|
||||
log := xlog.New(NewLogger(client))
|
||||
conn.Capacity = defaultMessageBufferSize
|
||||
conn.RejectIfOverloaded = defaultRejectIfOverloaded
|
||||
conn.Handler = serverHandler(log, server)
|
||||
conn.Canceler = jsonrpc2.Canceler(canceller)
|
||||
return conn, client, log
|
||||
ctx = WithClient(ctx, client)
|
||||
conn.AddHandler(&serverHandler{server: server})
|
||||
return ctx, conn, client
|
||||
}
|
||||
|
||||
func sendParseError(ctx context.Context, log xlog.Logger, req *jsonrpc2.Request, err error) {
|
||||
func sendParseError(ctx context.Context, req *jsonrpc2.Request, err error) {
|
||||
if _, ok := err.(*jsonrpc2.Error); !ok {
|
||||
err = jsonrpc2.NewErrorf(jsonrpc2.CodeParseError, "%v", err)
|
||||
}
|
||||
if err := req.Reply(ctx, nil, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"encoding/json"
|
||||
|
||||
"golang.org/x/tools/internal/jsonrpc2"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
|
@ -23,117 +23,127 @@ type Client interface {
|
|||
ApplyEdit(context.Context, *ApplyWorkspaceEditParams) (*ApplyWorkspaceEditResponse, error)
|
||||
}
|
||||
|
||||
func clientHandler(log xlog.Logger, client Client) jsonrpc2.Handler {
|
||||
return func(ctx context.Context, r *jsonrpc2.Request) {
|
||||
func (h clientHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
|
||||
if delivered {
|
||||
return false
|
||||
}
|
||||
switch r.Method {
|
||||
case "$/cancelRequest":
|
||||
var params CancelParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
r.Conn().Cancel(params.ID)
|
||||
return true
|
||||
case "window/showMessage": // notif
|
||||
var params ShowMessageParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := client.ShowMessage(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.client.ShowMessage(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "window/logMessage": // notif
|
||||
var params LogMessageParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := client.LogMessage(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.client.LogMessage(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "telemetry/event": // notif
|
||||
var params interface{}
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := client.Event(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.client.Event(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/publishDiagnostics": // notif
|
||||
var params PublishDiagnosticsParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := client.PublishDiagnostics(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.client.PublishDiagnostics(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/workspaceFolders": // req
|
||||
if r.Params != nil {
|
||||
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
|
||||
return
|
||||
return true
|
||||
}
|
||||
resp, err := client.WorkspaceFolders(ctx)
|
||||
resp, err := h.client.WorkspaceFolders(ctx)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/configuration": // req
|
||||
var params ConfigurationParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := client.Configuration(ctx, ¶ms)
|
||||
resp, err := h.client.Configuration(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "client/registerCapability": // req
|
||||
var params RegistrationParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
err := client.RegisterCapability(ctx, ¶ms)
|
||||
err := h.client.RegisterCapability(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, nil, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "client/unregisterCapability": // req
|
||||
var params UnregistrationParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
err := client.UnregisterCapability(ctx, ¶ms)
|
||||
err := h.client.UnregisterCapability(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, nil, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "window/showMessageRequest": // req
|
||||
var params ShowMessageRequestParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := client.ShowMessageRequest(ctx, ¶ms)
|
||||
resp, err := h.client.ShowMessageRequest(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/applyEdit": // req
|
||||
var params ApplyWorkspaceEditParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := client.ApplyEdit(ctx, ¶ms)
|
||||
resp, err := h.client.ApplyEdit(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
|
||||
default:
|
||||
if r.IsNotify() {
|
||||
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
// Package protocol contains data types and code for LSP jsonrpcs
// generated automatically from vscode-languageserver-node
// commit: c1e8923f8ea3b1f9c61dadf97448244d9ffbf7ae
// last fetched Tue May 21 2019 07:36:27 GMT-0400 (Eastern Daylight Time)
// commit: 8801c20b667945f455d7e023c71d2f741caeda25
// last fetched Sat Jul 13 2019 18:33:10 GMT-0700 (Pacific Daylight Time)
package protocol

// Code generated (see typescript/README.md) DO NOT EDIT.
|
||||
|
@ -155,6 +155,26 @@ type FoldingRangeParams struct {
	TextDocument TextDocumentIdentifier `json:"textDocument"`
}

// SelectionRangeProviderOptions is
type SelectionRangeProviderOptions struct {
}

/*SelectionRangeParams defined:
 * A parameter literal used in selection range requests.
 */
type SelectionRangeParams struct {

	/*TextDocument defined:
	 * The text document.
	 */
	TextDocument TextDocumentIdentifier `json:"textDocument"`

	/*Positions defined:
	 * The positions inside the text document.
	 */
	Positions []Position `json:"positions"`
}

/*Registration defined:
 * General parameters to to register for an notification or to register a provider.
 */
|
||||
|
@ -1241,6 +1261,19 @@ type ClientCapabilities struct {
|
|||
*/
|
||||
LinkSupport bool `json:"linkSupport,omitempty"`
|
||||
} `json:"declaration,omitempty"`
|
||||
|
||||
/*SelectionRange defined:
|
||||
* Capabilities specific to `textDocument/selectionRange` requests
|
||||
*/
|
||||
SelectionRange struct {
|
||||
|
||||
/*DynamicRegistration defined:
|
||||
* Whether implementation supports dynamic registration for selection range providers. If this is set to `true`
|
||||
* the client supports the new `(SelectionRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)`
|
||||
* return value for the corresponding server capability as well.
|
||||
*/
|
||||
DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
|
||||
} `json:"selectionRange,omitempty"`
|
||||
} `json:"textDocument,omitempty"`
|
||||
|
||||
/*Window defined:
|
||||
|
@ -1600,6 +1633,11 @@ type ServerCapabilities struct {
	 * The server provides Goto Type Definition support.
	 */
	DeclarationProvider bool `json:"declarationProvider,omitempty"` // boolean | (TextDocumentRegistrationOptions & StaticRegistrationOptions)

	/*SelectionRangeProvider defined:
	 * The server provides selection range support.
	 */
	SelectionRangeProvider bool `json:"selectionRangeProvider,omitempty"` // boolean | (TextDocumentRegistrationOptions & StaticRegistrationOptions & SelectionRangeProviderOptions)
}

// InitializeParams is
|
||||
|
@ -1626,7 +1664,7 @@ type InitializeParams struct {
|
|||
*
|
||||
* @deprecated in favour of workspaceFolders.
|
||||
*/
|
||||
RootURI string `json:"rootUri"`
|
||||
RootURI DocumentUri `json:"rootUri"`
|
||||
|
||||
/*Capabilities defined:
|
||||
* The capabilities provided by the client (editor or tool)
|
||||
|
@ -1861,7 +1899,7 @@ type FileEvent struct {
|
|||
/*URI defined:
|
||||
* The file's uri.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*Type defined:
|
||||
* The change type.
|
||||
|
@ -1910,10 +1948,12 @@ type PublishDiagnosticsParams struct {
|
|||
/*URI defined:
|
||||
* The URI for which diagnostic information is reported.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*Version defined:
|
||||
* Optional the version number of the document the diagnostics are published for.
|
||||
*
|
||||
* @since 3.15
|
||||
*/
|
||||
Version float64 `json:"version,omitempty"`
|
||||
|
||||
|
@ -2264,7 +2304,7 @@ type Range struct {
|
|||
type Location struct {
|
||||
|
||||
// URI is
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
// Range is
|
||||
Range Range `json:"range"`
|
||||
|
@ -2287,7 +2327,7 @@ type LocationLink struct {
|
|||
/*TargetURI defined:
|
||||
* The target resource identifier of this link.
|
||||
*/
|
||||
TargetURI string `json:"targetUri"`
|
||||
TargetURI DocumentUri `json:"targetUri"`
|
||||
|
||||
/*TargetRange defined:
|
||||
* The full target range of this link. If the target for example is a symbol then target range is the
|
||||
|
@ -2528,7 +2568,7 @@ type CreateFile struct {
|
|||
/*URI defined:
|
||||
* The resource to create.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*Options defined:
|
||||
* Additional options
|
||||
|
@ -2565,12 +2605,12 @@ type RenameFile struct {
|
|||
/*OldURI defined:
|
||||
* The old (existing) location.
|
||||
*/
|
||||
OldURI string `json:"oldUri"`
|
||||
OldURI DocumentUri `json:"oldUri"`
|
||||
|
||||
/*NewURI defined:
|
||||
* The new location.
|
||||
*/
|
||||
NewURI string `json:"newUri"`
|
||||
NewURI DocumentUri `json:"newUri"`
|
||||
|
||||
/*Options defined:
|
||||
* Rename options.
|
||||
|
@ -2607,7 +2647,7 @@ type DeleteFile struct {
|
|||
/*URI defined:
|
||||
* The file to delete.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*Options defined:
|
||||
* Delete options.
|
||||
|
@ -2656,7 +2696,7 @@ type TextDocumentIdentifier struct {
|
|||
/*URI defined:
|
||||
* The text document's uri.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
}
|
||||
|
||||
/*VersionedTextDocumentIdentifier defined:
|
||||
|
@ -2684,7 +2724,7 @@ type TextDocumentItem struct {
|
|||
/*URI defined:
|
||||
* The text document's uri.
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*LanguageID defined:
|
||||
* The text document's language identifier
|
||||
|
@ -2809,8 +2849,6 @@ type CompletionItem struct {
|
|||
* and a completion item with an `insertText` of `console` is provided it
|
||||
* will only insert `sole`. Therefore it is recommended to use `textEdit` instead
|
||||
* since it avoids additional client side interpretation.
|
||||
*
|
||||
* @deprecated Use textEdit instead.
|
||||
*/
|
||||
InsertText string `json:"insertText,omitempty"`
|
||||
|
||||
|
@ -3262,6 +3300,23 @@ type DocumentLink struct {
	Data interface{} `json:"data,omitempty"`
}

/*SelectionRange defined:
 * A selection range represents a part of a selection hierarchy. A selection range
 * may have a parent selection range that contains it.
 */
type SelectionRange struct {

	/*Range defined:
	 * The [range](#Range) of this selection range.
	 */
	Range Range `json:"range"`

	/*Parent defined:
	 * The parent selection range containing this range. Therefore `parent.range` must contain `this.range`.
	 */
	Parent *SelectionRange `json:"parent,omitempty"`
}

/*TextDocument defined:
 * A simple text document. Not to be implemented.
 */
|
||||
|
@ -3274,7 +3329,7 @@ type TextDocument struct {
|
|||
*
|
||||
* @readonly
|
||||
*/
|
||||
URI string `json:"uri"`
|
||||
URI DocumentUri `json:"uri"`
|
||||
|
||||
/*LanguageID defined:
|
||||
* The identifier of the language associated with this document.
|
||||
|
@ -3556,10 +3611,20 @@ const (
	 */
	Deleted FileChangeType = 3

	/*Change defined:
	/*WatchCreate defined:
	 * Interested in create events.
	 */
	WatchCreate WatchKind = 1

	/*WatchChange defined:
	 * Interested in change events
	 */
	Change WatchKind = 2
	WatchChange WatchKind = 2

	/*WatchDelete defined:
	 * Interested in delete events
	 */
	WatchDelete WatchKind = 4

	/*Invoked defined:
	 * Completion was triggered by typing an identifier (24x7 code
|
||||
|
@ -3952,6 +4017,12 @@ type DocumentFilter struct {
 */
type DocumentSelector []DocumentFilter

// DocumentURI is a type
/**
 * A tagging type for string properties that are actually URIs.
 */
type DocumentURI string

// DefinitionLink is a type
/**
 * Information about where a symbol is defined.
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
	"encoding/json"

	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/lsp/xlog"
	"golang.org/x/tools/internal/lsp/telemetry/log"
)

type Server interface {

@ -29,6 +29,7 @@ type Server interface {
	ColorPresentation(context.Context, *ColorPresentationParams) ([]ColorPresentation, error)
	FoldingRange(context.Context, *FoldingRangeParams) ([]FoldingRange, error)
	Declaration(context.Context, *TextDocumentPositionParams) ([]DeclarationLink, error)
	SelectionRange(context.Context, *SelectionRangeParams) ([]SelectionRange, error)
	Initialize(context.Context, *InitializeParams) (*InitializeResult, error)
	Shutdown(context.Context) error
	WillSaveWaitUntil(context.Context, *WillSaveTextDocumentParams) ([]TextEdit, error)
|
||||
|
@ -54,414 +55,466 @@ type Server interface {
|
|||
ExecuteCommand(context.Context, *ExecuteCommandParams) (interface{}, error)
|
||||
}
|
||||
|
||||
func serverHandler(log xlog.Logger, server Server) jsonrpc2.Handler {
|
||||
return func(ctx context.Context, r *jsonrpc2.Request) {
|
||||
func (h serverHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
|
||||
if delivered {
|
||||
return false
|
||||
}
|
||||
switch r.Method {
|
||||
case "$/cancelRequest":
|
||||
var params CancelParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
r.Conn().Cancel(params.ID)
|
||||
return true
|
||||
case "workspace/didChangeWorkspaceFolders": // notif
|
||||
var params DidChangeWorkspaceFoldersParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidChangeWorkspaceFolders(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidChangeWorkspaceFolders(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "initialized": // notif
|
||||
var params InitializedParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.Initialized(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.Initialized(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "exit": // notif
|
||||
if err := server.Exit(ctx); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.Exit(ctx); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/didChangeConfiguration": // notif
|
||||
var params DidChangeConfigurationParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidChangeConfiguration(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidChangeConfiguration(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/didOpen": // notif
|
||||
var params DidOpenTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidOpen(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidOpen(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/didChange": // notif
|
||||
var params DidChangeTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidChange(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidChange(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/didClose": // notif
|
||||
var params DidCloseTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidClose(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidClose(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/didSave": // notif
|
||||
var params DidSaveTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidSave(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidSave(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/willSave": // notif
|
||||
var params WillSaveTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.WillSave(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.WillSave(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/didChangeWatchedFiles": // notif
|
||||
var params DidChangeWatchedFilesParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.DidChangeWatchedFiles(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.DidChangeWatchedFiles(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "$/setTraceNotification": // notif
|
||||
var params SetTraceParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.SetTraceNotification(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.SetTraceNotification(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "$/logTraceNotification": // notif
|
||||
var params LogTraceParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := server.LogTraceNotification(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
if err := h.server.LogTraceNotification(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/implementation": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Implementation(ctx, ¶ms)
|
||||
resp, err := h.server.Implementation(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/typeDefinition": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.TypeDefinition(ctx, ¶ms)
|
||||
resp, err := h.server.TypeDefinition(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/documentColor": // req
|
||||
var params DocumentColorParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.DocumentColor(ctx, ¶ms)
|
||||
resp, err := h.server.DocumentColor(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/colorPresentation": // req
|
||||
var params ColorPresentationParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.ColorPresentation(ctx, ¶ms)
|
||||
resp, err := h.server.ColorPresentation(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/foldingRange": // req
|
||||
var params FoldingRangeParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.FoldingRange(ctx, ¶ms)
|
||||
resp, err := h.server.FoldingRange(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/declaration": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Declaration(ctx, ¶ms)
|
||||
resp, err := h.server.Declaration(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/selectionRange": // req
|
||||
var params SelectionRangeParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := h.server.SelectionRange(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "initialize": // req
|
||||
var params InitializeParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Initialize(ctx, ¶ms)
|
||||
resp, err := h.server.Initialize(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "shutdown": // req
|
||||
if r.Params != nil {
|
||||
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
|
||||
return
|
||||
return true
|
||||
}
|
||||
err := server.Shutdown(ctx)
|
||||
err := h.server.Shutdown(ctx)
|
||||
if err := r.Reply(ctx, nil, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/willSaveWaitUntil": // req
|
||||
var params WillSaveTextDocumentParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.WillSaveWaitUntil(ctx, ¶ms)
|
||||
resp, err := h.server.WillSaveWaitUntil(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/completion": // req
|
||||
var params CompletionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Completion(ctx, ¶ms)
|
||||
resp, err := h.server.Completion(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "completionItem/resolve": // req
|
||||
var params CompletionItem
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Resolve(ctx, ¶ms)
|
||||
resp, err := h.server.Resolve(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/hover": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Hover(ctx, ¶ms)
|
||||
resp, err := h.server.Hover(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/signatureHelp": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.SignatureHelp(ctx, ¶ms)
|
||||
resp, err := h.server.SignatureHelp(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/definition": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Definition(ctx, ¶ms)
|
||||
resp, err := h.server.Definition(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/references": // req
|
||||
var params ReferenceParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.References(ctx, ¶ms)
|
||||
resp, err := h.server.References(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/documentHighlight": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.DocumentHighlight(ctx, ¶ms)
|
||||
resp, err := h.server.DocumentHighlight(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/documentSymbol": // req
|
||||
var params DocumentSymbolParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.DocumentSymbol(ctx, ¶ms)
|
||||
resp, err := h.server.DocumentSymbol(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/symbol": // req
|
||||
var params WorkspaceSymbolParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Symbol(ctx, ¶ms)
|
||||
resp, err := h.server.Symbol(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/codeAction": // req
|
||||
var params CodeActionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.CodeAction(ctx, ¶ms)
|
||||
resp, err := h.server.CodeAction(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/codeLens": // req
|
||||
var params CodeLensParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.CodeLens(ctx, ¶ms)
|
||||
resp, err := h.server.CodeLens(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "codeLens/resolve": // req
|
||||
var params CodeLens
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.ResolveCodeLens(ctx, ¶ms)
|
||||
resp, err := h.server.ResolveCodeLens(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/formatting": // req
|
||||
var params DocumentFormattingParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Formatting(ctx, ¶ms)
|
||||
resp, err := h.server.Formatting(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/rangeFormatting": // req
|
||||
var params DocumentRangeFormattingParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.RangeFormatting(ctx, ¶ms)
|
||||
resp, err := h.server.RangeFormatting(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/onTypeFormatting": // req
|
||||
var params DocumentOnTypeFormattingParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.OnTypeFormatting(ctx, ¶ms)
|
||||
resp, err := h.server.OnTypeFormatting(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/rename": // req
|
||||
var params RenameParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.Rename(ctx, ¶ms)
|
||||
resp, err := h.server.Rename(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/prepareRename": // req
|
||||
var params TextDocumentPositionParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.PrepareRename(ctx, ¶ms)
|
||||
resp, err := h.server.PrepareRename(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "textDocument/documentLink": // req
|
||||
var params DocumentLinkParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.DocumentLink(ctx, ¶ms)
|
||||
resp, err := h.server.DocumentLink(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "documentLink/resolve": // req
|
||||
var params DocumentLink
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.ResolveDocumentLink(ctx, ¶ms)
|
||||
resp, err := h.server.ResolveDocumentLink(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
case "workspace/executeCommand": // req
|
||||
var params ExecuteCommandParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
resp, err := server.ExecuteCommand(ctx, ¶ms)
|
||||
resp, err := h.server.ExecuteCommand(ctx, ¶ms)
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true
|
||||
|
||||
default:
|
||||
if r.IsNotify() {
|
||||
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -564,6 +617,14 @@ func (s *serverDispatcher) Declaration(ctx context.Context, params *TextDocument
	return result, nil
}

func (s *serverDispatcher) SelectionRange(ctx context.Context, params *SelectionRangeParams) ([]SelectionRange, error) {
	var result []SelectionRange
	if err := s.Conn.Call(ctx, "textDocument/selectionRange", params, &result); err != nil {
		return nil, err
	}
	return result, nil
}

func (s *serverDispatcher) Initialize(ctx context.Context, params *InitializeParams) (*InitializeResult, error) {
	var result InitializeResult
	if err := s.Conn.Call(ctx, "initialize", params, &result); err != nil {
|
||||
|
|
|
@ -4,10 +4,10 @@

1. Make sure `node` is installed.
   As explained at the [node site](<https://nodejs.org> Node)
   you may need `node install @types/node` for the node runtime types
2. Install the typescript compiler, with `node install typescript`.
   you may need `npm install @types/node` for the node runtime types
2. Install the typescript compiler, with `npm install typescript`.
3. Make sure `tsc` and `node` are in your execution path.
4. Get the typescript code for the jsonrpc protocol with `git clone vscode-lanuageserver-node.git`
4. Get the typescript code for the jsonrpc protocol with `git clone git@github.com:microsoft/vscode-languageserver-node.git`
|
||||
|
||||
## Usage
|
||||
|
||||
|
|
|
@ -582,7 +582,7 @@ function generate(files: string[], options: ts.CompilerOptions): void {
    }
    if (x[0].goType == 'bool') {  // take it
      if (x[1].goType == 'RenameOptions') {
        return ({goType: 'RenameOptions', gostuff: getText(node)})
        return ({goType: 'interface{}', gostuff: getText(node)})
      }
      return ({goType: 'bool', gostuff: getText(node)})
    }
|
||||
|
@ -927,7 +927,7 @@ let byName = new Map<string, Struct>();
// consts are unique. (Go consts are package-level, but Typescript's are
// not.) Use suffixes to minimize changes to gopls.
let pref = new Map<string, string>(
  [['DiagnosticSeverity', 'Severity']])  // typeName->prefix
  [['DiagnosticSeverity', 'Severity'], ['WatchKind', 'Watch']])  // typeName->prefix
let suff = new Map<string, string>([
  ['CompletionItemKind', 'Completion'], ['InsertTextFormat', 'TextFormat']
])
|
||||
|
|
|
@ -59,7 +59,7 @@ function generate(files: string[], options: ts.CompilerOptions): void {
  setReceives();  // distinguish client and server
  // for each of Client and Server there are 3 parts to the output:
  // 1. type X interface {methods}
  // 2. serverHandler(...) { return func(...) { switch r.method}}
  // 2. func (h *serverHandler) Deliver(...) { switch r.method }
  // 3. func (x *xDispatcher) Method(ctx, parm)
  not.forEach(
      (v, k) => {
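The comment above describes the three Go artifacts now generated for each side. As an illustration only (the Example method, its parameter and result types, and the "textDocument/example" method name are hypothetical; the shapes mirror the generated hunks elsewhere in this compare):

// 1. A method on the generated interface.
type Server interface {
	Example(context.Context, *ExampleParams) (*ExampleResult, error)
}

// 2. A case in the handler's Deliver method.
func (h serverHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
	if delivered {
		return false
	}
	switch r.Method {
	case "textDocument/example": // req
		var params ExampleParams
		if err := json.Unmarshal(*r.Params, &params); err != nil {
			sendParseError(ctx, r, err)
			return true
		}
		resp, err := h.server.Example(ctx, &params)
		if err := r.Reply(ctx, resp, err); err != nil {
			log.Error(ctx, "", err)
		}
		return true
	default:
		return false
	}
}

// 3. A dispatcher method that performs the call from the other side.
func (s *serverDispatcher) Example(ctx context.Context, params *ExampleParams) (*ExampleResult, error) {
	var result ExampleResult
	if err := s.Conn.Call(ctx, "textDocument/example", params, &result); err != nil {
		return nil, err
	}
	return &result, nil
}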
@ -99,7 +99,7 @@ function sig(nm: string, a: string, b: string, names?: boolean): string {

const notNil = `if r.Params != nil {
	r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidParams, "Expected no params"))
	return
	return true
}`;
// Go code for notifications. Side is client or server, m is the request method
function goNot(side: side, m: string) {
|
||||
|
@ -113,16 +113,18 @@ function goNot(side: side, m: string) {
|
|||
if (a != '') {
|
||||
case1 = `var params ${a}
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
if err := ${side.name}.${nm}(ctx, ¶ms); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
}`;
|
||||
if err := h.${side.name}.${nm}(ctx, ¶ms); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true`;
|
||||
} else {
|
||||
case1 = `if err := ${side.name}.${nm}(ctx); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
}`;
|
||||
case1 = `if err := h.${side.name}.${nm}(ctx); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true`;
|
||||
}
|
||||
side.cases.push(`${caseHdr}\n${case1}`);
|
||||
|
||||
|
@ -152,24 +154,26 @@ function goReq(side: side, m: string) {
|
|||
if (a != '') {
|
||||
case1 = `var params ${a}
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}`;
|
||||
}
|
||||
const arg2 = a == '' ? '' : ', ¶ms';
|
||||
let case2 = `if err := ${side.name}.${nm}(ctx${arg2}); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
let case2 = `if err := h.${side.name}.${nm}(ctx${arg2}); err != nil {
|
||||
log.Error(ctx, "", err)
|
||||
}`;
|
||||
if (b != '') {
|
||||
case2 = `resp, err := ${side.name}.${nm}(ctx${arg2})
|
||||
case2 = `resp, err := h.${side.name}.${nm}(ctx${arg2})
|
||||
if err := r.Reply(ctx, resp, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
}`;
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true`;
|
||||
} else { // response is nil
|
||||
case2 = `err := ${side.name}.${nm}(ctx${arg2})
|
||||
case2 = `err := h.${side.name}.${nm}(ctx${arg2})
|
||||
if err := r.Reply(ctx, nil, err); err != nil {
|
||||
log.Errorf(ctx, "%v", err)
|
||||
}`
|
||||
log.Error(ctx, "", err)
|
||||
}
|
||||
return true`
|
||||
}
|
||||
|
||||
side.cases.push(`${caseHdr}\n${case1}\n${case2}`);
|
||||
|
@ -222,32 +226,31 @@ function output(side: side) {
|
|||
"encoding/json"
|
||||
|
||||
"golang.org/x/tools/internal/jsonrpc2"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
)
|
||||
`);
|
||||
const a = side.name[0].toUpperCase() + side.name.substring(1)
|
||||
f(`type ${a} interface {`);
|
||||
side.methods.forEach((v) => { f(v) });
|
||||
f('}\n');
|
||||
f(`func ${side.name}Handler(log xlog.Logger, ${side.name} ${
|
||||
side.goName}) jsonrpc2.Handler {
|
||||
return func(ctx context.Context, r *jsonrpc2.Request) {
|
||||
f(`func (h ${side.name}Handler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool {
|
||||
if delivered {
|
||||
return false
|
||||
}
|
||||
switch r.Method {
|
||||
case "$/cancelRequest":
|
||||
var params CancelParams
|
||||
if err := json.Unmarshal(*r.Params, ¶ms); err != nil {
|
||||
sendParseError(ctx, log, r, err)
|
||||
return
|
||||
sendParseError(ctx, r, err)
|
||||
return true
|
||||
}
|
||||
r.Conn().Cancel(params.ID)`);
|
||||
r.Conn().Cancel(params.ID)
|
||||
return true`);
|
||||
side.cases.forEach((v) => { f(v) });
|
||||
f(`
|
||||
default:
|
||||
if r.IsNotify() {
|
||||
r.Reply(ctx, nil, jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not found", r.Method))
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}`);
|
||||
f(`
|
||||
type ${side.name}Dispatcher struct {
|
||||
|
|
|
@ -9,6 +9,8 @@ import (

	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/lsp/telemetry/log"
	"golang.org/x/tools/internal/lsp/telemetry/tag"
	"golang.org/x/tools/internal/span"
)

@ -34,7 +36,7 @@ func (s *Server) references(ctx context.Context, params *protocol.ReferenceParam
	}
	references, err := ident.References(ctx)
	if err != nil {
		view.Session().Logger().Errorf(ctx, "no references for %s: %v", ident.Name, err)
		log.Error(ctx, "no references", err, tag.Of("Identifier", ident.Name))
	}
	if params.Context.IncludeDeclaration {
		// The declaration of this identifier may not be in the
|
||||
|
|
|
@ -13,37 +13,36 @@ import (
	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/lsp/xlog"
	"golang.org/x/tools/internal/span"
)

// NewClientServer
func NewClientServer(cache source.Cache, client protocol.Client) *Server {
	return &Server{
func NewClientServer(ctx context.Context, cache source.Cache, client protocol.Client) (context.Context, *Server) {
	ctx = protocol.WithClient(ctx, client)
	return ctx, &Server{
		client:  client,
		session: cache.NewSession(xlog.New(protocol.NewLogger(client))),
		session: cache.NewSession(ctx),
	}
}

// NewServer starts an LSP server on the supplied stream, and waits until the
// stream is closed.
func NewServer(cache source.Cache, stream jsonrpc2.Stream) *Server {
func NewServer(ctx context.Context, cache source.Cache, stream jsonrpc2.Stream) (context.Context, *Server) {
	s := &Server{}
	var log xlog.Logger
	s.Conn, s.client, log = protocol.NewServer(stream, s)
	s.session = cache.NewSession(log)
	return s
	ctx, s.Conn, s.client = protocol.NewServer(ctx, stream, s)
	s.session = cache.NewSession(ctx)
	return ctx, s
}

// RunServerOnPort starts an LSP server on the given port and does not exit.
// This function exists for debugging purposes.
func RunServerOnPort(ctx context.Context, cache source.Cache, port int, h func(s *Server)) error {
func RunServerOnPort(ctx context.Context, cache source.Cache, port int, h func(ctx context.Context, s *Server)) error {
	return RunServerOnAddress(ctx, cache, fmt.Sprintf(":%v", port), h)
}

// RunServerOnPort starts an LSP server on the given port and does not exit.
// This function exists for debugging purposes.
func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h func(s *Server)) error {
func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h func(ctx context.Context, s *Server)) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err

@ -53,7 +52,7 @@ func RunServerOnAddress(ctx context.Context, cache source.Cache, addr string, h
		if err != nil {
			return err
		}
		h(NewServer(cache, jsonrpc2.NewHeaderStream(conn, conn)))
		h(NewServer(ctx, cache, jsonrpc2.NewHeaderStream(conn, conn)))
	}
}
|
||||
|
||||
|
@ -61,12 +60,21 @@ func (s *Server) Run(ctx context.Context) error {
	return s.Conn.Run(ctx)
}

type serverState int

const (
	serverCreated      = serverState(iota)
	serverInitializing // set once the server has received "initialize" request
	serverInitialized  // set once the server has received "initialized" request
	serverShutDown
)

type Server struct {
	Conn   *jsonrpc2.Conn
	client protocol.Client

	initializedMu sync.Mutex
	isInitialized bool // set once the server has received "initialize" request
	stateMu sync.Mutex
	state   serverState

	// Configurations.
	// TODO(rstambler): Separate these into their own struct?
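The Server struct above trades the single isInitialized flag for a small state machine protected by stateMu. A hedged sketch of how a transition might be guarded (this helper is ours; the actual initialize handling is not shown in this compare):

// Sketch: advance from serverCreated to serverInitializing exactly once.
func (s *Server) tryStartInitialize() bool {
	s.stateMu.Lock()
	defer s.stateMu.Unlock()
	if s.state != serverCreated {
		return false // a duplicate "initialize" request
	}
	s.state = serverInitializing
	return true
}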
|
||||
|
@ -264,6 +272,11 @@ func (s *Server) PrepareRename(context.Context, *protocol.TextDocumentPositionPa
func (s *Server) SetTraceNotification(context.Context, *protocol.SetTraceParams) error {
	return notImplemented("SetTraceNotification")
}

func (s *Server) SelectionRange(context.Context, *protocol.SelectionRangeParams) ([]protocol.SelectionRange, error) {
	return nil, notImplemented("SelectionRange")
}

func notImplemented(method string) *jsonrpc2.Error {
	return jsonrpc2.NewErrorf(jsonrpc2.CodeMethodNotFound, "method %q not yet implemented", method)
}
|
||||
|
|
|
@ -9,6 +9,8 @@ import (

	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/lsp/telemetry/log"
	"golang.org/x/tools/internal/lsp/telemetry/tag"
	"golang.org/x/tools/internal/span"
)

@ -29,7 +31,7 @@ func (s *Server) signatureHelp(ctx context.Context, params *protocol.TextDocumen
	}
	info, err := source.SignatureHelp(ctx, f, rng.Start)
	if err != nil {
		s.session.Logger().Infof(ctx, "no signature help for %s:%v:%v : %s", uri, int(params.Position.Line), int(params.Position.Character), err)
		log.Print(ctx, "no signature help", tag.Of("At", rng), tag.Of("Failure", err))
		return nil, nil
	}
	return toProtocolSignatureHelp(info), nil
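This hunk is one instance of a logging migration that recurs throughout the compare: printf-style Logger().Infof/Errorf calls become structured telemetry calls carrying tag.Of key/value pairs. A small sketch of the new style in isolation (the function name, message, and tag key here are illustrative, not taken from the change):

func logLookupFailure(ctx context.Context, uri span.URI, err error) {
	// A constant message plus typed tags, instead of formatting values into the string.
	log.Error(ctx, "no signature help", err, tag.Of("URI", uri))
}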
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"reflect"
	"sort"
	"strings"

@ -24,8 +23,8 @@ import (
)

func analyze(ctx context.Context, v View, pkgs []Package, analyzers []*analysis.Analyzer) ([]*Action, error) {
	ctx, ts := trace.StartSpan(ctx, "source.analyze")
	defer ts.End()
	ctx, done := trace.StartSpan(ctx, "source.analyze")
	defer done()
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

@ -148,7 +147,7 @@ func (act *Action) execOnce(ctx context.Context, fset *token.FileSet) error {
	pass := &analysis.Pass{
		Analyzer:   act.Analyzer,
		Fset:       fset,
		Files:      act.Pkg.GetSyntax(),
		Files:      act.Pkg.GetSyntax(ctx),
		Pkg:        act.Pkg.GetTypes(),
		TypesInfo:  act.Pkg.GetTypesInfo(),
		TypesSizes: act.Pkg.GetTypesSizes(),

@ -245,12 +244,12 @@ func (act *Action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
// exportObjectFact implements Pass.ExportObjectFact.
func (act *Action) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if act.pass.ExportObjectFact == nil {
		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
		panic(fmt.Sprintf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact))
	}

	if obj.Pkg() != act.Pkg.GetTypes() {
		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
			act.Analyzer, act.Pkg, obj, fact)
		panic(fmt.Sprintf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
			act.Analyzer, act.Pkg, obj, fact))
	}

	key := objectFactKey{obj, factType(fact)}

@ -284,7 +283,7 @@ func (act *Action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool
// exportPackageFact implements Pass.ExportPackageFact.
func (act *Action) exportPackageFact(fact analysis.Fact) {
	if act.pass.ExportPackageFact == nil {
		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
		panic(fmt.Sprintf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact))
	}

	key := packageFactKey{act.pass.Pkg, factType(fact)}

@ -294,7 +293,7 @@ func (act *Action) exportPackageFact(fact analysis.Fact) {
func factType(fact analysis.Fact) reflect.Type {
	t := reflect.TypeOf(fact)
	if t.Kind() != reflect.Ptr {
		log.Fatalf("invalid Fact type: got %T, want pointer", t)
		panic(fmt.Sprintf("invalid Fact type: got %T, want pointer", t))
	}
	return t
}
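The `ctx, done := trace.StartSpan(...)` / `defer done()` pairing seen in analyze above is the instrumentation pattern this change rolls out across the source package. A minimal sketch of how a function adopts it (the span name and body here are illustrative, not code from the change):

func doWork(ctx context.Context) error {
	// Start a telemetry span named after the operation and make sure it is
	// always closed, even on early return.
	ctx, done := trace.StartSpan(ctx, "source.doWork")
	defer done()

	if ctx.Err() != nil {
		return ctx.Err() // respect cancellation, as analyze does above
	}
	// ... real work would go here ...
	return nil
}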
|
||||
|
|
|
@ -278,13 +278,13 @@ type CompletionOptions struct {
// the client to score the quality of the completion. For instance, some clients
// may tolerate imperfect matches as valid completion results, since users may make typos.
func Completion(ctx context.Context, view View, f GoFile, pos token.Pos, opts CompletionOptions) ([]CompletionItem, *Selection, error) {
	ctx, ts := trace.StartSpan(ctx, "source.Completion")
	defer ts.End()
	file := f.GetAST(ctx)
	if file == nil {
		return nil, nil, fmt.Errorf("no AST for %s", f.URI())
	}
	ctx, done := trace.StartSpan(ctx, "source.Completion")
	defer done()

	file, err := f.GetAST(ctx, ParseFull)
	if file == nil {
		return nil, nil, err
	}
	pkg := f.GetPackage(ctx)
	if pkg == nil || pkg.IsIllTyped() {
		return nil, nil, fmt.Errorf("package for %s is ill typed", f.URI())

@ -509,6 +509,7 @@ func (c *completer) lexical() error {
			if scope == types.Universe {
				score *= 0.1
			}

			// If we haven't already added a candidate for an object with this name.
			if _, ok := seen[obj.Name()]; !ok {
				seen[obj.Name()] = struct{}{}
|
||||
|
|
|
@ -14,6 +14,8 @@ import (
	"strings"

	"golang.org/x/tools/internal/lsp/snippet"
	"golang.org/x/tools/internal/lsp/telemetry/log"
	"golang.org/x/tools/internal/lsp/telemetry/tag"
	"golang.org/x/tools/internal/span"
)

@ -97,31 +99,38 @@ func (c *completer) item(cand candidate) (CompletionItem, error) {
	if c.opts.WantDocumentaton {
		declRange, err := objToRange(c.ctx, c.view.Session().Cache().FileSet(), obj)
		if err != nil {
			return CompletionItem{}, err
			log.Error(c.ctx, "failed to get declaration range for object", err, tag.Of("Name", obj.Name()))
			goto Return
		}
		pos := declRange.FileSet.Position(declRange.Start)
		if !pos.IsValid() {
			return CompletionItem{}, fmt.Errorf("invalid declaration position for %v", item.Label)
			log.Error(c.ctx, "invalid declaration position", err, tag.Of("Label", item.Label))
			goto Return
		}
		uri := span.FileURI(pos.Filename)
		f, err := c.view.GetFile(c.ctx, uri)
		if err != nil {
			return CompletionItem{}, err
			log.Error(c.ctx, "unable to get file", err, tag.Of("URI", uri))
			goto Return
		}
		gof, ok := f.(GoFile)
		if !ok {
			return CompletionItem{}, fmt.Errorf("declaration for %s not in a Go file: %s", item.Label, uri)
			log.Error(c.ctx, "declaration in a Go file", err, tag.Of("Label", item.Label))
			goto Return
		}
		ident, err := Identifier(c.ctx, c.view, gof, declRange.Start)
		if err != nil {
			return CompletionItem{}, err
			log.Error(c.ctx, "no identifier", err, tag.Of("Name", obj.Name()))
			goto Return
		}
		documentation, err := ident.Documentation(c.ctx, SynopsisDocumentation)
		if err != nil {
			return CompletionItem{}, err
			log.Error(c.ctx, "no documentation", err, tag.Of("Name", obj.Name()))
			goto Return
		}
		item.Documentation = documentation
	}
Return:
	return item, nil
}

@ -192,7 +201,7 @@ func formatFieldList(ctx context.Context, v View, list *ast.FieldList) ([]string
		cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 4}
		b := &bytes.Buffer{}
		if err := cfg.Fprint(b, v.Session().Cache().FileSet(), p.Type); err != nil {
			v.Session().Logger().Errorf(ctx, "unable to print type %v", p.Type)
			log.Error(ctx, "unable to print type", nil, tag.Of("Type", p.Type))
			continue
		}
		typ := replacer.Replace(b.String())
|
||||
|
|
|
@ -34,6 +34,9 @@ import (
|
|||
"golang.org/x/tools/go/analysis/passes/unsafeptr"
|
||||
"golang.org/x/tools/go/analysis/passes/unusedresult"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/internal/lsp/telemetry"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -59,6 +62,8 @@ const (
|
|||
)
|
||||
|
||||
func Diagnostics(ctx context.Context, view View, f GoFile, disabledAnalyses map[string]struct{}) (map[span.URI][]Diagnostic, error) {
|
||||
ctx, done := trace.StartSpan(ctx, "source.Diagnostics", telemetry.File.Of(f.URI()))
|
||||
defer done()
|
||||
pkg := f.GetPackage(ctx)
|
||||
if pkg == nil {
|
||||
return singleDiagnostic(f.URI(), "%s is not part of a package", f.URI()), nil
|
||||
|
@ -81,7 +86,7 @@ func Diagnostics(ctx context.Context, view View, f GoFile, disabledAnalyses map[
|
|||
if !diagnostics(ctx, view, pkg, reports) {
|
||||
// If we don't have any list, parse, or type errors, run analyses.
|
||||
if err := analyses(ctx, view, pkg, disabledAnalyses, reports); err != nil {
|
||||
view.Session().Logger().Errorf(ctx, "failed to run analyses for %s: %v", f.URI(), err)
|
||||
log.Error(ctx, "failed to run analyses", err, telemetry.File)
|
||||
}
|
||||
}
|
||||
// Updates to the diagnostics for this package may need to be propagated.
|
||||
|
@ -104,6 +109,8 @@ type diagnosticSet struct {
|
|||
}
|
||||
|
||||
func diagnostics(ctx context.Context, v View, pkg Package, reports map[span.URI][]Diagnostic) bool {
|
||||
ctx, done := trace.StartSpan(ctx, "source.diagnostics", telemetry.Package.Of(pkg.ID()))
|
||||
defer done()
|
||||
diagSets := make(map[span.URI]*diagnosticSet)
|
||||
for _, err := range pkg.GetErrors() {
|
||||
diag := Diagnostic{
|
||||
|
@ -229,30 +236,31 @@ func parseDiagnosticMessage(input string) span.Span {
|
|||
|
||||
func pointToSpan(ctx context.Context, view View, spn span.Span) span.Span {
|
||||
f, err := view.GetFile(ctx, spn.URI())
|
||||
ctx = telemetry.File.With(ctx, spn.URI())
|
||||
if err != nil {
|
||||
view.Session().Logger().Errorf(ctx, "could not find file for diagnostic: %v", spn.URI())
|
||||
log.Error(ctx, "could not find file for diagnostic", nil, telemetry.File)
|
||||
return spn
|
||||
}
|
||||
diagFile, ok := f.(GoFile)
|
||||
if !ok {
|
||||
view.Session().Logger().Errorf(ctx, "%s is not a Go file", spn.URI())
|
||||
log.Error(ctx, "not a Go file", nil, telemetry.File)
|
||||
return spn
|
||||
}
|
||||
tok := diagFile.GetToken(ctx)
|
||||
if tok == nil {
|
||||
view.Session().Logger().Errorf(ctx, "could not find token.File for diagnostic: %v", spn.URI())
|
||||
tok, err := diagFile.GetToken(ctx)
|
||||
if err != nil {
|
||||
log.Error(ctx, "could not find token.File for diagnostic", err, telemetry.File)
|
||||
return spn
|
||||
}
|
||||
data, _, err := diagFile.Handle(ctx).Read(ctx)
|
||||
if err != nil {
|
||||
view.Session().Logger().Errorf(ctx, "could not find content for diagnostic: %v", spn.URI())
|
||||
log.Error(ctx, "could not find content for diagnostic", err, telemetry.File)
|
||||
return spn
|
||||
}
|
||||
c := span.NewTokenConverter(diagFile.FileSet(), tok)
|
||||
s, err := spn.WithOffset(c)
|
||||
//we just don't bother producing an error if this failed
|
||||
if err != nil {
|
||||
view.Session().Logger().Errorf(ctx, "invalid span for diagnostic: %v: %v", spn.URI(), err)
|
||||
log.Error(ctx, "invalid span for diagnostic", err, telemetry.File)
|
||||
return spn
|
||||
}
|
||||
start := s.Start()
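pointToSpan above upgrades a position-only span into one that also carries byte offsets by going through the file's token.File. A hedged sketch of that conversion in isolation, assuming fset, tok and a point span spn for the same file are already in hand:

    converter := span.NewTokenConverter(fset, tok)
    full, err := spn.WithOffset(converter)
    if err != nil {
    	// fall back to the original span, as the diagnostic code above does
    	full = spn
    }
    start := full.Start()
    _ = start
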
|
@ -10,27 +10,36 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/internal/imports"
|
||||
"golang.org/x/tools/internal/lsp/diff"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/log"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/trace"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
// Format formats a file with a given range.
|
||||
func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.Format")
|
||||
defer ts.End()
|
||||
file := f.GetAST(ctx)
|
||||
ctx, done := trace.StartSpan(ctx, "source.Format")
|
||||
defer done()
|
||||
|
||||
file, err := f.GetAST(ctx, ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
pkg := f.GetPackage(ctx)
|
||||
if hasListErrors(pkg.GetErrors()) || hasParseErrors(pkg.GetErrors()) {
|
||||
return nil, fmt.Errorf("%s has parse errors, not formatting", f.URI())
|
||||
// Even if this package has list or parse errors, this file may not
|
||||
// have any parse errors and can still be formatted. Using format.Node
|
||||
// on an ast with errors may result in code being added or removed.
|
||||
// Attempt to format the source of this file instead.
|
||||
formatted, err := formatSource(ctx, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return computeTextEdits(ctx, f, string(formatted)), nil
|
||||
}
|
||||
path, exact := astutil.PathEnclosingInterval(file, rng.Start, rng.End)
|
||||
if !exact || len(path) == 0 {
|
||||
|
@ -51,10 +60,20 @@ func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) {
|
|||
return computeTextEdits(ctx, f, buf.String()), nil
|
||||
}
|
||||
|
||||
func formatSource(ctx context.Context, file File) ([]byte, error) {
|
||||
ctx, done := trace.StartSpan(ctx, "source.formatSource")
|
||||
defer done()
|
||||
data, _, err := file.Handle(ctx).Read(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return format.Source(data)
|
||||
}
|
||||
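The Format change above no longer refuses to format a file just because its package has list or parse errors: printing a broken AST with format.Node can add or remove code, so formatSource falls back to gofmt-ing the raw file contents instead. The fallback only needs the bytes and the standard library; a small, self-contained illustration:

    package main

    import (
    	"fmt"
    	"go/format"
    )

    func main() {
    	src := []byte("package main\nfunc  main( ){ println(\"hi\") }\n")
    	out, err := format.Source(src) // gofmt the bytes directly, no *ast.File needed
    	if err != nil {
    		// the file itself does not parse; at that point formatting is skipped
    		fmt.Println("parse error:", err)
    		return
    	}
    	fmt.Print(string(out))
    }
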
|
||||
// Imports formats a file using the goimports tool.
|
||||
func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEdit, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.Imports")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.Imports")
|
||||
defer done()
|
||||
data, _, err := f.Handle(ctx).Read(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -66,8 +85,8 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd
|
|||
if hasListErrors(pkg.GetErrors()) {
|
||||
return nil, fmt.Errorf("%s has list errors, not running goimports", f.URI())
|
||||
}
|
||||
|
||||
options := &imports.Options{
|
||||
Env: buildProcessEnv(ctx, view),
|
||||
// Defaults.
|
||||
AllErrors: true,
|
||||
Comments: true,
|
||||
|
@ -76,10 +95,16 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd
|
|||
TabIndent: true,
|
||||
TabWidth: 8,
|
||||
}
|
||||
formatted, err := imports.Process(f.URI().Filename(), data, options)
|
||||
var formatted []byte
|
||||
importFn := func(opts *imports.Options) error {
|
||||
formatted, err = imports.Process(f.URI().Filename(), data, opts)
|
||||
return err
|
||||
}
|
||||
err = view.RunProcessEnvFunc(ctx, importFn, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return computeTextEdits(ctx, f, string(formatted)), nil
|
||||
}
|
||||
|
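The Imports change above stops calling imports.Process directly and instead hands a closure to View.RunProcessEnvFunc, which fills in the options' cached ProcessEnv (module and filesystem state) before invoking it. Roughly, with filename, src, view and ctx assumed to be in scope:

    options := &imports.Options{
    	// Defaults.
    	AllErrors: true,
    	Comments:  true,
    	TabIndent: true,
    	TabWidth:  8,
    }
    var formatted []byte
    importFn := func(opts *imports.Options) error {
    	var err error
    	formatted, err = imports.Process(filename, src, opts)
    	return err
    }
    if err := view.RunProcessEnvFunc(ctx, importFn, options); err != nil {
    	return nil, err
    }
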
||||
|
@ -101,43 +126,12 @@ func hasListErrors(errors []packages.Error) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func buildProcessEnv(ctx context.Context, view View) *imports.ProcessEnv {
|
||||
cfg := view.Config()
|
||||
env := &imports.ProcessEnv{
|
||||
WorkingDir: cfg.Dir,
|
||||
Logf: func(format string, v ...interface{}) {
|
||||
view.Session().Logger().Infof(ctx, format, v...)
|
||||
},
|
||||
}
|
||||
for _, kv := range cfg.Env {
|
||||
split := strings.Split(kv, "=")
|
||||
if len(split) < 2 {
|
||||
continue
|
||||
}
|
||||
switch split[0] {
|
||||
case "GOPATH":
|
||||
env.GOPATH = split[1]
|
||||
case "GOROOT":
|
||||
env.GOROOT = split[1]
|
||||
case "GO111MODULE":
|
||||
env.GO111MODULE = split[1]
|
||||
case "GOPROXY":
|
||||
env.GOROOT = split[1]
|
||||
case "GOFLAGS":
|
||||
env.GOFLAGS = split[1]
|
||||
case "GOSUMDB":
|
||||
env.GOSUMDB = split[1]
|
||||
}
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
func computeTextEdits(ctx context.Context, file File, formatted string) (edits []TextEdit) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.computeTextEdits")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.computeTextEdits")
|
||||
defer done()
|
||||
data, _, err := file.Handle(ctx).Read(ctx)
|
||||
if err != nil {
|
||||
file.View().Session().Logger().Errorf(ctx, "Cannot compute text edits: %v", err)
|
||||
log.Error(ctx, "Cannot compute text edits", err)
|
||||
return nil
|
||||
}
|
||||
u := diff.SplitLines(string(data))
|
||||
|
|
|
@ -16,11 +16,12 @@ import (
|
|||
)
|
||||
|
||||
func Highlight(ctx context.Context, f GoFile, pos token.Pos) ([]span.Span, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.Highlight")
|
||||
defer ts.End()
|
||||
file := f.GetAST(ctx)
|
||||
ctx, done := trace.StartSpan(ctx, "source.Highlight")
|
||||
defer done()
|
||||
|
||||
file, err := f.GetAST(ctx, ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
fset := f.FileSet()
|
||||
path, _ := astutil.PathEnclosingInterval(file, pos, pos)
|
||||
|
|
|
@ -33,8 +33,8 @@ const (
|
|||
)
|
||||
|
||||
func (i *IdentifierInfo) Hover(ctx context.Context, markdownSupported bool, hoverKind HoverKind) (string, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.Hover")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.Hover")
|
||||
defer done()
|
||||
h, err := i.decl.hover(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -80,10 +80,12 @@ func (i *IdentifierInfo) Documentation(ctx context.Context, hoverKind HoverKind)
|
|||
}
|
||||
|
||||
func (d declaration) hover(ctx context.Context) (*documentation, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.hover")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.hover")
|
||||
defer done()
|
||||
obj := d.obj
|
||||
switch node := d.node.(type) {
|
||||
case *ast.ImportSpec:
|
||||
return &documentation{node, nil}, nil
|
||||
case *ast.GenDecl:
|
||||
switch obj := obj.(type) {
|
||||
case *types.TypeName, *types.Var, *types.Const, *types.Func:
|
||||
|
|
|
@ -63,11 +63,12 @@ func Identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident
|
|||
|
||||
// identifier checks a single position for a potential identifier.
|
||||
func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*IdentifierInfo, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.identifier")
|
||||
defer ts.End()
|
||||
file := f.GetAST(ctx)
|
||||
ctx, done := trace.StartSpan(ctx, "source.identifier")
|
||||
defer done()
|
||||
|
||||
file, err := f.GetAST(ctx, ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
pkg := f.GetPackage(ctx)
|
||||
if pkg == nil || pkg.IsIllTyped() {
|
||||
|
@ -121,8 +122,6 @@ func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident
|
|||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
// Handle builtins separately.
|
||||
if result.decl.obj.Parent() == types.Universe {
|
||||
decl, ok := lookupBuiltinDecl(f.View(), result.Name).(ast.Node)
|
||||
|
@ -235,14 +234,13 @@ func objToNode(ctx context.Context, view View, originPkg *types.Package, obj typ
|
|||
}
|
||||
// If the object is exported from a different package,
|
||||
// we don't need its full AST to find the definition.
|
||||
var declAST *ast.File
|
||||
mode := ParseFull
|
||||
if obj.Exported() && obj.Pkg() != originPkg {
|
||||
declAST = declFile.GetAnyAST(ctx)
|
||||
} else {
|
||||
declAST = declFile.GetAST(ctx)
|
||||
mode = ParseExported
|
||||
}
|
||||
declAST, err := declFile.GetAST(ctx, mode)
|
||||
if declAST == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
path, _ := astutil.PathEnclosingInterval(declAST, rng.Start, rng.End)
|
||||
if path == nil {
|
||||
|
@ -292,12 +290,12 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos
|
|||
if importedPkg == nil {
|
||||
return nil, fmt.Errorf("no import for %q", importPath)
|
||||
}
|
||||
if importedPkg.GetSyntax() == nil {
|
||||
if importedPkg.GetSyntax(ctx) == nil {
|
||||
return nil, fmt.Errorf("no syntax for for %q", importPath)
|
||||
}
|
||||
// Heuristic: Jump to the longest (most "interesting") file of the package.
|
||||
var dest *ast.File
|
||||
for _, f := range importedPkg.GetSyntax() {
|
||||
for _, f := range importedPkg.GetSyntax(ctx) {
|
||||
if dest == nil || f.End()-f.Pos() > dest.End()-dest.Pos() {
|
||||
dest = f
|
||||
}
|
||||
|
@ -306,6 +304,7 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos
|
|||
return nil, fmt.Errorf("package %q has no files", importPath)
|
||||
}
|
||||
result.decl.rng = span.NewRange(f.FileSet(), dest.Name.Pos(), dest.Name.End())
|
||||
result.decl.node = imp
|
||||
return result, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -27,8 +27,8 @@ type ReferenceInfo struct {
|
|||
// References returns a list of references for a given identifier within the packages
|
||||
// containing i.File. Declarations appear first in the result.
|
||||
func (i *IdentifierInfo) References(ctx context.Context) ([]*ReferenceInfo, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.References")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.References")
|
||||
defer done()
|
||||
var references []*ReferenceInfo
|
||||
|
||||
// If the object declaration is nil, assume it is an import spec and do not look for references.
|
||||
|
|
|
@ -37,8 +37,9 @@ type renamer struct {
|
|||
|
||||
// Rename returns a map of TextEdits for each file modified when renaming a given identifier within a package.
|
||||
func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.URI][]TextEdit, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.Rename")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.Rename")
|
||||
defer done()
|
||||
|
||||
if i.Name == newName {
|
||||
return nil, fmt.Errorf("old and new names are the same: %s", newName)
|
||||
}
|
||||
|
@ -85,7 +86,16 @@ func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.U
|
|||
return nil, fmt.Errorf(r.errors)
|
||||
}
|
||||
|
||||
return r.update()
|
||||
changes, err := r.update()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sort edits for each file.
|
||||
for _, edits := range changes {
|
||||
sortTextEdits(edits)
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// Rename all references to the identifier.
|
||||
|
|
|
@ -113,7 +113,7 @@ func (r *renamer) checkInPackageBlock(from types.Object) {
|
|||
}
|
||||
|
||||
// Check for conflicts between package block and all file blocks.
|
||||
for _, f := range pkg.GetSyntax() {
|
||||
for _, f := range pkg.GetSyntax(r.ctx) {
|
||||
fileScope := pkg.GetTypesInfo().Scopes[f]
|
||||
b, prev := fileScope.LookupParent(r.to, token.NoPos)
|
||||
if b == fileScope {
|
||||
|
@ -328,7 +328,7 @@ func forEachLexicalRef(ctx context.Context, pkg Package, obj types.Object, fn fu
|
|||
return true
|
||||
}
|
||||
|
||||
for _, f := range pkg.GetSyntax() {
|
||||
for _, f := range pkg.GetSyntax(ctx) {
|
||||
ast.Inspect(f, visit)
|
||||
if len(stack) != 0 {
|
||||
panic(stack)
|
||||
|
@ -802,7 +802,7 @@ func (r *renamer) satisfy() map[satisfy.Constraint]bool {
|
|||
r.from, r.to, pkg.PkgPath())
|
||||
return nil
|
||||
}
|
||||
f.Find(pkg.GetTypesInfo(), pkg.GetSyntax())
|
||||
f.Find(pkg.GetTypesInfo(), pkg.GetSyntax(r.ctx))
|
||||
}
|
||||
r.satisfyConstraints = f.Result
|
||||
}
|
||||
|
@ -835,7 +835,7 @@ func someUse(info *types.Info, obj types.Object) *ast.Ident {
|
|||
//
|
||||
func pathEnclosingInterval(ctx context.Context, fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) {
|
||||
var pkgs = []Package{pkg}
|
||||
for _, f := range pkg.GetSyntax() {
|
||||
for _, f := range pkg.GetSyntax(ctx) {
|
||||
for _, imp := range f.Imports {
|
||||
if imp == nil {
|
||||
continue
|
||||
|
@ -848,7 +848,7 @@ func pathEnclosingInterval(ctx context.Context, fset *token.FileSet, pkg Package
|
|||
}
|
||||
}
|
||||
for _, p := range pkgs {
|
||||
for _, f := range p.GetSyntax() {
|
||||
for _, f := range p.GetSyntax(ctx) {
|
||||
if f.Pos() == token.NoPos {
|
||||
// This can happen if the parser saw
|
||||
// too many errors and bailed out.
|
||||
|
|
|
@ -26,11 +26,12 @@ type ParameterInformation struct {
|
|||
}
|
||||
|
||||
func SignatureHelp(ctx context.Context, f GoFile, pos token.Pos) (*SignatureInformation, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.SignatureHelp")
|
||||
defer ts.End()
|
||||
file := f.GetAST(ctx)
|
||||
ctx, done := trace.StartSpan(ctx, "source.SignatureHelp")
|
||||
defer done()
|
||||
|
||||
file, err := f.GetAST(ctx, ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
pkg := f.GetPackage(ctx)
|
||||
if pkg == nil || pkg.IsIllTyped() {
|
||||
|
|
|
@ -19,7 +19,6 @@ import (
|
|||
"golang.org/x/tools/internal/lsp/diff"
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/lsp/tests"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -30,18 +29,20 @@ func TestSource(t *testing.T) {
|
|||
type runner struct {
|
||||
view source.View
|
||||
data *tests.Data
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func testSource(t *testing.T, exporter packagestest.Exporter) {
|
||||
ctx := tests.Context(t)
|
||||
data := tests.Load(t, exporter, "../testdata")
|
||||
defer data.Exported.Cleanup()
|
||||
|
||||
log := xlog.New(xlog.StdSink{})
|
||||
cache := cache.New()
|
||||
session := cache.NewSession(log)
|
||||
session := cache.NewSession(ctx)
|
||||
r := &runner{
|
||||
view: session.NewView("source_test", span.FileURI(data.Config.Dir)),
|
||||
view: session.NewView(ctx, "source_test", span.FileURI(data.Config.Dir)),
|
||||
data: data,
|
||||
ctx: ctx,
|
||||
}
|
||||
r.view.SetEnv(data.Config.Env)
|
||||
for filename, content := range data.Config.Overlay {
|
||||
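The test runner changes above follow the new construction path, which threads a context through cache, session and view instead of passing an xlog.Logger. Roughly:

    ctx := tests.Context(t)
    cache := cache.New()
    session := cache.NewSession(ctx)
    view := session.NewView(ctx, "source_test", span.FileURI("/tmp/project")) // folder path is illustrative
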
|
@ -52,11 +53,11 @@ func testSource(t *testing.T, exporter packagestest.Exporter) {
|
|||
|
||||
func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) {
|
||||
for uri, want := range data {
|
||||
f, err := r.view.GetFile(context.Background(), uri)
|
||||
f, err := r.view.GetFile(r.ctx, uri)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
results, err := source.Diagnostics(context.Background(), r.view, f.(source.GoFile), nil)
|
||||
results, err := source.Diagnostics(r.ctx, r.view, f.(source.GoFile), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -132,7 +133,7 @@ func summarizeDiagnostics(i int, want []source.Diagnostic, got []source.Diagnost
|
|||
}
|
||||
|
||||
func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests.CompletionSnippets, items tests.CompletionItems) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for src, itemList := range data {
|
||||
var want []source.CompletionItem
|
||||
for _, pos := range itemList {
|
||||
|
@ -142,9 +143,9 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", src, err)
|
||||
}
|
||||
tok := f.(source.GoFile).GetToken(ctx)
|
||||
if tok == nil {
|
||||
t.Fatalf("failed to get token for %v", src)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(src.Start().Offset())
|
||||
list, surrounding, err := source.Completion(ctx, r.view, f.(source.GoFile), pos, source.CompletionOptions{
|
||||
|
@ -180,7 +181,10 @@ func (r *runner) Completion(t *testing.T, data tests.Completions, snippets tests
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", src, err)
|
||||
}
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(src.Start().Offset())
|
||||
list, _, err := source.Completion(ctx, r.view, f.(source.GoFile), pos, source.CompletionOptions{
|
||||
DeepComplete: strings.Contains(string(src.URI()), "deepcomplete"),
|
||||
|
@ -289,7 +293,7 @@ func summarizeCompletionItems(i int, want []source.CompletionItem, got []source.
|
|||
}
|
||||
|
||||
func (r *runner) Format(t *testing.T, data tests.Formats) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for _, spn := range data {
|
||||
uri := spn.URI()
|
||||
filename := uri.Filename()
|
||||
|
@ -302,7 +306,11 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), f.GetToken(ctx)))
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
|
||||
}
|
||||
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), tok))
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
|
@ -327,7 +335,7 @@ func (r *runner) Format(t *testing.T, data tests.Formats) {
|
|||
}
|
||||
|
||||
func (r *runner) Import(t *testing.T, data tests.Imports) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for _, spn := range data {
|
||||
uri := spn.URI()
|
||||
filename := uri.Filename()
|
||||
|
@ -340,7 +348,11 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), f.GetToken(ctx)))
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
|
||||
}
|
||||
rng, err := spn.Range(span.NewTokenConverter(f.FileSet(), tok))
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
|
@ -365,13 +377,16 @@ func (r *runner) Import(t *testing.T, data tests.Imports) {
|
|||
}
|
||||
|
||||
func (r *runner) Definition(t *testing.T, data tests.Definitions) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for _, d := range data {
|
||||
f, err := r.view.GetFile(ctx, d.Src.URI())
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", d.Src, err)
|
||||
}
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", d.Src.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(d.Src.Start().Offset())
|
||||
ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos)
|
||||
if err != nil {
|
||||
|
@ -407,14 +422,17 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) {
|
|||
}
|
||||
|
||||
func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for name, locations := range data {
|
||||
src := locations[0]
|
||||
f, err := r.view.GetFile(ctx, src.URI())
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", src, err)
|
||||
}
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(src.Start().Offset())
|
||||
highlights, err := source.Highlight(ctx, f.(source.GoFile), pos)
|
||||
if err != nil {
|
||||
|
@ -432,14 +450,16 @@ func (r *runner) Highlight(t *testing.T, data tests.Highlights) {
|
|||
}
|
||||
|
||||
func (r *runner) Reference(t *testing.T, data tests.References) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for src, itemList := range data {
|
||||
f, err := r.view.GetFile(ctx, src.URI())
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", src, err)
|
||||
}
|
||||
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", src.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(src.Start().Offset())
|
||||
ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos)
|
||||
if err != nil {
|
||||
|
@ -478,7 +498,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) {
|
|||
}
|
||||
|
||||
func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for spn, newText := range data {
|
||||
tag := fmt.Sprintf("%s-rename", newText)
|
||||
|
||||
|
@ -486,14 +506,18 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
|||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(spn.Start().Offset())
|
||||
|
||||
ident, err := source.Identifier(context.Background(), r.view, f.(source.GoFile), pos)
|
||||
ident, err := source.Identifier(r.ctx, r.view, f.(source.GoFile), pos)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
changes, err := ident.Rename(context.Background(), newText)
|
||||
changes, err := ident.Rename(r.ctx, newText)
|
||||
if err != nil {
|
||||
renamed := string(r.data.Golden(tag, spn.URI().Filename(), func() ([]byte, error) {
|
||||
return []byte(err.Error()), nil
|
||||
|
@ -544,7 +568,6 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) {
|
|||
|
||||
func applyEdits(contents string, edits []source.TextEdit) string {
|
||||
res := contents
|
||||
sortSourceTextEdits(edits)
|
||||
|
||||
// Apply the edits from the end of the file forward
|
||||
// to preserve the offsets
|
||||
|
@ -558,17 +581,8 @@ func applyEdits(contents string, edits []source.TextEdit) string {
|
|||
return res
|
||||
}
|
||||
|
||||
func sortSourceTextEdits(d []source.TextEdit) {
|
||||
sort.Slice(d, func(i int, j int) bool {
|
||||
if r := span.Compare(d[i].Span, d[j].Span); r != 0 {
|
||||
return r < 0
|
||||
}
|
||||
return d[i].NewText < d[j].NewText
|
||||
})
|
||||
}
|
||||
|
||||
func (r *runner) Symbol(t *testing.T, data tests.Symbols) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for uri, expectedSymbols := range data {
|
||||
f, err := r.view.GetFile(ctx, uri)
|
||||
if err != nil {
|
||||
|
@ -632,13 +646,16 @@ func summarizeSymbols(i int, want []source.Symbol, got []source.Symbol, reason s
|
|||
}
|
||||
|
||||
func (r *runner) SignatureHelp(t *testing.T, data tests.Signatures) {
|
||||
ctx := context.Background()
|
||||
ctx := r.ctx
|
||||
for spn, expectedSignature := range data {
|
||||
f, err := r.view.GetFile(ctx, spn.URI())
|
||||
if err != nil {
|
||||
t.Fatalf("failed for %v: %v", spn, err)
|
||||
}
|
||||
tok := f.GetToken(ctx)
|
||||
tok, err := f.(source.GoFile).GetToken(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get token for %s: %v", spn.URI(), err)
|
||||
}
|
||||
pos := tok.Pos(spn.Start().Offset())
|
||||
gotSignature, err := source.SignatureHelp(ctx, f.(source.GoFile), pos)
|
||||
if err != nil {
|
||||
|
|
|
@ -8,10 +8,10 @@ import (
|
|||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]CodeAction, error) {
|
||||
var cas []CodeAction
|
||||
func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]SuggestedFixes, error) {
|
||||
var cas []SuggestedFixes
|
||||
for _, fix := range diag.SuggestedFixes {
|
||||
var ca CodeAction
|
||||
var ca SuggestedFixes
|
||||
ca.Title = fix.Message
|
||||
for _, te := range fix.TextEdits {
|
||||
span, err := span.NewRange(fset, te.Pos, te.End).Span()
|
||||
|
|
|
@ -42,12 +42,13 @@ type Symbol struct {
|
|||
}
|
||||
|
||||
func DocumentSymbols(ctx context.Context, f GoFile) ([]Symbol, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "source.DocumentSymbols")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "source.DocumentSymbols")
|
||||
defer done()
|
||||
|
||||
fset := f.FileSet()
|
||||
file := f.GetAST(ctx)
|
||||
file, err := f.GetAST(ctx, ParseFull)
|
||||
if file == nil {
|
||||
return nil, fmt.Errorf("no AST for %s", f.URI())
|
||||
return nil, err
|
||||
}
|
||||
pkg := f.GetPackage(ctx)
|
||||
if pkg == nil || pkg.IsIllTyped() {
|
||||
|
|
|
@@ -9,9 +9,41 @@ import (
 	"go/ast"
 	"go/token"
 	"go/types"
+	"path/filepath"
 	"strings"
 )

+func DetectLanguage(langID, filename string) FileKind {
+	switch langID {
+	case "go":
+		return Go
+	case "go.mod":
+		return Mod
+	case "go.sum":
+		return Sum
+	}
+	// Fallback to detecting the language based on the file extension.
+	switch filepath.Ext(filename) {
+	case ".mod":
+		return Mod
+	case ".sum":
+		return Sum
+	default: // fallback to Go
+		return Go
+	}
+}
+
+func (k FileKind) String() string {
+	switch k {
+	case Mod:
+		return "go.mod"
+	case Sum:
+		return "go.sum"
+	default:
+		return "go"
+	}
+}
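Within the source package the new helper can be exercised like this (file names are illustrative; FileKind's String method makes the result print as the file type):

    kind := DetectLanguage("go.mod", "go.mod") // explicit language ID from the editor
    fmt.Println(kind)                          // "go.mod"
    kind = DetectLanguage("", "/tmp/project/go.sum") // no ID: fall back to the extension
    fmt.Println(kind)                          // "go.sum"
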
|
||||
|
||||
// indexExprAtPos returns the index of the expression containing pos.
|
||||
func indexExprAtPos(pos token.Pos, args []ast.Expr) int {
|
||||
for i, expr := range args {
|
||||
|
|
|
@ -9,12 +9,13 @@ import (
|
|||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/internal/imports"
|
||||
"golang.org/x/tools/internal/lsp/diff"
|
||||
"golang.org/x/tools/internal/lsp/xlog"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
|
@ -111,7 +112,7 @@ type Cache interface {
|
|||
FileSystem
|
||||
|
||||
// NewSession creates a new Session manager and returns it.
|
||||
NewSession(log xlog.Logger) Session
|
||||
NewSession(ctx context.Context) Session
|
||||
|
||||
// FileSet returns the shared fileset used by all files in the system.
|
||||
FileSet() *token.FileSet
|
||||
|
@ -129,14 +130,11 @@ type Cache interface {
|
|||
// A session may have many active views at any given time.
|
||||
type Session interface {
|
||||
// NewView creates a new View and returns it.
|
||||
NewView(name string, folder span.URI) View
|
||||
NewView(ctx context.Context, name string, folder span.URI) View
|
||||
|
||||
// Cache returns the cache that created this session.
|
||||
Cache() Cache
|
||||
|
||||
// Returns the logger in use for this session.
|
||||
Logger() xlog.Logger
|
||||
|
||||
// View returns a view with a matching name, if the session has one.
|
||||
View(name string) View
|
||||
|
||||
|
@ -154,7 +152,7 @@ type Session interface {
|
|||
FileSystem
|
||||
|
||||
// DidOpen is invoked each time a file is opened in the editor.
|
||||
DidOpen(ctx context.Context, uri span.URI, text []byte)
|
||||
DidOpen(ctx context.Context, uri span.URI, kind FileKind, text []byte)
|
||||
|
||||
// DidSave is invoked each time an open file is saved in the editor.
|
||||
DidSave(uri span.URI)
|
||||
|
@ -210,7 +208,11 @@ type View interface {
|
|||
// Ignore returns true if this file should be ignored by this view.
|
||||
Ignore(span.URI) bool
|
||||
|
||||
Config() *packages.Config
|
||||
Config(ctx context.Context) *packages.Config
|
||||
|
||||
// RunProcessEnvFunc runs fn with the process env for this view inserted into opts.
|
||||
// Note: the process env contains cached module and filesystem state.
|
||||
RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error
|
||||
}
|
||||
|
||||
// File represents a source file of any type.
|
||||
|
@ -219,19 +221,15 @@ type File interface {
|
|||
View() View
|
||||
Handle(ctx context.Context) FileHandle
|
||||
FileSet() *token.FileSet
|
||||
GetToken(ctx context.Context) *token.File
|
||||
GetToken(ctx context.Context) (*token.File, error)
|
||||
}
|
||||
|
||||
// GoFile represents a Go source file that has been type-checked.
|
||||
type GoFile interface {
|
||||
File
|
||||
|
||||
// GetAnyAST returns an AST that may or may not contain function bodies.
|
||||
// It should be used in scenarios where function bodies are not necessary.
|
||||
GetAnyAST(ctx context.Context) *ast.File
|
||||
|
||||
// GetAST returns the full AST for the file.
|
||||
GetAST(ctx context.Context) *ast.File
|
||||
GetAST(ctx context.Context, mode ParseMode) (*ast.File, error)
|
||||
|
||||
// GetPackage returns the package that this file belongs to.
|
||||
GetPackage(ctx context.Context) Package
|
||||
|
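With the interface changes above, callers now receive an explicit error from GetToken and pass a parse mode to GetAST rather than checking for a nil result. A hedged sketch of the new call pattern from outside the package, assuming f is a source.GoFile and ctx is in scope:

    tok, err := f.GetToken(ctx)
    if err != nil {
    	return err
    }
    file, err := f.GetAST(ctx, source.ParseFull)
    if file == nil {
    	return err
    }
    pkg := f.GetPackage(ctx)
    _ = tok
    _ = pkg
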
@ -258,7 +256,7 @@ type Package interface {
|
|||
ID() string
|
||||
PkgPath() string
|
||||
GetFilenames() []string
|
||||
GetSyntax() []*ast.File
|
||||
GetSyntax(context.Context) []*ast.File
|
||||
GetErrors() []packages.Error
|
||||
GetTypes() *types.Package
|
||||
GetTypesInfo() *types.Info
|
||||
|
@ -322,3 +320,10 @@ func EditsToDiff(edits []TextEdit) []*diff.Op {
|
|||
}
|
||||
return ops
|
||||
}
|
||||
|
||||
func sortTextEdits(d []TextEdit) {
|
||||
// Use a stable sort to maintain the order of edits inserted at the same position.
|
||||
sort.SliceStable(d, func(i int, j int) bool {
|
||||
return span.Compare(d[i].Span, d[j].Span) < 0
|
||||
})
|
||||
}
|
||||
|
|
|
@ -14,8 +14,8 @@ import (
|
|||
)
|
||||
|
||||
func (s *Server) documentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) ([]protocol.DocumentSymbol, error) {
|
||||
ctx, ts := trace.StartSpan(ctx, "lsp.Server.documentSymbol")
|
||||
defer ts.End()
|
||||
ctx, done := trace.StartSpan(ctx, "lsp.Server.documentSymbol")
|
||||
defer done()
|
||||
uri := span.NewURI(params.TextDocument.URI)
|
||||
view := s.session.ViewOf(uri)
|
||||
f, m, err := getGoFile(ctx, view, uri)
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
)
|
||||
|
||||
type Entry struct {
|
||||
At time.Time
|
||||
Message string
|
||||
Error error
|
||||
Tags tag.List
|
||||
}
|
||||
|
||||
func ToEntry(ctx context.Context, at time.Time, tags tag.List) Entry {
|
||||
//TODO: filter more efficiently for the common case of stripping prefixes only
|
||||
entry := Entry{
|
||||
At: at,
|
||||
}
|
||||
for _, t := range tags {
|
||||
switch t.Key {
|
||||
case MessageTag:
|
||||
entry.Message = t.Value.(string)
|
||||
case ErrorTag:
|
||||
entry.Error = t.Value.(error)
|
||||
default:
|
||||
entry.Tags = append(entry.Tags, t)
|
||||
}
|
||||
}
|
||||
return entry
|
||||
}
|
||||
|
||||
func (e Entry) Format(f fmt.State, r rune) {
|
||||
if !e.At.IsZero() {
|
||||
fmt.Fprint(f, e.At.Format("2006/01/02 15:04:05 "))
|
||||
}
|
||||
fmt.Fprint(f, e.Message)
|
||||
if e.Error != nil {
|
||||
fmt.Fprintf(f, ": %v", e.Error)
|
||||
}
|
||||
for _, tag := range e.Tags {
|
||||
fmt.Fprintf(f, "\n\t%v = %v", tag.Key, tag.Value)
|
||||
}
|
||||
}
|
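ToEntry plus the Format method mean an Entry can be handed straight to the fmt verbs. A small sketch from outside the package (the tag values are illustrative):

    entry := log.ToEntry(context.Background(), time.Now(), tag.List{
    	log.MessageTag.Of("loaded package"),
    	tag.Of("Package", "fmt"),
    })
    fmt.Printf("%v\n", entry) // timestamp, message, then one "key = value" line per tag
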
|
@ -0,0 +1,92 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package log is a context based logging package, designed to interact well
|
||||
// with both the lsp protocol and the other telemetry packages.
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/worker"
|
||||
)
|
||||
|
||||
const (
|
||||
// The well known tag keys for the logging system.
|
||||
MessageTag = tag.Key("message")
|
||||
ErrorTag = tag.Key("error")
|
||||
)
|
||||
|
||||
// Logger is a function that handles logging messages.
|
||||
// Loggers are registered at start up, and may use information in the context
|
||||
// to decide what to do with a given log message.
|
||||
type Logger func(ctx context.Context, at time.Time, tags tag.List) bool
|
||||
|
||||
// With sends a tag list to the installed loggers.
|
||||
func With(ctx context.Context, tags ...tag.Tag) {
|
||||
at := time.Now()
|
||||
worker.Do(func() {
|
||||
deliver(ctx, at, tags)
|
||||
})
|
||||
}
|
||||
|
||||
// Print takes a message and a tag list and combines them into a single tag
|
||||
// list before delivering them to the loggers.
|
||||
func Print(ctx context.Context, message string, tags ...tag.Tagger) {
|
||||
at := time.Now()
|
||||
worker.Do(func() {
|
||||
tags := append(tag.Tags(ctx, tags...), MessageTag.Of(message))
|
||||
deliver(ctx, at, tags)
|
||||
})
|
||||
}
|
||||
|
||||
type errorString string
|
||||
|
||||
// Error allows errorString to conform to the error interface.
|
||||
func (err errorString) Error() string { return string(err) }
|
||||
|
||||
// Error takes a message, an error, and a tag list and combines them into a single tag
|
||||
// list before delivering them to the loggers.
|
||||
func Error(ctx context.Context, message string, err error, tags ...tag.Tagger) {
|
||||
at := time.Now()
|
||||
worker.Do(func() {
|
||||
if err == nil {
|
||||
err = errorString(message)
|
||||
message = ""
|
||||
}
|
||||
tags := append(tag.Tags(ctx, tags...), MessageTag.Of(message), ErrorTag.Of(err))
|
||||
deliver(ctx, at, tags)
|
||||
})
|
||||
}
|
||||
|
||||
func deliver(ctx context.Context, at time.Time, tags tag.List) {
|
||||
delivered := false
|
||||
for _, logger := range loggers {
|
||||
if logger(ctx, at, tags) {
|
||||
delivered = true
|
||||
}
|
||||
}
|
||||
if !delivered {
|
||||
// no logger processed the message, so we log to stderr just in case
|
||||
Stderr(ctx, at, tags)
|
||||
}
|
||||
}
|
||||
|
||||
var loggers = []Logger{}
|
||||
|
||||
func AddLogger(logger Logger) {
|
||||
worker.Do(func() {
|
||||
loggers = append(loggers, logger)
|
||||
})
|
||||
}
|
||||
|
||||
// Stderr is a logger that logs to stderr in the standard format.
|
||||
func Stderr(ctx context.Context, at time.Time, tags tag.List) bool {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", ToEntry(ctx, at, tags))
|
||||
return true
|
||||
}
|
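The package is used by installing one or more Logger functions and then emitting tagged messages; if no logger handles a message it falls back to Stderr. A hedged sketch of both sides, assuming the code lives inside x/tools (the packages are internal) and that ctx and err are in scope:

    log.AddLogger(func(ctx context.Context, at time.Time, tags tag.List) bool {
    	fmt.Fprintf(os.Stderr, "custom: %v\n", log.ToEntry(ctx, at, tags))
    	return true // handled, so the default Stderr fallback is skipped
    })

    log.Print(ctx, "loaded package", tag.Of("Package", "fmt"))
    log.Error(ctx, "type check failed", err, tag.Of("Package", "fmt"))
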
|
@ -0,0 +1,412 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package metric aggregates stats into metrics that can be exported.
|
||||
package metric
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/telemetry/stats"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/tag"
|
||||
"golang.org/x/tools/internal/lsp/telemetry/worker"
|
||||
)
|
||||
|
||||
// Handle uniquely identifies a constructed metric.
|
||||
// It can be used to detect which observed data objects belong
|
||||
// to that metric.
|
||||
type Handle struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Data represents a single point in the time series of a metric.
|
||||
// This provides the common interface to all metrics no matter their data
|
||||
// format.
|
||||
// To get the actual values for the metric you must type assert to a concrete
|
||||
// metric type.
|
||||
type Data interface {
|
||||
// Handle returns the metric handle this data is for.
|
||||
Handle() Handle
|
||||
// Groups reports the rows that currently exist for this metric.
|
||||
Groups() []tag.List
|
||||
}
|
||||
|
||||
// Scalar represents the construction information for a scalar metric.
|
||||
type Scalar struct {
|
||||
// Name is the unique name of this metric.
|
||||
Name string
|
||||
// Description can be used by observers to describe the metric to users.
|
||||
Description string
|
||||
// Keys is the set of tags that collectively describe rows of the metric.
|
||||
Keys []interface{}
|
||||
}
|
||||
|
||||
// HistogramInt64 represents the construction information for an int64 histogram metric.
|
||||
type HistogramInt64 struct {
|
||||
// Name is the unique name of this metric.
|
||||
Name string
|
||||
// Description can be used by observers to describe the metric to users.
|
||||
Description string
|
||||
// Keys is the set of tags that collectively describe rows of the metric.
|
||||
Keys []interface{}
|
||||
// Buckets holds the inclusive upper bound of each bucket in the histogram.
|
||||
Buckets []int64
|
||||
}
|
||||
|
||||
// HistogramFloat64 represents the construction information for an float64 histogram metric.
|
||||
type HistogramFloat64 struct {
|
||||
// Name is the unique name of this metric.
|
||||
Name string
|
||||
// Description can be used by observers to describe the metric to users.
|
||||
Description string
|
||||
// Keys is the set of tags that collectively describe rows of the metric.
|
||||
Keys []interface{}
|
||||
// Buckets holds the inclusive upper bound of each bucket in the histogram.
|
||||
Buckets []float64
|
||||
}
|
||||
|
||||
// Observer is the type for functions that want to observe metric values
|
||||
// as they arrive.
|
||||
// Each data point delivered to an observer is immutable and can be stored if
|
||||
// needed.
|
||||
type Observer func(Data)
|
||||
|
||||
// CountInt64 creates a new metric based on the Scalar information that counts
|
||||
// the number of times the supplied int64 measure is set.
|
||||
// Metrics of this type will use Int64Data.
|
||||
func (info Scalar) CountInt64(measure *stats.Int64Measure) Handle {
|
||||
data := &Int64Data{Info: &info}
|
||||
measure.Subscribe(data.countInt64)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// SumInt64 creates a new metric based on the Scalar information that sums all
|
||||
// the values recorded on the int64 measure.
|
||||
// Metrics of this type will use Int64Data.
|
||||
func (info Scalar) SumInt64(measure *stats.Int64Measure) Handle {
|
||||
data := &Int64Data{Info: &info}
|
||||
measure.Subscribe(data.sum)
|
||||
_ = data
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// LatestInt64 creates a new metric based on the Scalar information that tracks
|
||||
// the most recent value recorded on the int64 measure.
|
||||
// Metrics of this type will use Int64Data.
|
||||
func (info Scalar) LatestInt64(measure *stats.Int64Measure) Handle {
|
||||
data := &Int64Data{Info: &info, IsGauge: true}
|
||||
measure.Subscribe(data.latest)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// CountFloat64 creates a new metric based on the Scalar information that counts
|
||||
// the number of times the supplied float64 measure is set.
|
||||
// Metrics of this type will use Int64Data.
|
||||
func (info Scalar) CountFloat64(measure *stats.Float64Measure) Handle {
|
||||
data := &Int64Data{Info: &info}
|
||||
measure.Subscribe(data.countFloat64)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// SumFloat64 creates a new metric based on the Scalar information that sums all
|
||||
// the values recorded on the float64 measure.
|
||||
// Metrics of this type will use Float64Data.
|
||||
func (info Scalar) SumFloat64(measure *stats.Float64Measure) Handle {
|
||||
data := &Float64Data{Info: &info}
|
||||
measure.Subscribe(data.sum)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// LatestFloat64 creates a new metric based on the Scalar information that tracks
|
||||
// the most recent value recorded on the float64 measure.
|
||||
// Metrics of this type will use Float64Data.
|
||||
func (info Scalar) LatestFloat64(measure *stats.Float64Measure) Handle {
|
||||
data := &Float64Data{Info: &info, IsGauge: true}
|
||||
measure.Subscribe(data.latest)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// Record creates a new metric based on the HistogramInt64 information that
|
||||
// tracks the bucketized counts of values recorded on the int64 measure.
|
||||
// Metrics of this type will use HistogramInt64Data.
|
||||
func (info HistogramInt64) Record(measure *stats.Int64Measure) Handle {
|
||||
data := &HistogramInt64Data{Info: &info}
|
||||
measure.Subscribe(data.record)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// Record creates a new metric based on the HistogramFloat64 information that
|
||||
// tracks the bucketized counts of values recorded on the float64 measure.
|
||||
// Metrics of this type will use HistogramFloat64Data.
|
||||
func (info HistogramFloat64) Record(measure *stats.Float64Measure) Handle {
|
||||
data := &HistogramFloat64Data{Info: &info}
|
||||
measure.Subscribe(data.record)
|
||||
return Handle{info.Name}
|
||||
}
|
||||
|
||||
// Int64Data is a concrete implementation of Data for int64 scalar metrics.
|
||||
type Int64Data struct {
|
||||
// Info holds the original construction information.
|
||||
Info *Scalar
|
||||
// IsGauge is true for metrics that track values, rather than increasing over time.
|
||||
IsGauge bool
|
||||
// Rows holds the per group values for the metric.
|
||||
Rows []int64
|
||||
|
||||
groups []tag.List
|
||||
}
|
||||
|
||||
// Float64Data is a concrete implementation of Data for float64 scalar metrics.
|
||||
type Float64Data struct {
|
||||
// Info holds the original construction information.
|
||||
Info *Scalar
|
||||
// IsGauge is true for metrics that track values, rather than increasing over time.
|
||||
IsGauge bool
|
||||
// Rows holds the per group values for the metric.
|
||||
Rows []float64
|
||||
|
||||
groups []tag.List
|
||||
}
|
||||
|
||||
// HistogramInt64Data is a concrete implementation of Data for int64 histogram metrics.
|
||||
type HistogramInt64Data struct {
|
||||
// Info holds the original construction information.
|
||||
Info *HistogramInt64
|
||||
// Rows holds the per group values for the metric.
|
||||
Rows []*HistogramInt64Row
|
||||
|
||||
groups []tag.List
|
||||
}
|
||||
|
||||
// HistogramInt64Row holds the values for a single row of a HistogramInt64Data.
|
||||
type HistogramInt64Row struct {
|
||||
// Values is the counts per bucket.
|
||||
Values []int64
|
||||
// Count is the total count.
|
||||
Count int64
|
||||
// Sum is the sum of all the values recorded.
|
||||
Sum int64
|
||||
// Min is the smallest recorded value.
|
||||
Min int64
|
||||
// Max is the largest recorded value.
|
||||
Max int64
|
||||
}
|
||||
|
||||
// HistogramFloat64Data is a concrete implementation of Data for float64 histogram metrics.
|
||||
type HistogramFloat64Data struct {
|
||||
// Info holds the original construction information.
|
||||
Info *HistogramFloat64
|
||||
// Rows holds the per group values for the metric.
|
||||
Rows []*HistogramFloat64Row
|
||||
|
||||
groups []tag.List
|
||||
}
|
||||
|
||||
// HistogramFloat64Row holds the values for a single row of a HistogramFloat64Data.
|
||||
type HistogramFloat64Row struct {
|
||||
// Values is the counts per bucket.
|
||||
Values []int64
|
||||
// Count is the total count.
|
||||
Count int64
|
||||
// Sum is the sum of all the values recorded.
|
||||
Sum float64
|
||||
// Min is the smallest recorded value.
|
||||
Min float64
|
||||
// Max is the largest recorded value.
|
||||
Max float64
|
||||
}
|
||||
|
||||
// Name returns the name of the metric this is a handle for.
|
||||
func (h Handle) Name() string { return h.name }
|
||||
|
||||
var observers []Observer
|
||||
|
||||
// RegisterObservers adds a new metric observer to the system.
|
||||
// There is no way to unregister an observer.
|
||||
func RegisterObservers(e ...Observer) {
|
||||
worker.Do(func() {
|
||||
observers = append(e, observers...)
|
||||
})
|
||||
}
|
||||
|
||||
// export must only be called from inside a worker
|
||||
func export(m Data) {
|
||||
for _, e := range observers {
|
||||
e(m)
|
||||
}
|
||||
}
|
||||
|
||||
func getGroup(ctx context.Context, g *[]tag.List, keys []interface{}) (int, bool) {
|
||||
group := tag.Get(ctx, keys...)
|
||||
old := *g
|
||||
index := sort.Search(len(old), func(i int) bool {
|
||||
return !old[i].Less(group)
|
||||
})
|
||||
if index < len(old) && group.Equal(old[index]) {
|
||||
// not a new group
|
||||
return index, false
|
||||
}
|
||||
*g = make([]tag.List, len(old)+1)
|
||||
copy(*g, old[:index])
|
||||
copy((*g)[index+1:], old[index:])
|
||||
(*g)[index] = group
|
||||
return index, true
|
||||
}
|
||||
|
||||
func (data *Int64Data) Handle() Handle { return Handle{data.Info.Name} }
|
||||
func (data *Int64Data) Groups() []tag.List { return data.groups }
|
||||
|
||||
func (data *Int64Data) modify(ctx context.Context, f func(v int64) int64) {
|
||||
worker.Do(func() {
|
||||
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
|
||||
old := data.Rows
|
||||
if insert {
|
||||
data.Rows = make([]int64, len(old)+1)
|
||||
copy(data.Rows, old[:index])
|
||||
copy(data.Rows[index+1:], old[index:])
|
||||
} else {
|
||||
data.Rows = make([]int64, len(old))
|
||||
copy(data.Rows, old)
|
||||
}
|
||||
data.Rows[index] = f(data.Rows[index])
|
||||
frozen := *data
|
||||
export(&frozen)
|
||||
})
|
||||
}
|
||||
|
||||
func (data *Int64Data) countInt64(ctx context.Context, measure *stats.Int64Measure, value int64) {
|
||||
data.modify(ctx, func(v int64) int64 { return v + 1 })
|
||||
}
|
||||
|
||||
func (data *Int64Data) countFloat64(ctx context.Context, measure *stats.Float64Measure, value float64) {
|
||||
data.modify(ctx, func(v int64) int64 { return v + 1 })
|
||||
}
|
||||
|
||||
func (data *Int64Data) sum(ctx context.Context, measure *stats.Int64Measure, value int64) {
|
||||
data.modify(ctx, func(v int64) int64 { return v + value })
|
||||
}
|
||||
|
||||
func (data *Int64Data) latest(ctx context.Context, measure *stats.Int64Measure, value int64) {
|
||||
data.modify(ctx, func(v int64) int64 { return value })
|
||||
}
|
||||
|
||||
func (data *Float64Data) Handle() Handle { return Handle{data.Info.Name} }
|
||||
func (data *Float64Data) Groups() []tag.List { return data.groups }
|
||||
|
||||
func (data *Float64Data) modify(ctx context.Context, f func(v float64) float64) {
|
||||
worker.Do(func() {
|
||||
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
|
||||
old := data.Rows
|
||||
if insert {
|
||||
data.Rows = make([]float64, len(old)+1)
|
||||
copy(data.Rows, old[:index])
|
||||
copy(data.Rows[index+1:], old[index:])
|
||||
} else {
|
||||
data.Rows = make([]float64, len(old))
|
||||
copy(data.Rows, old)
|
||||
}
|
||||
data.Rows[index] = f(data.Rows[index])
|
||||
frozen := *data
|
||||
export(&frozen)
|
||||
})
|
||||
}
|
||||
|
||||
func (data *Float64Data) sum(ctx context.Context, measure *stats.Float64Measure, value float64) {
|
||||
data.modify(ctx, func(v float64) float64 { return v + value })
|
||||
}
|
||||
|
||||
func (data *Float64Data) latest(ctx context.Context, measure *stats.Float64Measure, value float64) {
|
||||
data.modify(ctx, func(v float64) float64 { return value })
|
||||
}
|
||||
|
||||
func (data *HistogramInt64Data) Handle() Handle { return Handle{data.Info.Name} }
|
||||
func (data *HistogramInt64Data) Groups() []tag.List { return data.groups }
|
||||
|
||||
func (data *HistogramInt64Data) modify(ctx context.Context, f func(v *HistogramInt64Row)) {
|
||||
worker.Do(func() {
|
||||
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
|
||||
old := data.Rows
|
||||
var v HistogramInt64Row
|
||||
if insert {
|
||||
data.Rows = make([]*HistogramInt64Row, len(old)+1)
|
||||
copy(data.Rows, old[:index])
|
||||
copy(data.Rows[index+1:], old[index:])
|
||||
} else {
|
||||
data.Rows = make([]*HistogramInt64Row, len(old))
|
||||
copy(data.Rows, old)
|
||||
v = *data.Rows[index]
|
||||
}
|
||||
oldValues := v.Values
|
||||
v.Values = make([]int64, len(data.Info.Buckets))
|
||||
copy(v.Values, oldValues)
|
||||
f(&v)
|
||||
data.Rows[index] = &v
|
||||
frozen := *data
|
||||
export(&frozen)
|
||||
})
|
||||
}
|
||||
|
||||
func (data *HistogramInt64Data) record(ctx context.Context, measure *stats.Int64Measure, value int64) {
|
||||
data.modify(ctx, func(v *HistogramInt64Row) {
|
||||
v.Sum += value
|
||||
if v.Min > value || v.Count == 0 {
|
||||
v.Min = value
|
||||
}
|
||||
if v.Max < value || v.Count == 0 {
|
||||
v.Max = value
|
||||
}
|
||||
v.Count++
|
||||
for i, b := range data.Info.Buckets {
|
||||
if value <= b {
|
||||
v.Values[i]++
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (data *HistogramFloat64Data) Handle() Handle { return Handle{data.Info.Name} }
|
||||
func (data *HistogramFloat64Data) Groups() []tag.List { return data.groups }
|
||||
|
||||
func (data *HistogramFloat64Data) modify(ctx context.Context, f func(v *HistogramFloat64Row)) {
|
||||
worker.Do(func() {
|
||||
index, insert := getGroup(ctx, &data.groups, data.Info.Keys)
|
||||
old := data.Rows
|
||||
var v HistogramFloat64Row
|
||||
if insert {
|
||||
data.Rows = make([]*HistogramFloat64Row, len(old)+1)
|
||||
copy(data.Rows, old[:index])
|
||||
copy(data.Rows[index+1:], old[index:])
|
||||
} else {
|
||||
data.Rows = make([]*HistogramFloat64Row, len(old))
|
||||
copy(data.Rows, old)
|
||||
v = *data.Rows[index]
|
||||
}
|
||||
oldValues := v.Values
|
||||
v.Values = make([]int64, len(data.Info.Buckets))
|
||||
copy(v.Values, oldValues)
|
||||
f(&v)
|
||||
data.Rows[index] = &v
|
||||
frozen := *data
|
||||
export(&frozen)
|
||||
})
|
||||
}
|
||||
|
||||
func (data *HistogramFloat64Data) record(ctx context.Context, measure *stats.Float64Measure, value float64) {
|
||||
data.modify(ctx, func(v *HistogramFloat64Row) {
|
||||
v.Sum += value
|
||||
if v.Min > value || v.Count == 0 {
|
||||
v.Min = value
|
||||
}
|
||||
if v.Max < value || v.Count == 0 {
|
||||
v.Max = value
|
||||
}
|
||||
v.Count++
|
||||
for i, b := range data.Info.Buckets {
|
||||
if value <= b {
|
||||
v.Values[i]++
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
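A hedged sketch of wiring a metric to a measure and observing its updates; latencyMillis stands in for a *stats.Int64Measure obtained from the stats package, which is not shown in this diff:

    histogram := metric.HistogramInt64{
    	Name:        "request_latency_ms",
    	Description: "how long requests take",
    	Buckets:     []int64{10, 50, 100, 500, 1000},
    }
    handle := histogram.Record(latencyMillis) // subscribe to the measure

    metric.RegisterObservers(func(d metric.Data) {
    	if d.Handle() != handle {
    		return
    	}
    	rows := d.(*metric.HistogramInt64Data).Rows
    	_ = rows // each row holds per-bucket counts plus Count/Sum/Min/Max
    })
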
|
@ -0,0 +1,242 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ocagent adds the ability to export all telemetry to an ocagent.
|
||||
// This keeps the compile time dependencies to zero and allows the agent to
|
||||
// have the exporters needed for telemetry aggregation and viewing systems.
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
    "os"
    "time"

    "golang.org/x/tools/internal/lsp/telemetry/log"
    "golang.org/x/tools/internal/lsp/telemetry/metric"
    "golang.org/x/tools/internal/lsp/telemetry/ocagent/wire"
    "golang.org/x/tools/internal/lsp/telemetry/tag"
    "golang.org/x/tools/internal/lsp/telemetry/trace"
    "golang.org/x/tools/internal/lsp/telemetry/worker"
)

const DefaultAddress = "http://localhost:55678"
const exportRate = 2 * time.Second

type exporter struct {
    address string
    node *wire.Node
    spans []*wire.Span
    metrics []*wire.Metric
}

func Export(service, address string) {
    if address == "off" {
        return
    }
    hostname, _ := os.Hostname()
    exporter := &exporter{
        address: address,
        node: &wire.Node{
            Identifier: &wire.ProcessIdentifier{
                HostName: hostname,
                Pid: uint32(os.Getpid()),
                StartTimestamp: convertTimestamp(time.Now()),
            },
            LibraryInfo: &wire.LibraryInfo{
                Language: wire.LanguageGo,
                ExporterVersion: "0.0.1",
                CoreLibraryVersion: "x/tools",
            },
            ServiceInfo: &wire.ServiceInfo{
                Name: service,
            },
        },
    }
    if exporter.address == "" {
        exporter.address = DefaultAddress
    }
    //TODO: add metrics once the ocagent json metric interface works
    trace.RegisterObservers(exporter.observeTrace)
    go func() {
        for range time.Tick(exportRate) {
            worker.Do(func() {
                exporter.flush()
            })
        }
    }()
}

func (e *exporter) observeTrace(span *trace.Span) {
    // is this a completed span?
    if span.Finish.IsZero() {
        return
    }
    e.spans = append(e.spans, convertSpan(span))
}

func (e *exporter) observeMetric(data metric.Data) {
    e.metrics = append(e.metrics, convertMetric(data))
}

func (e *exporter) flush() {
    spans := e.spans
    e.spans = nil
    metrics := e.metrics
    e.metrics = nil

    if len(spans) > 0 {
        e.send("/v1/trace", &wire.ExportTraceServiceRequest{
            Node: e.node,
            Spans: spans,
            //TODO: Resource?
        })
    }
    if len(metrics) > 0 {
        e.send("/v1/metrics", &wire.ExportMetricsServiceRequest{
            Node: e.node,
            Metrics: metrics,
            //TODO: Resource?
        })
    }
}

func (e *exporter) send(endpoint string, message interface{}) {
    blob, err := json.Marshal(message)
    if err != nil {
        errorInExport("ocagent failed to marshal message for %v: %v", endpoint, err)
        return
    }
    uri := e.address + endpoint
    req, err := http.NewRequest("POST", uri, bytes.NewReader(blob))
    if err != nil {
        errorInExport("ocagent failed to build request for %v: %v", uri, err)
        return
    }
    req.Header.Set("Content-Type", "application/json")
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        errorInExport("ocagent failed to send message: %v\n", err)
        return
    }
    res.Body.Close()
}

func errorInExport(message string, args ...interface{}) {
    // This function is useful when debugging the exporter, but in general we
    // want to just drop any export errors.
}

func convertTimestamp(t time.Time) wire.Timestamp {
    return t.Format(time.RFC3339Nano)
}

func toTruncatableString(s string) *wire.TruncatableString {
    return &wire.TruncatableString{Value: s}
}

func convertSpan(span *trace.Span) *wire.Span {
    result := &wire.Span{
        TraceId: span.TraceID[:],
        SpanId: span.SpanID[:],
        TraceState: nil, //TODO?
        ParentSpanId: span.ParentID[:],
        Name: toTruncatableString(span.Name),
        Kind: wire.UnspecifiedSpanKind,
        StartTime: convertTimestamp(span.Start),
        EndTime: convertTimestamp(span.Finish),
        Attributes: convertAttributes(span.Tags),
        TimeEvents: convertEvents(span.Events),
        SameProcessAsParentSpan: true,
        //TODO: StackTrace?
        //TODO: Links?
        //TODO: Status?
        //TODO: Resource?
    }
    return result
}

func convertMetric(data metric.Data) *wire.Metric {
    return nil //TODO:
}

func convertAttributes(tags tag.List) *wire.Attributes {
    if len(tags) == 0 {
        return nil
    }
    attributes := make(map[string]wire.Attribute)
    for _, tag := range tags {
        attributes[fmt.Sprint(tag.Key)] = convertAttribute(tag.Value)
    }
    return &wire.Attributes{AttributeMap: attributes}
}

func convertAttribute(v interface{}) wire.Attribute {
    switch v := v.(type) {
    case int8:
        return wire.IntAttribute{IntValue: int64(v)}
    case int16:
        return wire.IntAttribute{IntValue: int64(v)}
    case int32:
        return wire.IntAttribute{IntValue: int64(v)}
    case int64:
        return wire.IntAttribute{IntValue: v}
    case uint8:
        return wire.IntAttribute{IntValue: int64(v)}
    case uint16:
        return wire.IntAttribute{IntValue: int64(v)}
    case uint32:
        return wire.IntAttribute{IntValue: int64(v)}
    case uint64:
        return wire.IntAttribute{IntValue: int64(v)}
    case uint:
        return wire.IntAttribute{IntValue: int64(v)}
    case float32:
        return wire.DoubleAttribute{DoubleValue: float64(v)}
    case float64:
        return wire.DoubleAttribute{DoubleValue: v}
    case bool:
        return wire.BoolAttribute{BoolValue: v}
    case string:
        return wire.StringAttribute{StringValue: toTruncatableString(v)}
    default:
        return wire.StringAttribute{StringValue: toTruncatableString(fmt.Sprint(v))}
    }
}

func convertEvents(events []trace.Event) *wire.TimeEvents {
    //TODO: MessageEvents?
    result := make([]wire.TimeEvent, len(events))
    for i, event := range events {
        result[i] = convertEvent(event)
    }
    return &wire.TimeEvents{TimeEvent: result}
}

func convertEvent(event trace.Event) wire.TimeEvent {
    return wire.TimeEvent{
        Time: convertTimestamp(event.Time),
        Annotation: convertAnnotation(event.Tags),
    }
}

func convertAnnotation(tags tag.List) *wire.Annotation {
    entry := log.ToEntry(nil, time.Time{}, tags)
    description := entry.Message
    if description == "" && entry.Error != nil {
        description = entry.Error.Error()
        entry.Error = nil
    }
    tags = entry.Tags
    if entry.Error != nil {
        tags = append(tags, tag.Of("Error", entry.Error))
    }
    return &wire.Annotation{
        Description: toTruncatableString(description),
        Attributes: convertAttributes(tags),
    }
}

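For context, here is a minimal usage sketch (not part of this change) of how a service inside this module might enable the exporter above. It assumes this file belongs to the ocagent package implied by the wire import path; the service name "gopls" and the use of DefaultAddress are illustrative:

    package main

    import (
        "context"

        "golang.org/x/tools/internal/lsp/telemetry/ocagent" // assumed package path
        "golang.org/x/tools/internal/lsp/telemetry/trace"
    )

    func main() {
        // Register the exporter; completed spans are batched and POSTed to
        // <address>/v1/trace every exportRate.
        ocagent.Export("gopls", ocagent.DefaultAddress)

        ctx, done := trace.StartSpan(context.Background(), "startup")
        defer done() // marks the span finished, so observeTrace will queue it
        _ = ctx      // ... run the service ...
    }
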
@ -0,0 +1,101 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wire

// This file holds common ocagent types

type Node struct {
    Identifier *ProcessIdentifier `json:"identifier,omitempty"`
    LibraryInfo *LibraryInfo `json:"library_info,omitempty"`
    ServiceInfo *ServiceInfo `json:"service_info,omitempty"`
    Attributes map[string]string `json:"attributes,omitempty"`
}

type Resource struct {
    Type string `json:"type,omitempty"`
    Labels map[string]string `json:"labels,omitempty"`
}

type TruncatableString struct {
    Value string `json:"value,omitempty"`
    TruncatedByteCount int32 `json:"truncated_byte_count,omitempty"`
}

type Attributes struct {
    AttributeMap map[string]Attribute `json:"attributeMap,omitempty"`
    DroppedAttributesCount int32 `json:"dropped_attributes_count,omitempty"`
}

type StringAttribute struct {
    StringValue *TruncatableString `json:"stringValue,omitempty"`
}

type IntAttribute struct {
    IntValue int64 `json:"intValue,omitempty"`
}

type BoolAttribute struct {
    BoolValue bool `json:"boolValue,omitempty"`
}

type DoubleAttribute struct {
    DoubleValue float64 `json:"doubleValue,omitempty"`
}

type Attribute interface {
    tagAttribute()
}

func (StringAttribute) tagAttribute() {}
func (IntAttribute) tagAttribute() {}
func (BoolAttribute) tagAttribute() {}
func (DoubleAttribute) tagAttribute() {}

type StackTrace struct {
    StackFrames *StackFrames `json:"stack_frames,omitempty"`
    StackTraceHashId uint64 `json:"stack_trace_hash_id,omitempty"`
}

type StackFrames struct {
    Frame []*StackFrame `json:"frame,omitempty"`
    DroppedFramesCount int32 `json:"dropped_frames_count,omitempty"`
}

type StackFrame struct {
    FunctionName *TruncatableString `json:"function_name,omitempty"`
    OriginalFunctionName *TruncatableString `json:"original_function_name,omitempty"`
    FileName *TruncatableString `json:"file_name,omitempty"`
    LineNumber int64 `json:"line_number,omitempty"`
    ColumnNumber int64 `json:"column_number,omitempty"`
    LoadModule *Module `json:"load_module,omitempty"`
    SourceVersion *TruncatableString `json:"source_version,omitempty"`
}

type Module struct {
    Module *TruncatableString `json:"module,omitempty"`
    BuildId *TruncatableString `json:"build_id,omitempty"`
}

type ProcessIdentifier struct {
    HostName string `json:"host_name,omitempty"`
    Pid uint32 `json:"pid,omitempty"`
    StartTimestamp Timestamp `json:"start_timestamp,omitempty"`
}

type LibraryInfo struct {
    Language Language `json:"language,omitempty"`
    ExporterVersion string `json:"exporter_version,omitempty"`
    CoreLibraryVersion string `json:"core_library_version,omitempty"`
}

type Language int32

const (
    LanguageGo Language = 4
)

type ServiceInfo struct {
    Name string `json:"name,omitempty"`
}

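Because Attribute is a closed interface implemented by small wrapper structs (the tagAttribute marker methods above), json.Marshal of an attribute map produces the nested shape the agent's JSON API expects. A rough, hypothetical illustration (the keys and values are made up; the expected output is shown in a comment):

    package main

    import (
        "encoding/json"
        "fmt"

        "golang.org/x/tools/internal/lsp/telemetry/ocagent/wire"
    )

    func main() {
        attrs := wire.Attributes{
            AttributeMap: map[string]wire.Attribute{
                "method": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "textDocument/definition"}},
                "count":  wire.IntAttribute{IntValue: 3},
            },
        }
        blob, _ := json.Marshal(attrs)
        fmt.Println(string(blob))
        // {"attributeMap":{"count":{"intValue":3},"method":{"stringValue":{"value":"textDocument/definition"}}}}
    }
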
@ -0,0 +1,17 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wire

// This file contains types that match the core proto types

type Timestamp = string

type Int64Value struct {
    Value int64 `json:"value,omitempty"`
}

type DoubleValue struct {
    Value float64 `json:"value,omitempty"`
}

@ -0,0 +1,130 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wire

type ExportMetricsServiceRequest struct {
    Node *Node `json:"node,omitempty"`
    Metrics []*Metric `json:"metrics,omitempty"`
    Resource *Resource `json:"resource,omitempty"`
}

type Metric struct {
    MetricDescriptor *MetricDescriptor `json:"metric_descriptor,omitempty"`
    Timeseries []*TimeSeries `json:"timeseries,omitempty"`
    Resource *Resource `json:"resource,omitempty"`
}

type MetricDescriptor struct {
    Name string `json:"name,omitempty"`
    Description string `json:"description,omitempty"`
    Unit string `json:"unit,omitempty"`
    Type MetricDescriptor_Type `json:"type,omitempty"`
    LabelKeys []*LabelKey `json:"label_keys,omitempty"`
}

type MetricDescriptor_Type int32

const (
    MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
    MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
    MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
    MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
    MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
    MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
    MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
    MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
)

type LabelKey struct {
    Key string `json:"key,omitempty"`
    Description string `json:"description,omitempty"`
}

type TimeSeries struct {
    StartTimestamp *Timestamp `json:"start_timestamp,omitempty"`
    LabelValues []*LabelValue `json:"label_values,omitempty"`
    Points []*Point `json:"points,omitempty"`
}

type LabelValue struct {
    Value string `json:"value,omitempty"`
    HasValue bool `json:"has_value,omitempty"`
}

type Point struct {
    Timestamp *Timestamp `json:"timestamp,omitempty"`
    Value PointValue `json:"value,omitempty"`
}

type PointInt64Value struct {
    Int64Value int64 `json:"int64Value,omitempty"`
}

type PointDoubleValue struct {
    DoubleValue float64 `json:"doubleValue,omitempty"`
}

type PointDistributionValue struct {
    DistributionValue *DistributionValue `json:"distributionValue,omitempty"`
}

type PointSummaryValue struct {
    SummaryValue *SummaryValue `json:"summaryValue,omitempty"`
}

type PointValue interface {
    tagPointValue()
}

func (PointInt64Value) tagPointValue() {}
func (PointDoubleValue) tagPointValue() {}
func (PointDistributionValue) tagPointValue() {}
func (PointSummaryValue) tagPointValue() {}

type DistributionValue struct {
    Count int64 `json:"count,omitempty"`
    Sum float64 `json:"sum,omitempty"`
    SumOfSquaredDeviation float64 `json:"sum_of_squared_deviation,omitempty"`
    BucketOptions BucketOptions `json:"bucket_options,omitempty"`
    Buckets []*Bucket `json:"buckets,omitempty"`
}

type BucketOptionsExplicit struct {
    Bounds []float64 `json:"bounds,omitempty"`
}

type BucketOptions interface {
    tagBucketOptions()
}

func (BucketOptionsExplicit) tagBucketOptions() {}

type Bucket struct {
    Count int64 `json:"count,omitempty"`
    Exemplar *Exemplar `json:"exemplar,omitempty"`
}

type Exemplar struct {
    Value float64 `json:"value,omitempty"`
    Timestamp *Timestamp `json:"timestamp,omitempty"`
    Attachments map[string]string `json:"attachments,omitempty"`
}

type SummaryValue struct {
    Count *Int64Value `json:"count,omitempty"`
    Sum *DoubleValue `json:"sum,omitempty"`
    Snapshot *Snapshot `json:"snapshot,omitempty"`
}

type Snapshot struct {
    Count *Int64Value `json:"count,omitempty"`
    Sum *DoubleValue `json:"sum,omitempty"`
    PercentileValues []*SnapshotValueAtPercentile `json:"percentile_values,omitempty"`
}

type SnapshotValueAtPercentile struct {
    Percentile float64 `json:"percentile,omitempty"`
    Value float64 `json:"value,omitempty"`
}

@ -0,0 +1,112 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wire

type ExportTraceServiceRequest struct {
    Node *Node `json:"node,omitempty"`
    Spans []*Span `json:"spans,omitempty"`
    Resource *Resource `json:"resource,omitempty"`
}

type Span struct {
    TraceId []byte `json:"trace_id,omitempty"`
    SpanId []byte `json:"span_id,omitempty"`
    TraceState *TraceState `json:"tracestate,omitempty"`
    ParentSpanId []byte `json:"parent_span_id,omitempty"`
    Name *TruncatableString `json:"name,omitempty"`
    Kind SpanKind `json:"kind,omitempty"`
    StartTime Timestamp `json:"start_time,omitempty"`
    EndTime Timestamp `json:"end_time,omitempty"`
    Attributes *Attributes `json:"attributes,omitempty"`
    StackTrace *StackTrace `json:"stack_trace,omitempty"`
    TimeEvents *TimeEvents `json:"time_events,omitempty"`
    Links *Links `json:"links,omitempty"`
    Status *Status `json:"status,omitempty"`
    Resource *Resource `json:"resource,omitempty"`
    SameProcessAsParentSpan bool `json:"same_process_as_parent_span,omitempty"`
    ChildSpanCount bool `json:"child_span_count,omitempty"`
}

type TraceState struct {
    Entries []*TraceStateEntry `json:"entries,omitempty"`
}

type TraceStateEntry struct {
    Key string `json:"key,omitempty"`
    Value string `json:"value,omitempty"`
}

type SpanKind int32

const (
    UnspecifiedSpanKind SpanKind = 0
    ServerSpanKind SpanKind = 1
    ClientSpanKind SpanKind = 2
)

type TimeEvents struct {
    TimeEvent []TimeEvent `json:"timeEvent,omitempty"`
    DroppedAnnotationsCount int32 `json:"dropped_annotations_count,omitempty"`
    DroppedMessageEventsCount int32 `json:"dropped_message_events_count,omitempty"`
}

type TimeEvent struct {
    Time Timestamp `json:"time,omitempty"`
    MessageEvent *MessageEvent `json:"messageEvent,omitempty"`
    Annotation *Annotation `json:"annotation,omitempty"`
}

type Annotation struct {
    Description *TruncatableString `json:"description,omitempty"`
    Attributes *Attributes `json:"attributes,omitempty"`
}

type MessageEvent struct {
    Type MessageEventType `json:"type,omitempty"`
    Id uint64 `json:"id,omitempty"`
    UncompressedSize uint64 `json:"uncompressed_size,omitempty"`
    CompressedSize uint64 `json:"compressed_size,omitempty"`
}

type MessageEventType int32

const (
    UnspecifiedMessageEvent MessageEventType = iota
    SentMessageEvent
    ReceivedMessageEvent
)

type TimeEventValue interface {
    tagTimeEventValue()
}

func (Annotation) tagTimeEventValue() {}
func (MessageEvent) tagTimeEventValue() {}

type Links struct {
    Link []*Link `json:"link,omitempty"`
    DroppedLinksCount int32 `json:"dropped_links_count,omitempty"`
}

type Link struct {
    TraceId []byte `json:"trace_id,omitempty"`
    SpanId []byte `json:"span_id,omitempty"`
    Type LinkType `json:"type,omitempty"`
    Attributes *Attributes `json:"attributes,omitempty"`
    TraceState *TraceState `json:"tracestate,omitempty"`
}

type LinkType int32

const (
    UnspecifiedLinkType LinkType = 0
    ChildLinkType LinkType = 1
    ParentLinkType LinkType = 2
)

type Status struct {
    Code int32 `json:"code,omitempty"`
    Message string `json:"message,omitempty"`
}

@ -17,12 +17,24 @@ import (
// those values in the context.
type Key string

// Of returns a Tag for a key and value.
// This is a trivial helper that makes common logging easier to read.
func Of(key interface{}, value interface{}) Tag {
    return Tag{Key: key, Value: value}
}

// Of creates a new Tag with this key and the supplied value.
// You can use this when building a tag list.
func (k Key) Of(v interface{}) Tag {
    return Tag{Key: k, Value: v}
}

// Tag can be used to get a tag for the key from a context.
// It makes Key conform to the Tagger interface.
func (k Key) Tag(ctx context.Context) Tag {
    return Tag{Key: k, Value: ctx.Value(k)}
}

// With sets this key to the supplied value on the context and
// returns the new context generated.
// It uses the With package level function so that observers are also notified.

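A hypothetical sketch (not part of this change) of how these helpers compose, assuming the package-level With mentioned above stores each tag's value on the context under its key: Key.Of builds a Tag for a list, With records it, and Key.Tag reads it back. The key name and value are invented for illustration:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/tools/internal/lsp/telemetry/tag"
    )

    var RequestID = tag.Key("request_id") // illustrative key

    func main() {
        ctx := tag.With(context.Background(), RequestID.Of("42"))
        // Expected (given the assumption above): request_id="42"
        fmt.Println(RequestID.Tag(ctx))
    }
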
@ -25,6 +25,14 @@ type Tag struct {
    Value interface{}
}

// Tagger is the interface to something that returns a Tag given a context.
// Both Tag itself and Key support this interface, allowing methods that can
// take either (and other implementations as well).
type Tagger interface {
    // Tag returns a Tag potentially using information from the Context.
    Tag(context.Context) Tag
}

// List is a way of passing around a collection of key value pairs.
// It is an alternative to the less efficient and unordered method of using
// maps.

@ -64,6 +72,15 @@ func Get(ctx context.Context, keys ...interface{}) List {
    return tags
}

// Tags collects a list of tags for the taggers from the context.
func Tags(ctx context.Context, taggers ...Tagger) List {
    tags := make(List, len(taggers))
    for i, t := range taggers {
        tags[i] = t.Tag(ctx)
    }
    return tags
}

var observers = []Observer{}

// Observe adds a new tag observer to the registered set.

@ -80,6 +97,12 @@ func (t Tag) Format(f fmt.State, r rune) {
    fmt.Fprintf(f, `%v="%v"`, t.Key, t.Value)
}

// Tag returns the tag unmodified.
// It makes Tag conform to the Tagger interface.
func (t Tag) Tag(ctx context.Context) Tag {
    return t
}

// Get will get a single key's value from the list.
func (l List) Get(k interface{}) interface{} {
    for _, t := range l {

@ -0,0 +1,72 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package trace adds support for telemetry tracing.
package trace

import (
    crand "crypto/rand"
    "encoding/binary"
    "fmt"
    "math/rand"
    "sync"
    "sync/atomic"
)

type TraceID [16]byte
type SpanID [8]byte

func (t TraceID) String() string {
    return fmt.Sprintf("%02x", t[:])
}

func (s SpanID) String() string {
    return fmt.Sprintf("%02x", s[:])
}

func (s SpanID) IsValid() bool {
    return s != SpanID{}
}

var (
    generationMu sync.Mutex
    nextSpanID uint64
    spanIDInc uint64

    traceIDAdd [2]uint64
    traceIDRand *rand.Rand
)

func initGenerator() {
    var rngSeed int64
    for _, p := range []interface{}{
        &rngSeed, &traceIDAdd, &nextSpanID, &spanIDInc,
    } {
        binary.Read(crand.Reader, binary.LittleEndian, p)
    }
    traceIDRand = rand.New(rand.NewSource(rngSeed))
    spanIDInc |= 1
}

func newTraceID() TraceID {
    generationMu.Lock()
    defer generationMu.Unlock()
    if traceIDRand == nil {
        initGenerator()
    }
    var tid [16]byte
    binary.LittleEndian.PutUint64(tid[0:8], traceIDRand.Uint64()+traceIDAdd[0])
    binary.LittleEndian.PutUint64(tid[8:16], traceIDRand.Uint64()+traceIDAdd[1])
    return tid
}

func newSpanID() SpanID {
    var id uint64
    for id == 0 {
        id = atomic.AddUint64(&nextSpanID, spanIDInc)
    }
    var sid [8]byte
    binary.LittleEndian.PutUint64(sid[:], id)
    return sid
}

@ -2,59 +2,139 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package tag adds support for telemetry tracins.
// Package trace adds support for telemetry tracing.
package trace

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/tools/internal/lsp/telemetry/log"
    "golang.org/x/tools/internal/lsp/telemetry/tag"
    "golang.org/x/tools/internal/lsp/telemetry/worker"
)

type Span interface {
    AddAttributes(attributes ...Attribute)
    AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
    AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
    Annotate(attributes []Attribute, str string)
    Annotatef(attributes []Attribute, format string, a ...interface{})
    End()
    IsRecordingEvents() bool
    SetName(name string)
    SetStatus(status Status)
type Span struct {
    Name string
    TraceID TraceID
    SpanID SpanID
    ParentID SpanID
    Start time.Time
    Finish time.Time
    Tags tag.List
    Events []Event

    ready bool
}

type Attribute interface{}

type Status struct {
    Code int32
    Message string
type Event struct {
    Time time.Time
    Tags tag.List
}

type nullSpan struct{}
type Observer func(*Span)

func (nullSpan) AddAttributes(attributes ...Attribute) {}
func (nullSpan) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {}
func (nullSpan) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {}
func (nullSpan) Annotate(attributes []Attribute, str string) {}
func (nullSpan) Annotatef(attributes []Attribute, format string, a ...interface{}) {}
func (nullSpan) End() {}
func (nullSpan) IsRecordingEvents() bool { return false }
func (nullSpan) SetName(name string) {}
func (nullSpan) SetStatus(status Status) {}
func RegisterObservers(o ...Observer) {
    worker.Do(func() {
        if !registered {
            registered = true
            tag.Observe(tagObserver)
            log.AddLogger(logger)
        }
        observers = append(observers, o...)
    })
}

func StartSpan(ctx context.Context, name string, tags ...tag.Tag) (context.Context, func()) {
    span := &Span{
        Name: name,
        Start: time.Now(),
    }
    if parent := fromContext(ctx); parent != nil {
        span.TraceID = parent.TraceID
        span.ParentID = parent.SpanID
    } else {
        span.TraceID = newTraceID()
    }
    span.SpanID = newSpanID()
    ctx = context.WithValue(ctx, contextKey, span)
    if len(tags) > 0 {
        ctx = tag.With(ctx, tags...)
    }
    worker.Do(func() {
        span.ready = true
        for _, o := range observers {
            o(span)
        }
    })
    return ctx, span.close
}

func (s *Span) close() {
    now := time.Now()
    worker.Do(func() {
        s.Finish = now
        for _, o := range observers {
            o(s)
        }
    })
}

func (s *Span) Format(f fmt.State, r rune) {
    fmt.Fprintf(f, "%v %v:%v", s.Name, s.TraceID, s.SpanID)
    if s.ParentID.IsValid() {
        fmt.Fprintf(f, "[%v]", s.ParentID)
    }
    fmt.Fprintf(f, " %v->%v", s.Start, s.Finish)
}

type contextKeyType int

var contextKey contextKeyType

func fromContext(ctx context.Context) *Span {
    v := ctx.Value(contextKey)
    if v == nil {
        return nil
    }
    return v.(*Span)
}

var (
    FromContext = func(ctx context.Context) Span { return nullSpan{} }
    NewContext = func(ctx context.Context, span Span) context.Context { return ctx }
    StartSpan = func(ctx context.Context, name string, options ...interface{}) (context.Context, Span) {
        return ctx, nullSpan{}
    }
    BoolAttribute = func(key string, value bool) Attribute { return nil }
    Float64Attribute = func(key string, value float64) Attribute { return nil }
    Int64Attribute = func(key string, value int64) Attribute { return nil }
    StringAttribute = func(key string, value string) Attribute { return nil }
    WithSpanKind = func(spanKind int) interface{} { return nil }
    observers []Observer
    registered bool
)

const (
    SpanKindUnspecified = iota
    SpanKindServer
    SpanKindClient
)
func tagObserver(ctx context.Context, at time.Time, tags tag.List) {
    span := fromContext(ctx)
    if span == nil {
        return
    }
    if !span.ready {
        span.Tags = append(span.Tags, tags...)
        return
    }
    span.Events = append(span.Events, Event{
        Time: at,
        Tags: tags,
    })
}

func logger(ctx context.Context, at time.Time, tags tag.List) bool {
    span := fromContext(ctx)
    if span == nil {
        return false
    }
    span.Events = append(span.Events, Event{
        Time: at,
        Tags: tags,
    })
    return false
}

// Detach returns a context without an associated span.
// This allows the creation of spans that are not children of the current span.
func Detach(ctx context.Context) context.Context {
    return context.WithValue(ctx, contextKey, nil)
}

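To make the span lifecycle concrete, a minimal sketch (not part of this change) using only the API above: StartSpan creates a root or child span and returns a closure that records the finish time and re-delivers the span to observers. The function name and tag are illustrative:

    package main

    import (
        "context"

        "golang.org/x/tools/internal/lsp/telemetry/tag"
        "golang.org/x/tools/internal/lsp/telemetry/trace"
    )

    func doWork(ctx context.Context) {
        ctx, done := trace.StartSpan(ctx, "doWork", tag.Of("kind", "example"))
        defer done() // records Finish and notifies observers with the completed span
        _ = ctx      // ... real work, possibly starting child spans from ctx ...
    }

    func main() {
        doWork(context.Background())
    }
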
@ -6,8 +6,13 @@
// to work cooperatively and efficiently.
package worker

import (
    "fmt"
    "os"
)

var (
    workQueue = make(chan func(), 100)
    workQueue = make(chan func(), 1000)
)

func init() {

|
// This function may block, but in general it will return very quickly and
// before the task has been run.
func Do(task func()) {
    select {
    case workQueue <- task:
    default:
        fmt.Fprint(os.Stderr, "work queue is full\n")
        workQueue <- task
    }
}

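A small, hypothetical sketch of calling Do. It assumes the init function above (whose body is cut off in this diff) starts a goroutine that drains workQueue, so tasks run off the caller's goroutine and Do usually returns before the task executes:

    package main

    import "golang.org/x/tools/internal/lsp/telemetry/worker"

    func main() {
        done := make(chan struct{})
        worker.Do(func() {
            // runs later, on the worker's goroutine, not the caller's
            close(done)
        })
        <-done // Do normally returns before the task has run, so wait explicitly
    }
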
@ -66,6 +66,7 @@ foo/foo.go:1:9-12: defined here as
}

-- PackageFoo-hover --
myFoo "golang.org/x/tools/internal/lsp/foo" //@godef("foo", PackageFoo),godef("myFoo", PackageFoo)

-- S1-definition --
godef/b/b.go:8:6-8: defined here as S1 struct {